├── .cargo └── config.toml ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ ├── config.yml │ ├── enhancement.yml │ └── feature_request.yml └── workflows │ ├── ci.yml │ ├── nightly.yml │ └── release.yml ├── .gitignore ├── CONTRIBUTING.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── NOTICE.md ├── README.md ├── examples ├── Cargo.toml ├── README.md ├── examples │ ├── file-writer │ │ ├── file-writer.yaml │ │ └── src │ │ │ └── lib.rs │ └── greetings-maker │ │ ├── greetings-maker.yaml │ │ └── src │ │ └── lib.rs └── flows │ └── getting-started.yaml ├── justfile ├── rust-toolchain.toml ├── rustfmt.toml ├── tests ├── expected-standalone-runtime.txt └── zenoh-plugin-zenoh-flow.json ├── zenoh-flow-commons ├── Cargo.toml └── src │ ├── configuration.rs │ ├── deserialize.rs │ ├── identifiers.rs │ ├── lib.rs │ ├── merge.rs │ ├── shared_memory.rs │ ├── utils.rs │ └── vars.rs ├── zenoh-flow-daemon ├── Cargo.toml └── src │ ├── daemon │ ├── configuration.rs │ ├── mod.rs │ └── queryables.rs │ ├── lib.rs │ └── queries │ ├── instances │ ├── abort.rs │ ├── create.rs │ ├── delete.rs │ ├── mod.rs │ └── start.rs │ ├── mod.rs │ ├── runtime.rs │ └── selectors.rs ├── zenoh-flow-derive ├── Cargo.toml └── src │ └── lib.rs ├── zenoh-flow-descriptors ├── Cargo.toml ├── src │ ├── dataflow.rs │ ├── flattened │ │ ├── dataflow.rs │ │ ├── mod.rs │ │ ├── nodes │ │ │ ├── mod.rs │ │ │ ├── operator.rs │ │ │ ├── sink.rs │ │ │ └── source.rs │ │ ├── tests.rs │ │ └── validator │ │ │ ├── mod.rs │ │ │ └── tests.rs │ ├── io.rs │ ├── lib.rs │ ├── nodes │ │ ├── builtin │ │ │ ├── mod.rs │ │ │ └── zenoh.rs │ │ ├── mod.rs │ │ ├── operator │ │ │ ├── composite.rs │ │ │ └── mod.rs │ │ ├── sink.rs │ │ └── source.rs │ └── uri.rs └── tests │ └── descriptors │ ├── composite-nested.yml │ ├── composite-outer.yml │ ├── data-flow-recursion-duplicate-composite.yml │ ├── data-flow-recursion.yml │ ├── data-flow.yml │ ├── operator-1.yml │ ├── operator-2.yml │ ├── operator-composite.yml │ ├── operator-infinite.yml │ ├── operator.yml │ ├── sink-composite.yml │ ├── sink.yml │ ├── source-composite.yml │ ├── source.yml │ ├── sub-operator-1.yml │ ├── sub-operator-2.yml │ ├── sub-operator-composite.yml │ ├── sub-sub-operator-1.yml │ └── sub-sub-operator-2.yml ├── zenoh-flow-nodes ├── Cargo.toml ├── build.rs └── src │ ├── context.rs │ ├── declaration.rs │ ├── io │ ├── inputs.rs │ ├── mod.rs │ ├── outputs.rs │ └── tests │ │ ├── input-tests.rs │ │ ├── output-tests.rs │ │ └── test_types.proto │ ├── lib.rs │ ├── messages.rs │ └── traits.rs ├── zenoh-flow-records ├── Cargo.toml └── src │ ├── connectors.rs │ ├── dataflow.rs │ ├── lib.rs │ └── tests.rs ├── zenoh-flow-runtime ├── Cargo.toml └── src │ ├── instance.rs │ ├── lib.rs │ ├── loader │ ├── extensions.rs │ └── mod.rs │ ├── runners │ ├── builtin │ │ ├── mod.rs │ │ └── zenoh │ │ │ ├── mod.rs │ │ │ ├── sink.rs │ │ │ └── source.rs │ ├── connectors.rs │ └── mod.rs │ ├── runtime │ ├── builder.rs │ ├── load.rs │ └── mod.rs │ └── shared_memory.rs ├── zenoh-plugin-zenoh-flow ├── Cargo.toml └── src │ └── lib.rs └── zfctl ├── Cargo.toml └── src ├── daemon_command.rs ├── instance_command.rs ├── main.rs ├── run_local_command.rs └── utils.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.x86_64-unknown-linux-musl] 2 | rustflags = "-Ctarget-feature=-crt-static" 3 | 4 | [target.aarch64-unknown-linux-musl] 5 | rustflags = "-Ctarget-feature=-crt-static" -------------------------------------------------------------------------------- 
/.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Report a bug 2 | description: | 3 | Create a bug report to help us improve Zenoh Flow. 4 | title: "[Bug] " 5 | labels: ["bug"] 6 | body: 7 | - type: textarea 8 | id: summary 9 | attributes: 10 | label: "Describe the bug" 11 | description: | 12 | A clear and concise description of the expected behaviour and what the bug is. 13 | placeholder: | 14 | E.g. zenoh peers can not automatically establish a connection. 15 | validations: 16 | required: true 17 | - type: textarea 18 | id: reproduce 19 | attributes: 20 | label: To reproduce 21 | description: "Steps to reproduce the behavior:" 22 | placeholder: | 23 | 1. Start a subscriber "..." 24 | 2. Start a publisher "...." 25 | 3. See error 26 | validations: 27 | required: true 28 | - type: textarea 29 | id: system 30 | attributes: 31 | label: System info 32 | description: "Please complete the following information:" 33 | placeholder: | 34 | - Platform: [e.g. Ubuntu 20.04 64-bit] 35 | - CPU [e.g. AMD Ryzen 3800X] 36 | - Zenoh version/commit [e.g. 6f172ea985d42d20d423a192a2d0d46bb0ce0d11] 37 | validations: 38 | required: true 39 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: Talk with us! 4 | url: https://discord.gg/WsUF9GSkUJ 5 | about: The Zenoh and Zenoh-Flow Discord server. 6 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/enhancement.yml: -------------------------------------------------------------------------------- 1 | name: Enhancement 2 | 3 | description: | 4 | How the API or internal structure of Zenoh-Flow can be improved. 5 | 6 | title: "[Enhancement] " 7 | 8 | labels: ["enhancement"] 9 | 10 | body: 11 | - type: textarea 12 | id: current 13 | attributes: 14 | label: "Current behaviour and limitations" 15 | description: | 16 | A description of the current state / behaviour of Zenoh-Flow and how it 17 | is limiting. 18 | placeholder: | 19 | E.g. A node does not have access to the path of its implementation on 20 | the device running it. This is limiting for the bindings as for some 21 | languages this information is required (i.e. Python with PyO3). 22 | validations: 23 | required: true 24 | 25 | - type: textarea 26 | id: solution 27 | attributes: 28 | label: "Proposed solution?" 29 | description: | 30 | A description of a possible solution that would help address the 31 | limitation(s) described above. 32 | placeholder: | 33 | E.g. The path of the implementation of a node can be passed in the 34 | `Context` structure. 35 | validations: 36 | required: false 37 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.yml: -------------------------------------------------------------------------------- 1 | name: Request a feature 2 | description: | 3 | Suggest a new feature specific to this repository. NOTE: for generic Zenoh Flow ideas use "Ask a question". 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | **Guidelines for a good issue** 9 | 10 | *Is your feature request related to a problem?* 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 
12 | 13 | *Describe the solution you'd like* 14 | A clear and concise description of what you want to happen. 15 | 16 | *Describe alternatives you've considered* 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | *Additional context* 20 | Add any other context about the feature request here. 21 | - type: textarea 22 | id: feature 23 | attributes: 24 | label: "Describe the feature" 25 | validations: 26 | required: true 27 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2022 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | name: CI 16 | 17 | on: 18 | push: 19 | branches: [ '**' ] 20 | pull_request: 21 | branches: [ '**' ] 22 | schedule: 23 | - cron: '0 0 * * 1-5' 24 | 25 | jobs: 26 | checks: 27 | name: Run checks on ${{ matrix.os }} 28 | runs-on: ${{ matrix.os }} 29 | strategy: 30 | fail-fast: false 31 | matrix: 32 | os: [ubuntu-latest, macOS-latest] 33 | steps: 34 | - uses: actions/checkout@v4 35 | 36 | - name: Install Rust toolchain 37 | run: | 38 | rustup show 39 | rustup component add rustfmt clippy 40 | 41 | - name: Code format check 42 | run: cargo fmt --check 43 | env: 44 | CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse 45 | 46 | - name: Clippy 47 | run: cargo clippy --all-targets -- --deny warnings 48 | env: 49 | CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse 50 | 51 | tests: 52 | name: Run tests on ${{ matrix.os }} 53 | needs: [checks] 54 | runs-on: ${{ matrix.os }} 55 | strategy: 56 | fail-fast: false 57 | matrix: 58 | os: [ubuntu-latest, macOS-latest] 59 | 60 | steps: 61 | - uses: actions/checkout@v4 62 | 63 | - name: Install Rust toolchain 64 | run: | 65 | rustup show 66 | 67 | - name: Install latest nextest 68 | uses: taiki-e/install-action@nextest 69 | 70 | - name: Run tests 71 | run: cargo nextest run 72 | env: 73 | CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse 74 | ASYNC_STD_THREAD_COUNT: 4 75 | 76 | - name: Run doctests 77 | run: cargo test --doc 78 | env: 79 | CARGO_REGISTRIES_CRATES_IO_PROTOCOL: sparse 80 | ASYNC_STD_THREAD_COUNT: 4 81 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | **/target 4 | 5 | # Ignore all Cargo.lock but one at top-level, since it's committed in git. 
6 | */**/Cargo.lock 7 | 8 | # These are backup files generated by rustfmt 9 | **/*.rs.bk 10 | 11 | # CLion project directory 12 | .idea 13 | 14 | # Emacs temps 15 | *~ 16 | *.el 17 | 18 | # MacOS Related 19 | .DS_Store 20 | 21 | # Output files 22 | outfile.png 23 | output.dot 24 | 25 | **/Makefile 26 | **/*.zfext 27 | **/.rustfmt.toml 28 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Eclipse zenoh 2 | 3 | Thanks for your interest in this project. 4 | 5 | ## Project description 6 | 7 | Eclipse zenoh provides a stack designed to 8 | 1. minimize network overhead, 9 | 2. support extremely constrained devices, 10 | 3. support devices with low duty-cycle by allowing the negotiation of data exchange modes and schedules, 11 | 4. provide a rich set of abstractions for distributing, querying and storing data along the entire system, and 12 | 5. provide extremely low latency and high throughput. 13 | 14 | * https://projects.eclipse.org/projects/iot.zenoh 15 | 16 | ## Developer resources 17 | 18 | Information regarding source code management, builds, coding standards, and 19 | more. 20 | 21 | * https://projects.eclipse.org/projects/iot.zenoh/developer 22 | 23 | The project maintains the following source code repositories: 24 | 25 | * https://github.com/eclipse-zenoh 26 | 27 | ## Eclipse Contributor Agreement 28 | 29 | Before your contribution can be accepted by the project team, contributors must 30 | electronically sign the Eclipse Contributor Agreement (ECA). 31 | 32 | * http://www.eclipse.org/legal/ECA.php 33 | 34 | Commits that are provided by non-committers must have a Signed-off-by field in 35 | the footer indicating that the author is aware of the terms by which the 36 | contribution has been provided to the project. The non-committer must 37 | additionally have an Eclipse Foundation account and must have a signed Eclipse 38 | Contributor Agreement (ECA) on file. 39 | 40 | For more information, please see the Eclipse Committer Handbook: 41 | https://www.eclipse.org/projects/handbook/#resources-commit 42 | 43 | ## Contact 44 | 45 | Contact the project developers via the project's "dev" list. 46 | 47 | * https://accounts.eclipse.org/mailing-list/zenoh-dev 48 | 49 | Or via the Gitter channel. 50 | 51 | * https://gitter.im/atolab/zenoh 52 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0.
8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | [workspace] 15 | resolver = "2" 16 | members = [ 17 | "examples", 18 | "zenoh-flow-commons", 19 | "zenoh-flow-daemon", 20 | "zenoh-flow-derive", 21 | "zenoh-flow-descriptors", 22 | "zenoh-flow-nodes", 23 | "zenoh-flow-records", 24 | "zenoh-flow-runtime", 25 | "zenoh-plugin-zenoh-flow", 26 | "zfctl", 27 | ] 28 | 29 | [workspace.package] 30 | authors = ["ZettaScale Zenoh Team "] 31 | categories = ["network-programming"] 32 | description = "Zenoh-Flow: a Zenoh-based data flow programming framework for computations that span from the cloud to the device." 33 | edition = "2021" 34 | homepage = "https://github.com/eclipse-zenoh/zenoh-flow" 35 | license = " EPL-2.0 OR Apache-2.0" 36 | readme = "README.md" 37 | repository = "https://github.com/eclipse-zenoh/zenoh-flow" 38 | version = "0.6.0-dev" 39 | 40 | [workspace.dependencies] 41 | anyhow = "1" 42 | async-std = { version = "1.12", features = ["attributes"] } 43 | async-trait = "0.1.50" 44 | base64 = "0.21" 45 | bytesize = "1.2.0" 46 | clap = { version = "4.4", features = ["derive"] } 47 | flume = "0.11" 48 | futures = "0.3.15" 49 | git-version = "0.3" 50 | log = "0.4" 51 | serde = { version = "1.0", features = ["derive", "rc"] } 52 | serde_cbor = "0.11" 53 | serde_derive = "1.0" 54 | serde_json = "1.0" 55 | serde_yaml = "0.9" 56 | tracing = { version = "0.1", features = ["log"] } 57 | tracing-subscriber = { version = "0.3" } 58 | uhlc = "0.6" 59 | url = { version = "2.2", features = ["serde"] } 60 | uuid = { version = "1.1", features = ["serde", "v4"] } 61 | # ⚠️ To update the version of Zenoh, first *manually* copy the `Cargo.lock` from 62 | # the targeted version, then run `cargo build` and finally commit the updates. 63 | zenoh = { version = "=1.1.0", features = ["unstable", "internal", "plugins"] } 64 | zenoh-config = { version = "=1.1.0" } 65 | zenoh-flow-commons = { path = "./zenoh-flow-commons" } 66 | zenoh-flow-daemon = { path = "./zenoh-flow-daemon" } 67 | zenoh-flow-descriptors = { path = "./zenoh-flow-descriptors" } 68 | zenoh-flow-nodes = { path = "./zenoh-flow-nodes" } 69 | zenoh-flow-records = { path = "./zenoh-flow-records" } 70 | zenoh-flow-runtime = { path = "./zenoh-flow-runtime" } 71 | zenoh-plugin-trait = { version = "=1.1.0" } 72 | zenoh-keyexpr = { version = "=1.1.0" } 73 | 74 | [profile.dev] 75 | debug = true 76 | opt-level = 0 77 | 78 | [profile.release] 79 | codegen-units = 1 80 | debug = true 81 | lto = "fat" 82 | opt-level = 3 83 | panic = "abort" 84 | -------------------------------------------------------------------------------- /NOTICE.md: -------------------------------------------------------------------------------- 1 | # Notices for Eclipse zenoh 2 | 3 | This content is produced and maintained by the Eclipse zenoh project. 4 | 5 | * Project home: https://projects.eclipse.org/projects/iot.zenoh 6 | 7 | ## Trademarks 8 | 9 | Eclipse zenoh is trademark of the Eclipse Foundation. 10 | Eclipse, and the Eclipse Logo are registered trademarks of the Eclipse Foundation. 11 | 12 | ## Copyright 13 | 14 | All content is the property of the respective authors or their employers. 15 | For more information regarding authorship of content, please consult the 16 | listed source code repository logs. 
17 | 18 | ## Declared Project Licenses 19 | 20 | This program and the accompanying materials are made available under the 21 | terms of the Eclipse Public License 2.0 which is available at 22 | http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 23 | which is available at https://www.apache.org/licenses/LICENSE-2.0. 24 | 25 | SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 26 | 27 | ## Source Code 28 | 29 | The project maintains the source code of all repositories listed in https://github.com/eclipse-zenoh . 30 | 31 | ## Third-party Content 32 | 33 | *To be completed...* 34 | 35 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | [![Eclipse CI](https://ci.eclipse.org/zenoh/buildStatus/icon?job=zenoh-flow-nightly&subject=Eclipse%20CI)](https://ci.eclipse.org/zenoh/view/Zenoh%20Flow/job/zenoh-flow-nightly/) 5 | [![CI](https://github.com/eclipse-zenoh/zenoh-flow/actions/workflows/ci.yml/badge.svg)](https://github.com/eclipse-zenoh/zenoh-flow/actions/workflows/ci.yml) 6 | [![Discussion](https://img.shields.io/badge/discussion-on%20github-blue)](https://github.com/eclipse-zenoh/roadmap/discussions) 7 | [![Discord](https://img.shields.io/badge/chat-on%20discord-blue)](https://discord.gg/vSDSpqnbkm) 8 | 9 | 10 | # Eclipse Zenoh-Flow 11 | 12 | Zenoh-Flow is the union of Zenoh and data flow programming: a declarative framework for computations that span from the _Cloud_ to the _Thing_. 13 | 14 | ## Description 15 | 16 | Zenoh-Flow aims at simplifying and structuring (i) the _declaration_, (ii) the _deployment_ and (iii) the _writing_ of "complex" applications that can span from the Cloud to the Thing (or close to it). 17 | 18 | To these ends, Zenoh-Flow leverages the _data flow programming model_ --- where applications are viewed as a directed graph of computing units, and _Zenoh_ --- an Edge-native, data-centric, location-transparent communication middleware. 19 | 20 | This makes for a powerful combination as Zenoh offers flexibility and extensibility while data flow programming structures computations. The main benefit of this approach is that it allows us to decouple applications from the underlying infrastructure: data are published and subscribed to (_automatically_ with Zenoh-Flow) without the need to know where they are actually located. 21 | 22 | ## Feature Requests 23 | If you would like to see additional features, please submit an issue or reach out to us on [Discord](https://discord.gg/zARxf4Dr8Y). 24 | 25 | ----- 26 | 27 | 🧑‍💻 We are currently keeping our documentation and guides in the [Wiki](https://github.com/eclipse-zenoh/zenoh-flow/wiki) tab of this repository. 28 | 29 | ----- 30 | 31 | ## Installation 32 | 33 | Follow our guide [here](https://github.com/eclipse-zenoh/zenoh-flow/wiki/Installation-(v0.4.0))! 34 | 35 | ## Getting Started 36 | 37 | The best way to learn Zenoh-Flow is to go through our [getting started guide](https://github.com/eclipse-zenoh/zenoh-flow/wiki/Getting-started-(v0.4.0)). 38 | 39 | ## Examples 40 | 41 | We encourage you to look at the examples available in our [examples repository](https://github.com/ZettaScaleLabs/zenoh-flow-examples). 42 | 43 | 🚗 If you still want more, we also ported an [Autonomous Driving Pipeline](https://github.com/ZettaScaleLabs/stunt)!
44 | -------------------------------------------------------------------------------- /examples/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "A set of Zenoh-Flow data flow examples" 19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-flow-examples" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | [dependencies] 27 | async-std = { workspace = true } 28 | async-trait = { workspace = true } 29 | prost = "0.11" 30 | zenoh-flow-nodes = { workspace = true } 31 | 32 | [[example]] 33 | name = "greetings-maker" 34 | path = "examples/greetings-maker/src/lib.rs" 35 | crate-type = ["cdylib"] 36 | 37 | [[example]] 38 | name = "file-writer" 39 | path = "examples/file-writer/src/lib.rs" 40 | crate-type = ["cdylib"] 41 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Zenoh-Flow examples 2 | 3 | ## How to run 4 | 5 | ### Build 6 | 7 | We can create all the zenoh-flow node libraries used in the examples with the following command: 8 | ```bash 9 | cargo build --examples 10 | ``` 11 | 12 | Alternatively, we can create a single library of a zenoh-flow node with the following command: 13 | ```bash 14 | cargo build --example 15 | ``` 16 | 17 | ### Configure and run the examples 18 | 19 | We first have to update all the occurrences of `{{ BASE_DIR }}` in the YAML descriptors to match our system. 20 | 21 | #### Launch the flow 22 | 23 | ```shell 24 | ./target/debug/zfctl launch ~/dev/zenoh-flow/examples/data-flow.yaml 25 | ``` 26 | 27 | If you have enabled the REST plugin of Zenoh 28 | ```shell 29 | curl -X PUT -d 'world' http://localhost:8000/zf/getting-started/hello 30 | ``` 31 | 32 | For the "period-miss-detector" example: 33 | 34 | ```shell 35 | curl -X PUT -d '2340' http://localhost:8000/zf/period-miss-detector 36 | ``` 37 | #### Show the result: 38 | 39 | The Sink node used in both examples creates a text file where the node writes the strings it receives. 40 | We can see the "getting-started" test file with: 41 | 42 | ``` 43 | tail -f /tmp/greetings.txt 44 | ``` 45 | 46 | For the "period-miss-detector" example: 47 | 48 | ``` 49 | tail -f /tmp/period-log.txt 50 | ``` 51 | 52 | -------------------------------------------------------------------------------- /examples/examples/file-writer/file-writer.yaml: -------------------------------------------------------------------------------- 1 | id: file-writer 2 | 3 | vars: 4 | BASE_DIR: "/path/to/zenoh-flow" 5 | 6 | # Do not forget to change the extension depending on your operating system! 
7 | # Linux -> .so 8 | # Windows -> .dll (and remove the "lib" in front) 9 | # MacOS -> .dylib 10 | uri: "file://{{ BASE_DIR }}/target/debug/examples/libfile_writer.dylib" 11 | # If the compilation is in release: 12 | # uri: file:///absolute/path/to/target/release/libfile_writer.so 13 | 14 | inputs: [in] 15 | -------------------------------------------------------------------------------- /examples/examples/file-writer/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2022 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use async_std::{fs::File, io::WriteExt, sync::Mutex}; 16 | use prost::Message as pMessage; 17 | use zenoh_flow_nodes::prelude::*; 18 | 19 | #[export_sink] 20 | pub struct FileWriter { 21 | input: Input<String>, 22 | file: Mutex<File>, 23 | } 24 | 25 | #[async_trait::async_trait] 26 | impl Node for FileWriter { 27 | async fn iteration(&self) -> Result<()> { 28 | let (greeting, _) = self.input.recv().await?; 29 | 30 | let mut file = self.file.lock().await; 31 | file.write_all(greeting.as_bytes()) 32 | .await 33 | .map_err(|e| anyhow!("{:?}", e))?; 34 | 35 | file.flush().await.map_err(|e| anyhow!("{:?}", e)) 36 | } 37 | } 38 | 39 | #[async_trait::async_trait] 40 | impl Sink for FileWriter { 41 | async fn new( 42 | _context: Context, 43 | configuration: Configuration, 44 | mut inputs: Inputs, 45 | ) -> Result<Self> { 46 | let file_path = if let Some(value) = configuration.get("file") { 47 | value 48 | .as_str() 49 | .unwrap_or_else(|| panic!("Unable to interpret < {} > as a string", value)) 50 | } else { 51 | "/tmp/greetings.txt" 52 | }; 53 | 54 | Ok(FileWriter { 55 | file: Mutex::new( 56 | File::create(file_path) 57 | .await 58 | .unwrap_or_else(|e| panic!("Could not create '{}'", e)), 59 | ), 60 | input: inputs 61 | .take("in") 62 | .expect("No Input called 'in' found") 63 | .typed(|bytes| String::decode(bytes).map_err(|e| anyhow!(e))), 64 | }) 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /examples/examples/greetings-maker/greetings-maker.yaml: -------------------------------------------------------------------------------- 1 | id: greetings-maker 2 | 3 | vars: 4 | BASE_DIR: "/path/to/zenoh-flow" 5 | 6 | # Do not forget to change the extension depending on your operating system!
7 | # Linux -> .so 8 | # Windows -> .dll (and remove the "lib" in front) 9 | # MacOS -> .dylib 10 | uri: "file://{{ BASE_DIR }}/target/debug/examples/libgreetings_maker.dylib" 11 | # If the compilation is in release: 12 | # uri: file:///absolute/path/to/target/release/libgreetings_maker.so 13 | 14 | inputs: [name] 15 | outputs: [greeting] 16 | -------------------------------------------------------------------------------- /examples/examples/greetings-maker/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2022 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use prost::Message as pMessage; 16 | use zenoh_flow_nodes::prelude::*; 17 | 18 | #[export_operator] 19 | pub struct GreetingsMaker { 20 | input: Input<String>, 21 | output: Output<String>, 22 | } 23 | 24 | #[async_trait::async_trait] 25 | impl Operator for GreetingsMaker { 26 | async fn new( 27 | _context: Context, 28 | _configuration: Configuration, 29 | mut inputs: Inputs, 30 | mut outputs: Outputs, 31 | ) -> Result<Self> { 32 | Ok(GreetingsMaker { 33 | input: inputs 34 | .take("name") 35 | .expect("No input 'name' found") 36 | .typed(|bytes| String::from_utf8(bytes.into()).map_err(|e| anyhow!(e))), 37 | output: outputs 38 | .take("greeting") 39 | .expect("No output 'greeting' found") 40 | .typed(|buffer, data: &String| data.encode(buffer).map_err(|e| anyhow!(e))), 41 | }) 42 | } 43 | } 44 | 45 | #[async_trait::async_trait] 46 | impl Node for GreetingsMaker { 47 | async fn iteration(&self) -> Result<()> { 48 | let (characters, _) = self.input.recv().await?; 49 | let name = characters.trim_end(); 50 | 51 | let greetings = match name { 52 | "Sofia" | "Leonardo" => format!("Ciao, {}!\n", name), 53 | "Lucia" | "Martin" => format!("¡Hola, {}!\n", name), 54 | "Jade" | "Gabriel" => format!("Bonjour, {} !\n", name), 55 | _ => format!("Hello, {}!\n", name), 56 | }; 57 | 58 | self.output.send(greetings, None).await 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /examples/flows/getting-started.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0.
8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | name: getting-started 16 | 17 | vars: 18 | TARGET_DIR: "/path/to/zenoh-flow/target" 19 | BUILD: "debug" 20 | DLL_EXTENSION: "so" 21 | OUT_FILE: "/tmp/greetings.txt" 22 | 23 | sources: 24 | - id: zenoh-sub 25 | description: The Source receiving the names 26 | zenoh-subscribers: 27 | hello: "zf/getting-started/hello" 28 | 29 | operators: 30 | - id: greetings-maker 31 | description: "This node will create the greeting, transforming for instance 'John' to: 'Hello, John!'" 32 | library: "file://{{ TARGET_DIR }}/{{ BUILD }}/examples/libgreetings_maker.{{ DLL_EXTENSION }}" 33 | inputs: 34 | - name 35 | outputs: 36 | - greeting 37 | 38 | sinks: 39 | - id: file-writer 40 | configuration: 41 | file: "{{ OUT_FILE }}" 42 | library: "file://{{ TARGET_DIR }}/{{ BUILD }}/examples/libfile_writer.{{ DLL_EXTENSION }}" 43 | description: "This Sink will write the greetings in a temporary file." 44 | inputs: 45 | - in 46 | 47 | - id: zenoh-writer 48 | description: The Sink publishing the result of the processing 49 | zenoh-publishers: 50 | greeting: "zf/getting-started/greeting" 51 | 52 | links: 53 | - from: 54 | node: zenoh-sub 55 | output: hello 56 | to: 57 | node: greetings-maker 58 | input: name 59 | 60 | - from: 61 | node: greetings-maker 62 | output: greeting 63 | to: 64 | node: file-writer 65 | input: in 66 | 67 | - from: 68 | node: greetings-maker 69 | output: greeting 70 | to: 71 | node: zenoh-writer 72 | input: greeting 73 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2024 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | TARGET_DIR := `pwd` / "target" 16 | OUT_FILE := `pwd` / "greetings.txt" 17 | BUILD := "debug" 18 | 19 | # Perform all verification on the code 20 | code-checks: 21 | cargo build -p zenoh-flow-runtime --no-default-features 22 | cargo build -p zenoh-flow-runtime --no-default-features --features zenoh 23 | cargo build -p zenoh-flow-daemon 24 | cargo nextest run 25 | cargo test --doc 26 | cargo clippy --all-targets -- --deny warnings 27 | cargo +nightly fmt --check 28 | 29 | # Test the standalone-runtime executable with the getting-started flow 30 | test-standalone-runtime: standalone-runtime--bg 31 | @sleep 3 32 | z_put -k "zf/getting-started/hello" -v "standalone runtime" 33 | @sleep 3 34 | diff {{ OUT_FILE }} tests/expected-standalone-runtime.txt 35 | killall zenoh-flow-standalone-runtime 36 | rm {{ OUT_FILE }} 37 | 38 | [macos] 39 | standalone-runtime--bg: build-runtime build-examples 40 | RUST_LOG=zenoh_flow=trace ./target/debug/zenoh-flow-standalone-runtime \ 41 | --vars TARGET_DIR={{ TARGET_DIR }} \ 42 | --vars BUILD={{ BUILD }} \ 43 | --vars DLL_EXTENSION="dylib" \ 44 | --vars OUT_FILE={{ OUT_FILE }} \ 45 | ./examples/flows/getting-started.yaml & 46 | 47 | [linux] 48 | standalone-runtime--bg: build-runtime build-examples 49 | RUST_LOG=zenoh_flow=trace ./target/debug/zenoh-flow-standalone-runtime \ 50 | --vars TARGET_DIR={{ TARGET_DIR }} \ 51 | --vars BUILD={{ BUILD }} \ 52 | --vars DLL_EXTENSION="so" \ 53 | --vars OUT_FILE={{ OUT_FILE }} \ 54 | ./examples/flows/getting-started.yaml & 55 | 56 | # Build just the standalone-runtime. 57 | build-runtime: 58 | cargo build -p zenoh-flow-standalone-runtime 59 | 60 | # Build all the examples. 61 | build-examples: 62 | cargo build --examples 63 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "1.75.0" 3 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2024 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | # NOTE: The features below are "unstable" and either require to run `rustfmt` 16 | # with the extra argument "+nightly" or, if the latter is not possible, to pass 17 | # all these configuration via the "--config" option when invoking `rustfmt`. 18 | unstable_features = true 19 | 20 | format_code_in_doc_comments = true 21 | imports_granularity = "Crate" 22 | group_imports = "StdExternalCrate" 23 | -------------------------------------------------------------------------------- /tests/expected-standalone-runtime.txt: -------------------------------------------------------------------------------- 1 | Hello, standalone runtime! 
2 | -------------------------------------------------------------------------------- /tests/zenoh-plugin-zenoh-flow.json: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2024 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | // 16 | // To launch Zenoh-Flow as a Zenoh plugin, use the following command: 17 | // 18 | // zenohd \ 19 | // --plugin-search-dir /path/to/zenoh-flow/target/debug/ \ 20 | // -c /path/to/zenoh-flow/tests/zenoh-plugin-zenoh-flow.json 21 | // 22 | 23 | { 24 | "plugins_loading": { 25 | "enabled": true, 26 | "search_dirs": [], 27 | }, 28 | "plugins": { 29 | "zenoh_flow": { 30 | // Forces the loading of Zenoh-Flow. 31 | "__required__": true, 32 | "name": "zenoh-plugin-test", 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /zenoh-flow-commons/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "Internal crate for Zenoh-Flow." 19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-flow-commons" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | [dependencies] 27 | anyhow = { workspace = true } 28 | bytesize = { workspace = true } 29 | handlebars = "5.1.0" 30 | humantime = "2.1" 31 | serde = { workspace = true } 32 | serde_json = { workspace = true } 33 | serde_yaml = { workspace = true } 34 | tracing = { workspace = true } 35 | uuid = { workspace = true } 36 | zenoh-keyexpr = { workspace = true } 37 | zenoh-config = { workspace = true } 38 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/configuration.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::ops::{Deref, DerefMut}; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | 19 | use crate::merge::IMergeOverwrite; 20 | 21 | /// A `Configuration` is a recursive key-value structure that allows modifying the behaviour of a node without altering 22 | /// its implementation. 23 | /// 24 | /// It is effectively a re-export of [serde_json::Value]. 25 | /// 26 | /// # Declaration, propagation and merging 27 | /// 28 | /// Zenoh-Flow allows users to declare a configuration at 3 locations: 29 | /// - at the top-level of a data flow descriptor, 30 | /// - at the top-level of a composite operator descriptor, 31 | /// - in a node (be it within a data flow descriptor, a composite descriptor or in its dedicated file). 32 | /// 33 | /// If a configuration is declared at a top-level it is propagated to all the nodes it includes. Hence, a declaration at 34 | /// the top-level of a data flow is propagated to all the nodes it contains. 35 | /// 36 | /// When two configuration keys collide, the configuration with the highest order is kept. The priorities are (from 37 | /// highest to lowest): 38 | /// - the configuration in a node within a data flow descriptor, 39 | /// - the configuration at the top-level of a data flow descriptor, 40 | /// - the configuration in a node within a composite operator descriptor, 41 | /// - the configuration at the top-level of a composite operator descriptor, 42 | /// - the configuration in a dedicated file of a node. 43 | /// 44 | /// Hence, configuration at the data flow level are propagating to all nodes, possibly overwriting default values. The 45 | /// same rules apply at the composite operator level. If a node should have a slightly different setting compared to all 46 | /// others, then, thanks to these priorities, only that node needs to be tweaked (either in the data flow or in the 47 | /// composite operator). 48 | /// 49 | /// # Examples 50 | /// 51 | /// - YAML 52 | /// 53 | /// ```yaml 54 | /// configuration: 55 | /// name: "John Doe", 56 | /// age: 43, 57 | /// phones: 58 | /// - "+44 1234567" 59 | /// - "+44 2345678" 60 | /// ``` 61 | /// 62 | /// - JSON 63 | /// 64 | /// ```json 65 | /// "configuration": { 66 | /// "name": "John Doe", 67 | /// "age": 43, 68 | /// "phones": [ 69 | /// "+44 1234567", 70 | /// "+44 2345678" 71 | /// ] 72 | /// } 73 | /// ``` 74 | // 75 | // NOTE: we take the `serde_json` representation because: 76 | // - JSON is the most supported representation when going online, 77 | // - a `serde_json::Value` can be converted to a `serde_yaml::Value` whereas the opposite is not true (YAML introduces 78 | // "tags" which are not supported by JSON). 
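/// # Reading values
///
/// A minimal sketch of how a node can read values out of its `Configuration`: since it dereferences to a
/// [serde_json::Value], the usual `serde_json` accessors apply. The `file` key below simply mirrors the
/// `file-writer` example node and is only illustrative.
///
/// ```
/// use zenoh_flow_commons::Configuration;
///
/// let configuration: Configuration = serde_json::json!({ "file": "/tmp/greetings.txt" }).into();
///
/// // Fall back to a default value when the key is absent.
/// let file = configuration
///     .get("file")
///     .and_then(|value| value.as_str())
///     .unwrap_or("/tmp/greetings.txt");
///
/// assert_eq!(file, "/tmp/greetings.txt");
/// ```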
79 | #[derive(Default, Deserialize, Debug, Serialize, Clone, PartialEq, Eq)] 80 | pub struct Configuration(serde_json::Value); 81 | 82 | impl Deref for Configuration { 83 | type Target = serde_json::Value; 84 | 85 | fn deref(&self) -> &Self::Target { 86 | &self.0 87 | } 88 | } 89 | 90 | impl DerefMut for Configuration { 91 | fn deref_mut(&mut self) -> &mut Self::Target { 92 | &mut self.0 93 | } 94 | } 95 | 96 | impl IMergeOverwrite for Configuration { 97 | fn merge_overwrite(self, other: Self) -> Self { 98 | if self == Configuration::default() { 99 | return other; 100 | } 101 | 102 | if other == Configuration::default() { 103 | return self; 104 | } 105 | 106 | match (self.as_object(), other.as_object()) { 107 | (Some(this), Some(other)) => { 108 | let mut other = other.clone(); 109 | let mut this = this.clone(); 110 | 111 | other.append(&mut this); 112 | Configuration(other.into()) 113 | } 114 | (_, _) => unreachable!( 115 | "We are checking, when deserialising, that a Configuration is a JSON object." 116 | ), 117 | } 118 | } 119 | } 120 | 121 | impl From for Configuration { 122 | fn from(value: serde_json::Value) -> Self { 123 | Self(value) 124 | } 125 | } 126 | 127 | #[cfg(test)] 128 | mod tests { 129 | use serde_json::json; 130 | 131 | use super::*; 132 | 133 | #[test] 134 | fn test_merge_configurations() { 135 | let global = Configuration(json!({ "a": { "nested": true }, "b": ["an", "array"] })); 136 | let local = Configuration(json!({ "a": { "not-nested": false }, "c": 1 })); 137 | 138 | assert_eq!( 139 | global.clone().merge_overwrite(local), 140 | Configuration(json!({ "a": { "nested": true }, "b": ["an", "array"], "c": 1 })) 141 | ); 142 | 143 | assert_eq!( 144 | global, 145 | global.clone().merge_overwrite(Configuration::default()) 146 | ); 147 | assert_eq!( 148 | global, 149 | Configuration::default().merge_overwrite(global.clone()) 150 | ); 151 | assert_eq!( 152 | Configuration::default(), 153 | Configuration::default().merge_overwrite(Configuration::default()) 154 | ) 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/deserialize.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! This module exposes the functions [deserialize_size] and [deserialize_time] that are used 16 | //! throughout Zenoh-Flow to "parse" values used to express time or size. 17 | //! 18 | //! The external crates [bytesize] and [humantime] are leveraged for these purposes. 19 | 20 | use std::{str::FromStr, sync::Arc}; 21 | 22 | use serde::Deserializer; 23 | use zenoh_keyexpr::OwnedKeyExpr; 24 | 25 | /// Deserialise, from a String, an `Arc` that is guaranteed to be a valid Zenoh-Flow [NodeId](crate::NodeId) or 26 | /// [PortId](crate::PortId). 27 | /// 28 | /// # Errors 29 | /// 30 | /// The deserialisation will fail if: 31 | /// - the String is empty, 32 | /// - the String contains any of the symbols: * # $ ? 
> 33 | /// - the String is not a valid Zenoh key expression in its canonical form (see [autocanonize]). 34 | /// 35 | /// [autocanonize]: zenoh_keyexpr::OwnedKeyExpr::autocanonize 36 | pub fn deserialize_id<'de, D>(deserializer: D) -> std::result::Result, D::Error> 37 | where 38 | D: Deserializer<'de>, 39 | { 40 | let id: String = serde::de::Deserialize::deserialize(deserializer)?; 41 | if id.contains(['*', '#', '$', '?', '>']) { 42 | return Err(serde::de::Error::custom(format!( 43 | r#" 44 | Identifiers (for nodes or ports) in Zenoh-Flow must *not* contain any of the characters: '*', '#', '$', '?', '>'. 45 | The identifier < {} > does not satisfy that condition. 46 | 47 | These characters, except for '>', have a special meaning in Zenoh and they could negatively impact Zenoh-Flow's 48 | behaviour. 49 | 50 | The character '>' is used as a separator when flattening a composite operator. Allowing it could also negatively impact 51 | Zenoh-Flow's behaviour. 52 | "#, 53 | id 54 | ))); 55 | } 56 | 57 | OwnedKeyExpr::autocanonize(id.clone()).map_err(|e| { 58 | serde::de::Error::custom(format!( 59 | r#" 60 | Identifiers (for nodes or ports) in Zenoh-Flow *must* be valid key-expressions in their canonical form. 61 | The identifier < {} > does not satisfy that condition. 62 | 63 | Caused by: 64 | {:?} 65 | "#, 66 | id, e 67 | )) 68 | })?; 69 | 70 | Ok(id.into()) 71 | } 72 | 73 | /// Deserialise a bytes size leveraging the [bytesize] crate. 74 | /// 75 | /// This allows parsing, for instance, "1Ko" into "1024" bytes. For more example, see the [bytesize] crate. 76 | /// 77 | /// # Errors 78 | /// 79 | /// See the [bytesize] documentation. 80 | pub fn deserialize_size<'de, D>(deserializer: D) -> std::result::Result 81 | where 82 | D: Deserializer<'de>, 83 | { 84 | let size_str: String = serde::de::Deserialize::deserialize(deserializer)?; 85 | let size_u64 = bytesize::ByteSize::from_str(&size_str) 86 | .map_err(|e| { 87 | serde::de::Error::custom(format!( 88 | "Unable to parse value as bytes {size_str}:\n{:?}", 89 | e 90 | )) 91 | })? 92 | .as_u64(); 93 | 94 | usize::try_from(size_u64).map_err(|e| serde::de::Error::custom(format!( 95 | "Unable to convert < {} > into a `usize`. Maybe check the architecture of the target device?\n{:?}", 96 | size_u64, e 97 | ))) 98 | } 99 | 100 | /// Deserialise a duration in *microseconds* leveraging the [humantime] crate. 101 | /// 102 | /// This allows parsing, for instance, "1ms" as 1000 microseconds. 103 | /// 104 | /// # Errors 105 | /// 106 | /// See the [humantime] documentation. 107 | pub fn deserialize_time<'de, D>(deserializer: D) -> std::result::Result 108 | where 109 | D: Deserializer<'de>, 110 | { 111 | let buf: &str = serde::de::Deserialize::deserialize(deserializer)?; 112 | let time_u128 = buf 113 | .parse::() 114 | .map_err(serde::de::Error::custom)? 115 | .as_micros(); 116 | 117 | u64::try_from(time_u128).map_err(|e| { 118 | serde::de::Error::custom(format!( 119 | "Unable to convert < {} > into a `u64`. 
Maybe lower the value?\n{:?}", 120 | time_u128, e 121 | )) 122 | }) 123 | } 124 | 125 | #[cfg(test)] 126 | mod tests { 127 | use serde::Deserialize; 128 | 129 | use crate::NodeId; 130 | 131 | #[derive(Deserialize, Debug)] 132 | pub struct TestStruct { 133 | pub id: NodeId, 134 | } 135 | 136 | #[test] 137 | fn test_deserialize_id() { 138 | let json_str = r#" 139 | { 140 | "id": "my//chunk" 141 | } 142 | "#; 143 | assert!(serde_json::from_str::(json_str).is_err()); 144 | 145 | let json_str = r#" 146 | { 147 | "id": "my*chunk" 148 | } 149 | "#; 150 | assert!(serde_json::from_str::(json_str).is_err()); 151 | 152 | let json_str = r##" 153 | { 154 | "id": "#chunk" 155 | } 156 | "##; 157 | assert!(serde_json::from_str::(json_str).is_err()); 158 | 159 | let json_str = r#" 160 | { 161 | "id": "?chunk" 162 | } 163 | "#; 164 | assert!(serde_json::from_str::(json_str).is_err()); 165 | 166 | let json_str = r#" 167 | { 168 | "id": "$chunk" 169 | } 170 | "#; 171 | assert!(serde_json::from_str::(json_str).is_err()); 172 | 173 | let json_str = r#" 174 | { 175 | "id": "my>chunk" 176 | } 177 | "#; 178 | assert!(serde_json::from_str::(json_str).is_err()); 179 | 180 | let json_str = r#" 181 | { 182 | "id": "my/chunk/is/alright" 183 | } 184 | "#; 185 | assert!(serde_json::from_str::(json_str).is_ok()); 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/identifiers.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{fmt::Display, ops::Deref, str::FromStr, sync::Arc}; 16 | 17 | use anyhow::anyhow; 18 | use serde::{Deserialize, Serialize}; 19 | use uuid::Uuid; 20 | use zenoh_config::ZenohId; 21 | 22 | use crate::deserialize::deserialize_id; 23 | 24 | /// A `NodeId` uniquely identifies a Node within a data flow. 25 | /// 26 | /// A `NodeId` additionally satisfies the following constraints: 27 | /// - it does *not* contain any of the symbols: * # $ ? > 28 | /// - it is a valid [canonical Zenoh key expression](zenoh_keyexpr::OwnedKeyExpr::autocanonize). 29 | /// 30 | /// # Performance 31 | /// 32 | /// A `NodeId` is encapsulated in an [Arc] rendering clone operations inexpensive. 33 | #[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone, Hash)] 34 | pub struct NodeId(#[serde(deserialize_with = "deserialize_id")] Arc); 35 | 36 | impl Deref for NodeId { 37 | type Target = Arc; 38 | 39 | fn deref(&self) -> &Self::Target { 40 | &self.0 41 | } 42 | } 43 | 44 | impl Display for NodeId { 45 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 46 | write!(f, "{}", &self.0) 47 | } 48 | } 49 | 50 | impl From for NodeId { 51 | fn from(value: String) -> Self { 52 | Self(value.into()) 53 | } 54 | } 55 | 56 | impl From<&str> for NodeId { 57 | fn from(value: &str) -> Self { 58 | Self(value.into()) 59 | } 60 | } 61 | 62 | /// A `PortId` identifies an `Input` or an `Output` of a Node. 
63 | /// 64 | /// A `PortId` additionally satisfies the following constraints: 65 | /// - it does *not* contain any of the symbols: * # $ ? > 66 | /// - it is a valid [canonical Zenoh key expression](zenoh_keyexpr::OwnedKeyExpr::autocanonize). 67 | /// 68 | /// # Uniqueness 69 | /// 70 | /// A `PortId` does not need to be unique within a data flow. It should only be unique among the ports of the same node 71 | /// and of the same type (i.e. `Input` or `Output`). 72 | /// For instance, a node can have an `Input` and an `Output` with the same `PortId`. 73 | /// 74 | /// # Performance 75 | /// 76 | /// A `PortId` is encapsulated in an [Arc] rendering clone operations inexpensive. 77 | #[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] 78 | pub struct PortId(#[serde(deserialize_with = "deserialize_id")] Arc); 79 | 80 | impl Deref for PortId { 81 | type Target = Arc; 82 | 83 | fn deref(&self) -> &Self::Target { 84 | &self.0 85 | } 86 | } 87 | 88 | impl Display for PortId { 89 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 90 | write!(f, "{}", &self.0) 91 | } 92 | } 93 | 94 | impl From for PortId { 95 | fn from(value: String) -> Self { 96 | Self(value.into()) 97 | } 98 | } 99 | 100 | impl From<&str> for PortId { 101 | fn from(value: &str) -> Self { 102 | Self(value.into()) 103 | } 104 | } 105 | 106 | /// A `RuntimeId` uniquely identifies a Zenoh-Flow runtime within a Zenoh network. 107 | /// 108 | /// The `RuntimeId` structure simply wraps a [ZenohId]. Similar to a Uuid, this identifier is (with a high probability) 109 | /// guaranteed to be unique within your infrastructure. 110 | /// 111 | /// A Zenoh-Flow runtime will, by default, reuse the [ZenohId] of the Zenoh 112 | /// [session](https://docs.rs/zenoh/0.10.1-rc/zenoh/struct.Session.html) it will create to connect to the Zenoh network. 113 | #[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize, Default)] 114 | #[repr(transparent)] 115 | pub struct RuntimeId(ZenohId); 116 | 117 | impl RuntimeId { 118 | /// Generate a new random identifier, guaranteed (with a high probability) to be unique. 119 | pub fn rand() -> Self { 120 | // NOTE: The `Default` trait implementation internally calls `rand()`. 121 | Self(ZenohId::default()) 122 | } 123 | } 124 | 125 | impl Display for RuntimeId { 126 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 127 | write!(f, "{}", &self.0) 128 | } 129 | } 130 | 131 | impl Deref for RuntimeId { 132 | type Target = ZenohId; 133 | 134 | fn deref(&self) -> &Self::Target { 135 | &self.0 136 | } 137 | } 138 | 139 | impl From for RuntimeId { 140 | fn from(value: ZenohId) -> Self { 141 | Self(value) 142 | } 143 | } 144 | 145 | impl FromStr for RuntimeId { 146 | type Err = anyhow::Error; 147 | 148 | fn from_str(s: &str) -> Result { 149 | Ok(ZenohId::from_str(s) 150 | .map_err(|e| anyhow!("Failed to parse < {} > as a valid ZenohId:\n{:?}", s, e))? 151 | .into()) 152 | } 153 | } 154 | 155 | /// An `InstanceId` uniquely identifies a data flow instance. 156 | /// 157 | /// A data flow instance is created every time Zenoh-Flow is tasked to run a data flow. Each instance of the same data 158 | /// flow will have a different `InstanceId`. 159 | /// 160 | /// Internally, it uses a [Uuid v4](uuid::Uuid::new_v4) that it wraps inside an [Arc]. This allows for inexpensive 161 | /// `clone` operations. 
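/// # Example
///
/// A minimal sketch, assuming a freshly generated [Uuid]; in practice Zenoh-Flow creates the identifier
/// itself when it instantiates a data flow.
///
/// ```
/// use uuid::Uuid;
/// use zenoh_flow_commons::InstanceId;
///
/// // Wrap a random Uuid; cloning the resulting identifier only clones the inner `Arc`.
/// let instance_id: InstanceId = Uuid::new_v4().into();
/// let _inexpensive_copy = instance_id.clone();
/// ```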
162 | #[derive(Debug, Clone, Hash, PartialEq, Eq, Deserialize, Serialize)] 163 | pub struct InstanceId(Arc); 164 | 165 | impl Display for InstanceId { 166 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 167 | write!(f, "{}", &self.0) 168 | } 169 | } 170 | 171 | impl From for InstanceId { 172 | fn from(value: Uuid) -> Self { 173 | Self(Arc::new(value)) 174 | } 175 | } 176 | 177 | impl Deref for InstanceId { 178 | type Target = Uuid; 179 | 180 | fn deref(&self) -> &Self::Target { 181 | &self.0 182 | } 183 | } 184 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! This crate centralises structures that are shared across Zenoh-Flow. 16 | //! 17 | //! ⚠️ This crate is intended for internal usage within Zenoh-Flow. All structures that are exposed in public 18 | //! facing API are re-exposed in the relevant crates. 19 | 20 | mod configuration; 21 | pub use configuration::Configuration; 22 | 23 | mod deserialize; 24 | pub use deserialize::deserialize_id; 25 | 26 | mod identifiers; 27 | pub use identifiers::{InstanceId, NodeId, PortId, RuntimeId}; 28 | 29 | mod merge; 30 | pub use merge::IMergeOverwrite; 31 | 32 | mod shared_memory; 33 | pub use shared_memory::SharedMemoryConfiguration; 34 | 35 | mod utils; 36 | pub use utils::try_parse_from_file; 37 | 38 | mod vars; 39 | pub use vars::{parse_vars, Vars}; 40 | 41 | /// Zenoh-Flow's result type. 42 | pub type Result = std::result::Result; 43 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/merge.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | /// Types which can be combined with another instance of the same type and for which, in case there are common elements, 16 | /// the elements of `self` would be kept over those held by `other`. 17 | /// 18 | /// For instance, a map-like type with keys shared by both would see the associated values in `self` preserved. 19 | /// 20 | /// This trait is leveraged in Zenoh-Flow for the [Configuration](crate::Configuration) and the [Vars](crate::Vars). 
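/// # Example
///
/// A minimal sketch using the [Configuration](crate::Configuration) implementation: on conflicting keys the
/// values of `self` (here, a node-level configuration) are kept, the remaining keys are merged. The key
/// names are purely illustrative.
///
/// ```
/// use serde_json::json;
/// use zenoh_flow_commons::{Configuration, IMergeOverwrite};
///
/// let node_level: Configuration = json!({ "rate": 100 }).into();
/// let flow_level: Configuration = json!({ "rate": 10, "log": "info" }).into();
///
/// // "rate" from `node_level` wins, "log" is inherited from `flow_level`.
/// let merged: Configuration = json!({ "rate": 100, "log": "info" }).into();
/// assert_eq!(node_level.merge_overwrite(flow_level), merged);
/// ```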
21 | pub trait IMergeOverwrite { 22 | fn merge_overwrite(self, other: Self) -> Self; 23 | } 24 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/shared_memory.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::fmt::Display; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | 19 | use crate::deserialize::{deserialize_size, deserialize_time}; 20 | 21 | /// Structure to configure how Zenoh-Flow uses the [shared memory](https://docs.rs/zenoh-shm/0.10.1-rc/zenoh_shm/) 22 | /// feature provided by Zenoh. 23 | /// 24 | /// This configuration is applied on a link basis 25 | /// 26 | /// A Zenoh-Flow runtime can be configured to always attempt to send data through shared-memory first. When this feature 27 | /// is enabled this structure allows tweaking two aspects: (i) the size of the shared memory buffer Zenoh should 28 | /// allocate and (ii) the back-off period. 29 | #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)] 30 | pub struct SharedMemoryConfiguration { 31 | /// Size, converted in bytes, of the entire shared memory buffer. 32 | #[serde(deserialize_with = "deserialize_size")] 33 | pub size: usize, 34 | /// Duration, converted in nanoseconds, to wait before retrying the last operation. 35 | #[serde(deserialize_with = "deserialize_time")] 36 | pub backoff: u64, 37 | } 38 | 39 | // TODO@J-Loudet 40 | impl Display for SharedMemoryConfiguration { 41 | fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 42 | todo!() 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/utils.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{ 16 | io::Read, 17 | path::{Path, PathBuf}, 18 | }; 19 | 20 | use anyhow::{bail, Context}; 21 | use handlebars::Handlebars; 22 | use serde::Deserialize; 23 | 24 | use crate::{IMergeOverwrite, Result, Vars}; 25 | 26 | /// Given the [Path] of a file, return the function we should call to deserialize an instance of 27 | /// `N`. 28 | /// 29 | /// This function will look at the extension of the [Path] to decide on a deserializer. 30 | /// 31 | /// # Errors 32 | /// 33 | /// This function will fail if the extension of the [Path] is not supported. 
For now, the only 34 | /// supported extensions are: 35 | /// - ".yml" 36 | /// - ".yaml" 37 | /// - ".json" 38 | pub(crate) fn deserializer(path: &PathBuf) -> Result Result> 39 | where 40 | N: for<'a> Deserialize<'a>, 41 | { 42 | match path.extension().and_then(|ext| ext.to_str()) { 43 | Some("json") => Ok(|buf| { 44 | serde_json::from_str::(buf) 45 | .context(format!("Failed to deserialize from JSON:\n{}", buf)) 46 | }), 47 | Some("yml") | Some("yaml") => Ok(|buf| { 48 | serde_yaml::from_str::(buf) 49 | .context(format!("Failed to deserialize from YAML:\n{}", buf)) 50 | }), 51 | Some(extension) => bail!( 52 | r#" 53 | Unsupported file extension < {} > in: 54 | {:?} 55 | 56 | Currently supported file extensions are: 57 | - .json 58 | - .yml 59 | - .yaml 60 | "#, 61 | extension, 62 | path 63 | ), 64 | None => bail!("Missing file extension in path:\n{}", path.display()), 65 | } 66 | } 67 | 68 | /// Attempts to parse an instance of `N` from the content of the file located at `path`, overwriting 69 | /// (or complementing) the [Vars] declared in said file with the provided `vars`. 70 | /// 71 | /// This function is notably used to parse a data flow descriptor. Two file types are supported, 72 | /// identified by their extension: 73 | /// - JSON (`.json` file extension) 74 | /// - YAML (`.yaml` or `.yml` extensions) 75 | /// 76 | /// This function does not impose writing *all* descriptor file(s), within the same data flow, in 77 | /// the same format. 78 | /// 79 | /// # Errors 80 | /// 81 | /// The parsing can fail for several reasons (listed in sequential order): 82 | /// - the OS failed to [canonicalize](std::fs::canonicalize()) the path of the file, 83 | /// - the OS failed to open (in read mode) the file, 84 | /// - the extension of the file is not supported by Zenoh-Flow (i.e. it's neither a YAML file or a 85 | /// JSON file), 86 | /// - parsing the [Vars] section failed (if there is one), 87 | /// - expanding the variables located in the [Vars] section failed (if there are any) --- see the 88 | /// documentation [handlebars] for a more complete list of reasons, 89 | /// - parsing an instance of `N` failed. 90 | pub fn try_parse_from_file(path: impl AsRef, vars: Vars) -> Result<(N, Vars)> 91 | where 92 | N: for<'a> Deserialize<'a>, 93 | { 94 | let path_buf = std::fs::canonicalize(path.as_ref()).context(format!( 95 | "Failed to canonicalize path (did you put an absolute path?):\n{}", 96 | path.as_ref().to_string_lossy() 97 | ))?; 98 | 99 | let mut buf = String::default(); 100 | std::fs::File::open(&path_buf) 101 | .context(format!("Failed to open file:\n{}", path_buf.display()))? 102 | .read_to_string(&mut buf) 103 | .context(format!( 104 | "Failed to read the content of file:\n{}", 105 | path_buf.display() 106 | ))?; 107 | 108 | let mut merged_vars = vars; 109 | match deserializer::(&path_buf)?(&buf) { 110 | Ok(parsed_vars) => { 111 | merged_vars = merged_vars.merge_overwrite(parsed_vars); 112 | } 113 | Err(e) => { 114 | // NOTE: Maybe the deserialisation fails because there is no `Vars` section. This is not 115 | // necessarily an issue, hence the level "debug" for the log. 
116 | tracing::debug!("Could not parse Vars"); 117 | tracing::trace!("{e:?}"); 118 | tracing::trace!("Maybe the above error is normal as there is no `Vars` section?") 119 | } 120 | } 121 | 122 | let mut handlebars = Handlebars::new(); 123 | handlebars.set_strict_mode(true); 124 | 125 | let rendered_descriptor = handlebars 126 | // NOTE: We have to dereference `merged_vars` (this: `&(*merged_vars)`) and pass the 127 | // contained `HashMap` such that `handlebars` can correctly manipulate it. 128 | // 129 | // We have to have this indirection in the structure such that `serde` can correctly 130 | // deserialise the descriptor. 131 | .render_template(buf.as_str(), &(*merged_vars)) 132 | .context("Failed to expand descriptor")?; 133 | 134 | Ok(( 135 | (deserializer::(&path_buf))?(&rendered_descriptor) 136 | .context(format!("Failed to deserialize {}", &path_buf.display()))?, 137 | merged_vars, 138 | )) 139 | } 140 | -------------------------------------------------------------------------------- /zenoh-flow-commons/src/vars.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{collections::HashMap, error::Error, ops::Deref, rc::Rc}; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | 19 | use crate::IMergeOverwrite; 20 | 21 | /// `Vars` is an internal structure that we use to expand the "moustache variables" in a descriptor file. 22 | /// 23 | /// Moustache variables take the form: `{{ var }}` where the number of spaces after the `{{` and before the `}}` do 24 | /// not matter. 25 | /// 26 | /// We first parse the descriptor file to only extract the `vars` section and build a `HashMap` out of it. 27 | /// 28 | /// We then load the descriptor file as a template and "render" it, substituting every "moustache variable" with its 29 | /// corresponding value in the HashMap. 30 | /// 31 | /// # Declaration, propagation and merging 32 | /// 33 | /// Zenoh-Flow allows users to declare `vars` at 3 locations: 34 | /// - at the top-level of a data flow descriptor, 35 | /// - at the top-level of a composite operator descriptor, 36 | /// - at the top-level of a node descriptor (not contained within a data flow or composite operator descriptor). 37 | /// 38 | /// The `vars` are propagated to all "contained" descriptors. For instance, a data flow descriptor that references a 39 | /// composite operator whose descriptor resides in a separate file will have its `vars` propagated there. 40 | /// 41 | /// At the same time, if a "contained" descriptor also has a `vars` section, that section will be merged and all 42 | /// duplicated keys overwritten with the values of the "container" descriptor. 43 | /// 44 | /// This allows defining default values for substitutions in leaf descriptors and overwriting them if necessary. 
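/// For instance (the values are illustrative only), if a data flow descriptor declares:
///
/// ```yaml
/// vars:
///   BUILD: release
/// ```
///
/// and a node descriptor it references declares:
///
/// ```yaml
/// vars:
///   BUILD: debug
///   DLL_EXT: so
/// ```
///
/// then, within that node descriptor, `{{ BUILD }}` expands to `release` (the "container" descriptor wins) and
/// `{{ DLL_EXT }}` expands to `so` (the default value is kept as the data flow descriptor does not redefine it).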
45 | /// 46 | /// # Example (YAML) 47 | /// 48 | /// Declaration within a descriptor: 49 | /// 50 | /// ```yaml 51 | /// vars: 52 | /// BUILD: debug 53 | /// DLL_EXT: so 54 | /// ``` 55 | /// 56 | /// Its usage within the descriptor: 57 | /// 58 | /// ```yaml 59 | /// sources: 60 | /// - id: my-source 61 | /// library: "file:///zenoh-flow/target/{{ BUILD }}/libmy_source.{{ DLL_EXT }}" 62 | /// ``` 63 | #[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq)] 64 | pub struct Vars { 65 | #[serde(default)] 66 | vars: Rc<HashMap<Rc<str>, Rc<str>>>, 67 | } 68 | 69 | impl Deref for Vars { 70 | type Target = HashMap<Rc<str>, Rc<str>>; 71 | 72 | fn deref(&self) -> &Self::Target { 73 | &self.vars 74 | } 75 | } 76 | 77 | impl IMergeOverwrite for Vars { 78 | fn merge_overwrite(self, other: Self) -> Self { 79 | let mut merged = (*other.vars).clone(); 80 | merged.extend((*self.vars).clone()); 81 | 82 | Self { 83 | vars: Rc::new(merged), 84 | } 85 | } 86 | } 87 | 88 | impl<T: AsRef<str>, U: AsRef<str>, const N: usize> From<[(T, U); N]> for Vars { 89 | fn from(value: [(T, U); N]) -> Self { 90 | Self { 91 | vars: Rc::new( 92 | value 93 | .into_iter() 94 | .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())) 95 | .collect::<HashMap<Rc<str>, Rc<str>>>(), 96 | ), 97 | } 98 | } 99 | } 100 | 101 | impl<T: AsRef<str>, U: AsRef<str>> From<Vec<(T, U)>> for Vars { 102 | fn from(value: Vec<(T, U)>) -> Self { 103 | Self { 104 | vars: Rc::new( 105 | value 106 | .into_iter() 107 | .map(|(k, v)| (k.as_ref().into(), v.as_ref().into())) 108 | .collect::<HashMap<Rc<str>, Rc<str>>>(), 109 | ), 110 | } 111 | } 112 | } 113 | 114 | /// Parse a single [Var](Vars) from a string of the format "KEY=VALUE". 115 | /// 116 | /// Note that if several "=" characters are present in the string, only the first one will be considered as a separator 117 | /// and the others will be treated as being part of the VALUE. 118 | /// 119 | /// # Errors 120 | /// 121 | /// This function will return an error if no "=" character was found. 122 | pub fn parse_vars<T, U>( 123 | s: &str, 124 | ) -> std::result::Result<(T, U), Box<dyn Error + Send + Sync + 'static>> 125 | where 126 | T: std::str::FromStr, 127 | T::Err: Error + Send + Sync + 'static, 128 | U: std::str::FromStr, 129 | U::Err: Error + Send + Sync + 'static, 130 | { 131 | let pos = s 132 | .find('=') 133 | .ok_or_else(|| format!("invalid KEY=value: no `=` found in `{s}`"))?; 134 | Ok((s[..pos].parse()?, s[pos + 1..].parse()?)) 135 | } 136 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0.
8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "The Zenoh-Flow daemon exposes a Zenoh-Flow runtime on Zenoh" 19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-flow-daemon" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | [dependencies] 27 | anyhow = { workspace = true } 28 | async-std = { workspace = true } 29 | flume = { workspace = true } 30 | futures = { workspace = true } 31 | log = { workspace = true } 32 | serde = { workspace = true } 33 | serde_json = { workspace = true } 34 | sysinfo = "0.30.5" 35 | tracing = { workspace = true } 36 | tracing-subscriber = { workspace = true } 37 | uhlc = { workspace = true } 38 | uuid = { workspace = true } 39 | zenoh = { workspace = true } 40 | zenoh-flow-commons = { workspace = true } 41 | zenoh-flow-descriptors = { workspace = true } 42 | zenoh-flow-records = { workspace = true } 43 | zenoh-flow-runtime = { workspace = true } 44 | 45 | [features] 46 | default = [] 47 | plugin = [] 48 | 49 | [dev-dependencies] 50 | serde_yaml = { workspace = true } 51 | zenoh-flow-runtime = { workspace = true, features = ["test-utils"] } 52 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/daemon/configuration.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use serde::Deserialize; 16 | use zenoh_flow_runtime::Extensions; 17 | 18 | /// The configuration of a Zenoh-Flow Daemon. 19 | #[derive(Deserialize, Debug)] 20 | pub struct ZenohFlowConfiguration { 21 | /// A human-readable name for this Daemon and its embedded Runtime. 22 | pub name: String, 23 | /// Additionally supported [Extensions]. 24 | pub extensions: Option, 25 | } 26 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/daemon/queryables.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use anyhow::bail; 18 | use flume::{Receiver, Sender}; 19 | use futures::select; 20 | use zenoh::Session; 21 | use zenoh_flow_commons::Result; 22 | use zenoh_flow_runtime::Runtime; 23 | 24 | use crate::queries::{ 25 | instances::InstancesQuery, runtime::RuntimesQuery, selectors, validate_query, 26 | }; 27 | 28 | /// Spawns an async task to answer queries received on `zenoh-flow/{runtime_id}/instances`. 29 | pub(crate) async fn spawn_instances_queryable( 30 | zenoh_session: Session, 31 | runtime: Arc, 32 | abort_rx: Receiver<()>, 33 | abort_ack_tx: Sender<()>, 34 | ) -> Result<()> { 35 | let ke_instances = selectors::selector_instances(runtime.id()); 36 | let queryable = match zenoh_session.declare_queryable(ke_instances.clone()).await { 37 | Ok(queryable) => { 38 | tracing::trace!("declared queryable: {}", ke_instances); 39 | queryable 40 | } 41 | Err(e) => { 42 | bail!("Failed to declare Zenoh queryable 'instances': {:?}", e) 43 | } 44 | }; 45 | 46 | async_std::task::spawn(async move { 47 | loop { 48 | select!( 49 | _ = abort_rx.recv_async() => { 50 | tracing::trace!("Received abort signal"); 51 | break; 52 | } 53 | 54 | query = queryable.recv_async() => { 55 | match query { 56 | Ok(query) => { 57 | let instance_query: InstancesQuery = match validate_query(&query).await { 58 | Ok(instance_query) => instance_query, 59 | Err(e) => { 60 | tracing::error!("Unable to parse `InstancesQuery`: {:?}", e); 61 | return; 62 | } 63 | }; 64 | 65 | let runtime = runtime.clone(); 66 | async_std::task::spawn(async move { 67 | instance_query.process(query, runtime).await; 68 | }); 69 | } 70 | Err(e) => { 71 | tracing::error!("Queryable 'instances' dropped: {:?}", e); 72 | return; 73 | } 74 | } 75 | } 76 | ) 77 | } 78 | 79 | abort_ack_tx.send_async(()).await.unwrap_or_else(|e| { 80 | tracing::error!("Queryable 'instances' failed to acknowledge abort: {:?}", e); 81 | }); 82 | }); 83 | 84 | Ok(()) 85 | } 86 | 87 | pub(crate) async fn spawn_runtime_queryable( 88 | zenoh_session: Session, 89 | runtime: Arc, 90 | abort_rx: Receiver<()>, 91 | abort_ack_tx: Sender<()>, 92 | ) -> Result<()> { 93 | let ke_runtime = selectors::selector_runtimes(runtime.id()); 94 | 95 | let queryable = match zenoh_session.declare_queryable(ke_runtime.clone()).await { 96 | Ok(queryable) => { 97 | tracing::trace!("declared queryable < {} >", ke_runtime); 98 | queryable 99 | } 100 | Err(e) => { 101 | bail!("Failed to declare Zenoh queryable 'runtimes': {:?}", e) 102 | } 103 | }; 104 | 105 | async_std::task::spawn(async move { 106 | loop { 107 | select!( 108 | _ = abort_rx.recv_async() => { 109 | tracing::trace!("Received abort signal"); 110 | break; 111 | } 112 | 113 | query = queryable.recv_async() => { 114 | match query { 115 | Ok(query) => { 116 | let runtime_query: RuntimesQuery = match validate_query(&query).await { 117 | Ok(runtime_query) => runtime_query, 118 | Err(e) => { 119 | tracing::error!("Unable to parse `RuntimesQuery`: {:?}", e); 120 | return; 121 | } 122 | }; 123 | 124 | let runtime = runtime.clone(); 125 | async_std::task::spawn(async move { 126 | runtime_query.process(query, runtime).await; 127 | }); 128 | } 129 | Err(e) => { 130 | tracing::error!("Queryable 'runtimes' dropped: {:?}", e); 131 | return; 132 | } 133 | } 134 | } 135 | ) 136 | } 137 | 138 | abort_ack_tx.send_async(()).await.unwrap_or_else(|e| { 139 | tracing::error!("Queryable 'runtime' 
failed to acknowledge abort: {:?}", e); 140 | }); 141 | }); 142 | 143 | Ok(()) 144 | } 145 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! This crate provides the Zenoh-Flow Daemon: a wrapper around a Zenoh-Flow [Runtime] that can process requests, made 16 | //! on specific key expressions, to manage data flows. 17 | //! 18 | //! In particular, a Zenoh-Flow Daemon is able to coordinate with other Zenoh-Flow Daemon(s) to manage data flows --- 19 | //! provided that they can reach each other through Zenoh. 20 | //! 21 | //! Therefore, instantiating a data flow only requires communicating with a single Daemon: it will automatically request 22 | //! the other Daemons involved in the deployment to manage their respective nodes. 23 | //! 24 | //! Users interested in integrating a [Daemon] in their system should look into the [spawn()] and [spawn_from_config()] 25 | //! methods. 26 | //! 27 | //! [Daemon]: crate::daemon::Daemon 28 | //! [Runtime]: crate::daemon::Runtime 29 | //! [spawn()]: crate::daemon::Daemon::spawn() 30 | //! [spawn_from_config()]: crate::daemon::Daemon::spawn_from_config() 31 | 32 | pub mod daemon; 33 | pub mod queries; 34 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/queries/instances/abort.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use zenoh::Session; 18 | use zenoh_flow_commons::{InstanceId, RuntimeId}; 19 | use zenoh_flow_runtime::Runtime; 20 | 21 | use super::{InstancesQuery, Origin}; 22 | use crate::queries::selectors; 23 | 24 | pub(crate) fn abort(runtime: Arc, origin: Origin, instance_id: InstanceId) { 25 | async_std::task::spawn(async move { 26 | if matches!(origin, Origin::Client) { 27 | match runtime.try_get_record(&instance_id).await { 28 | Ok(record) => { 29 | query_abort( 30 | runtime.session(), 31 | record 32 | .mapping() 33 | .keys() 34 | .filter(|&runtime_id| runtime_id != runtime.id()), 35 | &instance_id, 36 | ) 37 | .await 38 | } 39 | Err(e) => { 40 | tracing::error!( 41 | "Could not get record of data flow < {} >: {e:?}", 42 | instance_id 43 | ); 44 | return; 45 | } 46 | } 47 | } 48 | 49 | if let Err(e) = runtime.try_abort_instance(&instance_id).await { 50 | tracing::error!("Failed to abort instance < {} >: {:?}", instance_id, e); 51 | } 52 | }); 53 | } 54 | 55 | pub(crate) async fn query_abort( 56 | session: &Session, 57 | runtimes: impl Iterator, 58 | instance_id: &InstanceId, 59 | ) { 60 | let abort_query = match serde_json::to_vec(&InstancesQuery::Abort { 61 | origin: Origin::Daemon, 62 | instance_id: instance_id.clone(), 63 | }) { 64 | Ok(query) => query, 65 | Err(e) => { 66 | tracing::error!( 67 | "serde_json failed to serialize InstancesQuery::Abort: {:?}", 68 | e 69 | ); 70 | return; 71 | } 72 | }; 73 | 74 | for runtime_id in runtimes { 75 | let selector = selectors::selector_instances(runtime_id); 76 | 77 | if let Err(e) = session.get(selector).payload(abort_query.clone()).await { 78 | tracing::error!( 79 | "Sending abort query to runtime < {} > failed with error: {:?}", 80 | runtime_id, 81 | e 82 | ); 83 | } 84 | tracing::trace!("Sent abort query to runtime < {} >", runtime_id); 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/queries/instances/delete.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use async_std::task::JoinHandle; 18 | use zenoh::Session; 19 | use zenoh_flow_commons::{InstanceId, RuntimeId}; 20 | use zenoh_flow_runtime::{DataFlowErr, Runtime}; 21 | 22 | use super::{InstancesQuery, Origin}; 23 | use crate::queries::selectors; 24 | 25 | /// Query all the runtimes to delete the provided data flow instance. 
26 | pub(crate) async fn query_delete( 27 | session: &Session, 28 | runtimes: impl Iterator, 29 | instance_id: &InstanceId, 30 | ) { 31 | let delete_query = serde_json::to_vec(&InstancesQuery::Delete { 32 | origin: Origin::Daemon, 33 | instance_id: instance_id.clone(), 34 | }) 35 | .expect("serde_json failed to serialize InstancesQuery::Delete"); 36 | 37 | for runtime_id in runtimes { 38 | let selector = selectors::selector_instances(runtime_id); 39 | 40 | // NOTE: No need to process the request, as, even if the query failed, this is not something we want to recover 41 | // from. 42 | if let Err(e) = session.get(selector).payload(delete_query.clone()).await { 43 | tracing::error!( 44 | "Sending delete query to runtime < {} > failed with error: {:?}", 45 | runtime_id, 46 | e 47 | ); 48 | } 49 | tracing::trace!("Sent delete query to runtime < {} >", runtime_id); 50 | } 51 | } 52 | 53 | /// Deletes the data flow instance. 54 | /// 55 | /// If the query comes from a [Client](Origin::Client) then this daemon will query all the runtimes involved in this 56 | /// instance to make them also delete the data flow instance. 57 | pub(crate) fn delete_instance( 58 | runtime: Arc, 59 | origin: Origin, 60 | instance_id: InstanceId, 61 | ) -> JoinHandle<()> { 62 | async_std::task::spawn(async move { 63 | if matches!(origin, Origin::Client) { 64 | match runtime.try_get_record(&instance_id).await { 65 | Ok(record) => { 66 | query_delete( 67 | runtime.session(), 68 | record 69 | .mapping() 70 | .keys() 71 | .filter(|&runtime_id| runtime_id != runtime.id()), 72 | &instance_id, 73 | ) 74 | .await 75 | } 76 | Err(DataFlowErr::NotFound) => return, 77 | // NOTE: If the data flow is in a failed state we still want to process the delete request but only on 78 | // this runtime. 79 | Err(DataFlowErr::FailedState) => {} 80 | } 81 | } 82 | 83 | if let Err(e) = runtime.try_delete_instance(&instance_id).await { 84 | tracing::error!("Failed to delete instance < {} >: {:?}", instance_id, e); 85 | } 86 | }) 87 | } 88 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/queries/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! Queries and their response to interact with a Zenoh-Flow [Daemon]. 16 | //! 17 | //! This module exposes the available queries to manage data flow instances and interact with other Zenoh-Flow Daemons. 18 | //! The queries are divided into two sets: 19 | //! - [instances](InstancesQuery) 20 | //! - [runtime](RuntimesQuery) 21 | //! 22 | //! 
[Daemon]: crate::daemon::Daemon 23 | 24 | pub(crate) mod instances; 25 | pub(crate) mod runtime; 26 | pub(crate) mod selectors; 27 | 28 | use anyhow::{anyhow, bail}; 29 | use serde::Deserialize; 30 | use zenoh::query::Query; 31 | use zenoh_flow_commons::Result; 32 | pub use zenoh_flow_runtime::InstanceStatus; 33 | 34 | pub use self::{ 35 | instances::{InstancesQuery, Origin}, 36 | runtime::{RuntimeInfo, RuntimeStatus, RuntimesQuery}, 37 | selectors::*, 38 | }; 39 | 40 | /// Validate a query and try to deserialize into an instance of `T`. 41 | /// 42 | /// This function checks that the query is correct: 43 | /// - it has a payload, 44 | /// - the encoding is "correct", 45 | /// - the payload can be deserialized into an instance of `T`. 46 | /// 47 | /// If any check fails, an error message is logged and the query is dropped. 48 | /// 49 | /// After these checks, the method `process` is called on the variant of `InstancesQuery`. 50 | pub(crate) async fn validate_query Deserialize<'a>>(query: &Query) -> Result { 51 | let Some(payload) = query.payload() else { 52 | bail!("Received Query with empty payload") 53 | }; 54 | 55 | serde_json::from_slice::(&payload.to_bytes()).map_err(|e| anyhow!("{:?}", e)) 56 | } 57 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/queries/runtime.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{collections::HashMap, sync::Arc}; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | use sysinfo::{CpuRefreshKind, MemoryRefreshKind, RefreshKind}; 19 | use zenoh::query::Query; 20 | use zenoh_flow_commons::{InstanceId, RuntimeId}; 21 | use zenoh_flow_runtime::{InstanceState, Runtime}; 22 | 23 | /// The available interactions with Zenoh-Flow Daemon(s). 24 | #[derive(Debug, Deserialize, Serialize)] 25 | pub enum RuntimesQuery { 26 | /// To list all the reachable Zenoh-Flow Daemon(s). 27 | /// 28 | /// This query will display the name and [unique identifier](RuntimeId) of each Zenoh-Flow Daemon. See the 29 | /// corresponding structure, [RuntimeInfo], for usage within your code. 30 | List, 31 | /// To obtain detailed information about a Zenoh-Flow Daemon and its host. 32 | /// 33 | /// This query will display: 34 | /// - the name of the Zenoh-Flow Daemon, 35 | /// - the number of CPUs of the host, 36 | /// - the quantity of RAM of the host, 37 | /// - the hostname of the host, 38 | /// - the CPU architecture of the host, 39 | /// - the operating system of the host, 40 | /// - the status of all the data flows managed by the Zenoh-Flow Daemon. 41 | /// 42 | /// See the corresponding structure, [RuntimeStatus], for usage within your code. 43 | Status, 44 | } 45 | 46 | /// The answer to a [List] query. 47 | /// 48 | /// [List]: RuntimesQuery::List 49 | #[derive(Debug, Deserialize, Serialize)] 50 | pub struct RuntimeInfo { 51 | pub id: RuntimeId, 52 | pub name: Arc, 53 | } 54 | 55 | /// The answer to a [Status] query. 
56 | /// 57 | /// [Status]: RuntimesQuery::Status 58 | #[derive(Debug, Deserialize, Serialize)] 59 | pub struct RuntimeStatus { 60 | pub name: Arc, 61 | pub hostname: Option, 62 | pub architecture: Option, 63 | pub operating_system: Option, 64 | pub cpus: usize, 65 | pub ram_total: u64, 66 | pub data_flows_status: HashMap, InstanceState)>, 67 | } 68 | 69 | impl RuntimesQuery { 70 | pub(crate) async fn process(self, query: Query, runtime: Arc) { 71 | let payload = match self { 72 | RuntimesQuery::List => { 73 | // TODO We could probably try to generate that structure the moment we create the daemon, I don't see 74 | // these values changing at runtime. 75 | let runtime_info = RuntimeInfo { 76 | id: runtime.id().clone(), 77 | name: runtime.name(), 78 | }; 79 | 80 | serde_json::to_vec(&runtime_info) 81 | } 82 | 83 | RuntimesQuery::Status => { 84 | let data_flows_status = runtime.instances_state().await; 85 | 86 | // TODO For better performance, we should initialise this structure once and simply refresh it whenever 87 | // we want to access some data about the machine. 88 | let system = sysinfo::System::new_with_specifics( 89 | RefreshKind::new() 90 | .with_memory(MemoryRefreshKind::new().with_ram()) 91 | .with_cpu(CpuRefreshKind::new()), 92 | ); 93 | 94 | serde_json::to_vec(&RuntimeStatus { 95 | name: runtime.name(), 96 | cpus: system.cpus().len(), 97 | ram_total: system.total_memory(), 98 | data_flows_status, 99 | hostname: sysinfo::System::host_name(), 100 | architecture: sysinfo::System::cpu_arch(), 101 | operating_system: sysinfo::System::name(), 102 | }) 103 | } 104 | }; 105 | 106 | if let Err(e) = match payload { 107 | Ok(payload) => query.reply(query.key_expr(), payload).await, 108 | Err(e) => query.reply_err(e.to_string()).await, 109 | } { 110 | tracing::error!( 111 | r#"Failed to reply to query < {} >: 112 | Caused by: 113 | {:?}"#, 114 | query, 115 | e 116 | ); 117 | } 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /zenoh-flow-daemon/src/queries/selectors.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use zenoh::key_expr::OwnedKeyExpr; 16 | use zenoh_flow_commons::RuntimeId; 17 | 18 | const ZENOH_FLOW: &str = "zenoh-flow"; 19 | const INSTANCES: &str = "instances"; 20 | const RUNTIMES: &str = "runtimes"; 21 | 22 | /// This function generates an [OwnedKeyExpr] from the provided String. 23 | /// 24 | /// # Panic 25 | /// 26 | /// This function will panic if the provided String cannot be transformed into a canonical key expression. See the 27 | /// [documentation of Zenoh's autocanonize](OwnedKeyExpr::autocanonize()) for all the possible scenarios where this 28 | /// could happen. 29 | /// 30 | /// Although panicking seems like a strong posture, we believe this choice strikes a correct balance: we, the Zenoh-Flow 31 | /// team, know the internals of Zenoh and can guarantee the validity of the key expressions we are building. 
Plus, all 32 | /// the exposed API leveraging this function only accepts [RuntimeId] which is a thin wrapper over ZenohId. 33 | fn autocanonize(maybe_ke: String) -> OwnedKeyExpr { 34 | OwnedKeyExpr::autocanonize(maybe_ke.clone()).unwrap_or_else(|e| { 35 | panic!( 36 | r#"Zenoh-Flow internal error: < {} > is not a valid or canonical key expression 37 | 38 | {e:?}"#, 39 | maybe_ke 40 | ) 41 | }) 42 | } 43 | 44 | /// Helper function to generate an [OwnedKeyExpr] to query the data flow instances managed by a specific Zenoh-Flow 45 | /// runtime. 46 | /// 47 | /// The generated key expression has the following structure: `zenoh-flow//instances` 48 | /// 49 | /// where `` corresponds to the unique identifier of the chosen runtime. 50 | /// 51 | /// # Panic 52 | /// 53 | /// This function will panic in the impossible scenario (although never say never…) where the provided [RuntimeId] would 54 | /// make the key expression not valid or not canonical. 55 | pub fn selector_instances(runtime_id: &RuntimeId) -> OwnedKeyExpr { 56 | autocanonize(format!("{ZENOH_FLOW}/{runtime_id}/{INSTANCES}")) 57 | } 58 | 59 | /// Helper function to generate an [OwnedKeyExpr] to query the data flow instances managed by a Zenoh-Flow Daemon. 60 | /// 61 | /// # Performance 62 | /// 63 | /// As this selector will attempt to reach all the Zenoh-Flow runtime, it is possible that the query will take 64 | /// longer to finish and consume more network resources. 65 | /// 66 | /// # Panic 67 | /// 68 | /// This function will panic in the impossible scenario where the key expression we internally rely on is no longer 69 | /// valid or canonical. 70 | pub fn selector_all_instances() -> OwnedKeyExpr { 71 | autocanonize(format!("{ZENOH_FLOW}/*/{INSTANCES}")) 72 | } 73 | 74 | /// Helper function to generate an [OwnedKeyExpr] to query the provided runtime. 75 | /// 76 | /// The generated key expression has the following structure: `zenoh-flow/{runtime id}/runtime`. 77 | /// 78 | /// where `{runtime id}` corresponds to the unique identifier of the chosen runtime. 79 | /// 80 | /// # Panic 81 | /// 82 | /// This function will panic in the impossible scenario (although never say never…) where the provided [RuntimeId] would 83 | /// make the key expression not valid or not canonical. 84 | pub fn selector_runtimes(runtime_id: &RuntimeId) -> OwnedKeyExpr { 85 | autocanonize(format!("{ZENOH_FLOW}/{runtime_id}/{RUNTIMES}")) 86 | } 87 | 88 | /// Helper function to generate an [OwnedKeyExpr] to query all the reachable Zenoh-Flow runtimes. 89 | /// 90 | /// # Performance 91 | /// 92 | /// As this selector will attempt to reach all the Zenoh-Flow runtime, it is possible that the query will take 93 | /// longer to finish and consume more network resources. 94 | /// 95 | /// # Panic 96 | /// 97 | /// This function will panic in the impossible scenario where the key expression we internally rely on is no longer 98 | /// valid or canonical. 
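/// # Example
///
/// A sketch of how a client could list every reachable Daemon with this selector, assuming an already opened
/// Zenoh `session`; error handling is elided and the reply-handling calls reflect the Zenoh API used at the
/// time of writing, so they may differ in your version. The query payload is a serialised
/// [RuntimesQuery](crate::queries::RuntimesQuery) and each Daemon answers with a JSON-encoded
/// [RuntimeInfo](crate::queries::RuntimeInfo):
///
/// ```ignore
/// // A query must carry a payload: the Daemon expects a serialised `RuntimesQuery`.
/// let payload = serde_json::to_vec(&RuntimesQuery::List)?;
/// let replies = session
///     .get(selector_all_runtimes())
///     .payload(payload)
///     .await?;
///
/// while let Ok(reply) = replies.recv_async().await {
///     if let Ok(sample) = reply.result() {
///         let info: RuntimeInfo = serde_json::from_slice(&sample.payload().to_bytes())?;
///         println!("{} - {}", info.id, info.name);
///     }
/// }
/// ```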
99 | pub fn selector_all_runtimes() -> OwnedKeyExpr { 100 | autocanonize(format!("{ZENOH_FLOW}/*/{RUNTIMES}")) 101 | } 102 | -------------------------------------------------------------------------------- /zenoh-flow-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors.workspace = true 17 | categories.workspace = true 18 | description.workspace = true 19 | edition.workspace = true 20 | homepage.workspace = true 21 | license.workspace = true 22 | name = "zenoh-flow-derive" 23 | readme.workspace = true 24 | repository.workspace = true 25 | version.workspace = true 26 | 27 | # To build with debug on macros: RUSTFLAGS="-Z macro-backtrace" 28 | 29 | [dependencies] 30 | proc-macro2 = "1.0" 31 | quote = "1.0" 32 | syn = { version = "2", features = ["full"] } 33 | 34 | [dev-dependencies] 35 | async-trait = { workspace = true } 36 | zenoh-flow-nodes = { workspace = true } 37 | 38 | [lib] 39 | proc-macro = true 40 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "Internal crate for Zenoh-Flow." 
19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-flow-descriptors" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | [dependencies] 27 | anyhow = { workspace = true } 28 | serde = { workspace = true } 29 | serde_json = { workspace = true } 30 | serde_yaml = { workspace = true } 31 | tracing = { workspace = true } 32 | url = { workspace = true } 33 | zenoh-flow-commons = { workspace = true } 34 | zenoh-keyexpr = { workspace = true } 35 | 36 | [features] 37 | default = [] 38 | shared-memory = [] 39 | 40 | [dev-dependencies] 41 | uuid = { workspace = true } 42 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/flattened/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | pub(crate) mod dataflow; 16 | pub(crate) mod nodes; 17 | pub(crate) mod validator; 18 | 19 | use std::{ 20 | collections::HashMap, 21 | fmt::Display, 22 | hash::Hash, 23 | ops::{Deref, DerefMut}, 24 | }; 25 | 26 | use zenoh_flow_commons::NodeId; 27 | 28 | use crate::{ 29 | nodes::operator::composite::{CompositeInputDescriptor, CompositeOutputDescriptor}, 30 | InputDescriptor, LinkDescriptor, OutputDescriptor, 31 | }; 32 | 33 | /// TODO@J-Loudet documentation? 34 | pub trait ISubstituable { 35 | fn substitute(&mut self, subs: &Substitutions); 36 | } 37 | 38 | impl ISubstituable for LinkDescriptor { 39 | fn substitute(&mut self, subs: &Substitutions) { 40 | if let Some(new_id) = subs.get(&self.from.node) { 41 | self.from.node = new_id.clone(); 42 | } 43 | 44 | if let Some(new_id) = subs.get(&self.to.node) { 45 | self.to.node = new_id.clone(); 46 | } 47 | } 48 | } 49 | 50 | impl ISubstituable for LinkDescriptor { 51 | fn substitute(&mut self, subs: &Substitutions) { 52 | if let Some(new_output) = subs.get(&self.from) { 53 | self.from = new_output.clone(); 54 | } 55 | } 56 | } 57 | 58 | impl ISubstituable for LinkDescriptor { 59 | fn substitute(&mut self, subs: &Substitutions) { 60 | if let Some(new_input) = subs.get(&self.to) { 61 | self.to = new_input.clone(); 62 | } 63 | } 64 | } 65 | 66 | impl ISubstituable for CompositeOutputDescriptor { 67 | fn substitute(&mut self, subs: &Substitutions) { 68 | if let Some(new_id) = subs.get(&self.node) { 69 | self.node = new_id.clone(); 70 | } 71 | } 72 | } 73 | 74 | impl ISubstituable for CompositeInputDescriptor { 75 | fn substitute(&mut self, subs: &Substitutions) { 76 | if let Some(new_id) = subs.get(&self.node) { 77 | self.node = new_id.clone(); 78 | } 79 | } 80 | } 81 | 82 | /// `Substitutions` is an insert only structure that keeps track of all the substitutions to perform. 83 | /// 84 | /// It is leveraged in Zenoh-Flow during the flattening of data flows. 
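/// # Example
///
/// A small sketch of a node-identifier substitution applied to the links of a data flow; the identifiers are
/// made up and we assume a `NodeId` can be built from a string slice:
///
/// ```ignore
/// let mut links: Vec<LinkDescriptor> = /* the links of a composite operator */;
///
/// // Rewrite every reference to the node "operator" into "composite/operator".
/// let subs: Substitutions<NodeId> = [("operator".into(), "composite/operator".into())].into();
/// subs.apply(&mut links);
/// ```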
85 | #[derive(Debug, PartialEq, Eq)] 86 | pub struct Substitutions(HashMap); 87 | 88 | impl Default for Substitutions { 89 | fn default() -> Self { 90 | Self(HashMap::new()) 91 | } 92 | } 93 | 94 | impl Deref for Substitutions { 95 | type Target = HashMap; 96 | 97 | fn deref(&self) -> &Self::Target { 98 | &self.0 99 | } 100 | } 101 | 102 | impl DerefMut for Substitutions { 103 | fn deref_mut(&mut self) -> &mut Self::Target { 104 | &mut self.0 105 | } 106 | } 107 | 108 | impl From> for Substitutions { 109 | fn from(value: HashMap) -> Self { 110 | Self(value) 111 | } 112 | } 113 | 114 | impl From<[(T, T); N]> for Substitutions { 115 | fn from(value: [(T, T); N]) -> Self { 116 | Self(HashMap::from(value)) 117 | } 118 | } 119 | 120 | impl Substitutions { 121 | pub fn apply(&self, substituables: &mut [impl ISubstituable]) { 122 | substituables 123 | .iter_mut() 124 | .for_each(|substituable| substituable.substitute(self)) 125 | } 126 | } 127 | 128 | #[derive(Default, Debug, PartialEq, Eq)] 129 | pub struct Patch { 130 | pub subs_inputs: Substitutions, 131 | pub subs_outputs: Substitutions, 132 | } 133 | 134 | impl Patch { 135 | pub fn new( 136 | subs_inputs: Substitutions, 137 | subs_outputs: Substitutions, 138 | ) -> Self { 139 | Self { 140 | subs_inputs, 141 | subs_outputs, 142 | } 143 | } 144 | 145 | pub fn apply(self, links: &mut [LinkDescriptor]) { 146 | self.subs_inputs.apply(links); 147 | self.subs_outputs.apply(links); 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/flattened/nodes/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | pub(crate) mod operator; 16 | pub(crate) mod sink; 17 | pub(crate) mod source; 18 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/flattened/validator/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::collections::HashSet; 16 | 17 | use anyhow::bail; 18 | use zenoh_flow_commons::{NodeId, PortId, Result}; 19 | 20 | use crate::FlattenedDataFlowDescriptor; 21 | 22 | #[derive(Default)] 23 | pub(crate) struct Validator<'a> { 24 | node_ids: HashSet<&'a NodeId>, 25 | outputs: HashSet<(&'a NodeId, &'a PortId)>, 26 | inputs: HashSet<(&'a NodeId, &'a PortId)>, 27 | } 28 | 29 | impl<'a> Validator<'a> { 30 | pub(crate) fn validate_node_id(&mut self, node_id: &'a NodeId) -> Result<()> { 31 | if !self.node_ids.insert(node_id) { 32 | bail!( 33 | "Two nodes share the same identifier: < {} >. The identifiers must be unique.", 34 | node_id 35 | ); 36 | } 37 | 38 | Ok(()) 39 | } 40 | 41 | pub(crate) fn validate_input(&mut self, node_id: &'a NodeId, input: &'a PortId) -> Result<()> { 42 | if !self.inputs.insert((node_id, input)) { 43 | bail!( 44 | "Node < {} > declares the following input (at least) twice: < {} >", 45 | node_id, 46 | input 47 | ); 48 | } 49 | 50 | Ok(()) 51 | } 52 | 53 | pub(crate) fn validate_output( 54 | &mut self, 55 | node_id: &'a NodeId, 56 | output: &'a PortId, 57 | ) -> Result<()> { 58 | if !self.outputs.insert((node_id, output)) { 59 | bail!( 60 | "Node < {} > declares the following output (at least) twice: < {} >", 61 | node_id, 62 | output 63 | ); 64 | } 65 | 66 | Ok(()) 67 | } 68 | 69 | pub(crate) fn validate(data_flow: &FlattenedDataFlowDescriptor) -> Result<()> { 70 | let mut this = Validator::default(); 71 | 72 | if data_flow.sources.is_empty() { 73 | bail!("A data flow must specify at least ONE Source."); 74 | } 75 | 76 | if data_flow.sinks.is_empty() { 77 | bail!("A data flow must specify at least ONE Sink."); 78 | } 79 | 80 | for flat_source in &data_flow.sources { 81 | this.validate_node_id(&flat_source.id)?; 82 | 83 | for output in flat_source.outputs.iter() { 84 | this.validate_output(&flat_source.id, output)?; 85 | } 86 | } 87 | 88 | for flat_operator in &data_flow.operators { 89 | this.validate_node_id(&flat_operator.id)?; 90 | 91 | for output in flat_operator.outputs.iter() { 92 | this.validate_output(&flat_operator.id, output)?; 93 | } 94 | 95 | for input in flat_operator.inputs.iter() { 96 | this.validate_input(&flat_operator.id, input)?; 97 | } 98 | } 99 | 100 | for flat_sink in &data_flow.sinks { 101 | this.validate_node_id(&flat_sink.id)?; 102 | 103 | for input in flat_sink.inputs.iter() { 104 | this.validate_input(&flat_sink.id, input)?; 105 | } 106 | } 107 | 108 | let mut unused_inputs = this.inputs.clone(); 109 | let mut unused_outputs = this.outputs.clone(); 110 | 111 | for link in data_flow.links.iter() { 112 | if !this.outputs.contains(&(&link.from.node, &link.from.output)) { 113 | bail!( 114 | r#" 115 | The following `from` section of this link does not exist: 116 | {} 117 | 118 | Does the node < {} > exist? 119 | Does it declare an output named < {} >? 120 | "#, 121 | link, 122 | link.from.node, 123 | link.from.output 124 | ); 125 | } 126 | unused_outputs.remove(&(&link.from.node, &link.from.output)); 127 | 128 | if !this.inputs.contains(&(&link.to.node, &link.to.input)) { 129 | bail!( 130 | r#" 131 | The following `to` section of this link does not exist: 132 | {} 133 | 134 | Does the node < {} > exist? 135 | Does it declare an input named < {} >? 
136 | "#, 137 | link, 138 | link.to.node, 139 | link.to.input 140 | ); 141 | } 142 | 143 | // Contrary to outputs, there cannot be multiple incoming links pointing to a single input. 144 | if !unused_inputs.remove(&(&link.to.node, &link.to.input)) { 145 | let links = data_flow 146 | .links 147 | .iter() 148 | .filter(|&l| l.to == link.to) 149 | .collect::>(); 150 | 151 | bail!( 152 | r#" 153 | An Input can only receive data from a single Output. 154 | We have detected several links that point the same Input < {} >: 155 | 156 | {:?} 157 | "#, 158 | link.to, 159 | links 160 | ) 161 | } 162 | } 163 | 164 | if !unused_inputs.is_empty() { 165 | let mut error_message = "The following inputs are not connected: ".to_string(); 166 | for (node, input) in unused_inputs { 167 | error_message = format!("{}\n- {}: {}", error_message, node, input); 168 | } 169 | 170 | bail!(error_message); 171 | } 172 | 173 | if !unused_outputs.is_empty() { 174 | let mut error_message = "The following outputs are not connected:".to_string(); 175 | for (node, output) in unused_outputs { 176 | error_message = format!("{}\n- {}: {}", error_message, node, output); 177 | } 178 | 179 | bail!(error_message); 180 | } 181 | 182 | Ok(()) 183 | } 184 | } 185 | 186 | #[cfg(test)] 187 | #[path = "./tests.rs"] 188 | mod tests; 189 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/io.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::fmt; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | #[cfg(feature = "shared-memory")] 19 | use zenoh_flow_commons::SharedMemoryConfiguration; 20 | use zenoh_flow_commons::{NodeId, PortId}; 21 | 22 | /// An `InputDescriptor` uniquely describes an Input port of a Zenoh-Flow node. 23 | /// 24 | /// # Example 25 | /// 26 | /// ``` 27 | /// # use zenoh_flow_descriptors::InputDescriptor; 28 | /// # let input_desc = r#" 29 | /// node: Operator 30 | /// input: i-operator 31 | /// # "#; 32 | /// # serde_yaml::from_str::(input_desc).unwrap(); 33 | /// ``` 34 | #[derive(Debug, Hash, Serialize, Deserialize, Clone, PartialEq, Eq)] 35 | pub struct InputDescriptor { 36 | pub node: NodeId, 37 | pub input: PortId, 38 | } 39 | 40 | impl fmt::Display for InputDescriptor { 41 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 42 | f.write_fmt(format_args!("{}.{}", self.node, self.input)) 43 | } 44 | } 45 | 46 | impl InputDescriptor { 47 | pub fn new(node: impl AsRef, input: impl AsRef) -> Self { 48 | Self { 49 | node: node.as_ref().into(), 50 | input: input.as_ref().into(), 51 | } 52 | } 53 | } 54 | 55 | /// An `OutputDescriptor` uniquely describes an Output port of a Zenoh-Flow node. 
56 | /// 57 | /// # Example 58 | /// 59 | /// ``` 60 | /// # use zenoh_flow_descriptors::OutputDescriptor; 61 | /// # let output_desc = r#" 62 | /// node: Operator 63 | /// output: o-operator 64 | /// # "#; 65 | /// # serde_yaml::from_str::(output_desc).unwrap(); 66 | /// ``` 67 | #[derive(Debug, Clone, Hash, Serialize, Deserialize, PartialEq, Eq)] 68 | pub struct OutputDescriptor { 69 | pub node: NodeId, 70 | pub output: PortId, 71 | } 72 | 73 | impl fmt::Display for OutputDescriptor { 74 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 75 | f.write_fmt(format_args!("{}.{}", self.node, self.output)) 76 | } 77 | } 78 | 79 | impl OutputDescriptor { 80 | pub fn new(node: impl AsRef, output: impl AsRef) -> Self { 81 | Self { 82 | node: node.as_ref().into(), 83 | output: output.as_ref().into(), 84 | } 85 | } 86 | } 87 | 88 | /// A `LinkDescriptor` describes a link in Zenoh-Flow: a connection from an Output to an Input. 89 | /// 90 | /// A link is composed of: 91 | /// - an [OutputDescriptor], 92 | /// - an [InputDescriptor], 93 | /// - *(optional, disabled by default)* Zenoh shared-memory parameters. 94 | /// 95 | /// # Example 96 | /// 97 | /// The textual representation, in YAML, of a link is as following: 98 | /// ``` 99 | /// # use zenoh_flow_descriptors::LinkDescriptor; 100 | /// # let link_desc = r#" 101 | /// from: 102 | /// node : Operator 103 | /// output : o-operator 104 | /// to: 105 | /// node : Sink 106 | /// input : i-sink 107 | /// # "#; 108 | /// # serde_yaml::from_str::(link_desc).unwrap(); 109 | /// ``` 110 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] 111 | pub struct LinkDescriptor { 112 | pub from: OutputDescriptor, 113 | pub to: InputDescriptor, 114 | #[cfg(feature = "shared-memory")] 115 | #[serde(default, alias = "shm", alias = "shared-memory")] 116 | pub shared_memory: Option, 117 | } 118 | 119 | impl std::fmt::Display for LinkDescriptor { 120 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 121 | write!(f, "{} => {}", self.from, self.to) 122 | } 123 | } 124 | 125 | impl LinkDescriptor { 126 | pub fn new(from: OutputDescriptor, to: InputDescriptor) -> Self { 127 | Self { 128 | from, 129 | to, 130 | #[cfg(feature = "shared-memory")] 131 | shared_memory: None, 132 | } 133 | } 134 | 135 | #[cfg(feature = "shared-memory")] 136 | pub fn set_shared_memory(mut self, shm: SharedMemoryConfiguration) -> Self { 137 | self.shared_memory = Some(shm); 138 | self 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! This crate centralises the different *descriptors* used in Zenoh-Flow. 16 | //! 17 | //! Descriptors describe the different parts that compose an application: the nodes (Source(s), Operator(s) and 18 | //! Sink(s)), the links (how the nodes are connected) and possibly where they should run. 19 | //! 20 | //! 
Descriptors are **enforced** by Zenoh-Flow, giving precise control to application developer. 21 | //! 22 | //! All Rust `struct` exposed by this crate implement the [Deserialize](serde::Deserialize) and 23 | //! [Serialize](serde::Serialize) traits. The purpose is to encourage users to describe their application in dedicated 24 | //! files (which are then fed to a Zenoh-Flow runtime to be parsed), which eases separating the integration from the 25 | //! development. 26 | //! 27 | //! The entry point in order to describe -- in a separate file -- a data flow is the [DataFlowDescriptor]. 28 | //! 29 | //! # Note 30 | //! 31 | //! In its current state, Zenoh-Flow does not easily support creating your data flow through code. It is planned in a 32 | //! future release to bring better support for this use-case. 33 | //! 34 | //! Users interested to do so should look into the `Flattened` family of structures, starting with the 35 | //! [FlattenedDataFlowDescriptor]. 36 | 37 | pub(crate) mod dataflow; 38 | pub(crate) mod flattened; 39 | pub(crate) mod io; 40 | pub(crate) mod nodes; 41 | pub(crate) mod uri; 42 | 43 | pub use self::{ 44 | dataflow::DataFlowDescriptor, 45 | flattened::{ 46 | dataflow::FlattenedDataFlowDescriptor, 47 | nodes::{ 48 | operator::FlattenedOperatorDescriptor, 49 | sink::{FlattenedSinkDescriptor, SinkVariant}, 50 | source::{FlattenedSourceDescriptor, SourceVariant}, 51 | }, 52 | }, 53 | io::{InputDescriptor, LinkDescriptor, OutputDescriptor}, 54 | }; 55 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/nodes/builtin/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | pub(crate) mod zenoh; 16 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/nodes/builtin/zenoh.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{ 16 | collections::{HashMap, HashSet}, 17 | sync::Arc, 18 | }; 19 | 20 | use serde::{Deserialize, Deserializer, Serialize}; 21 | use zenoh_flow_commons::PortId; 22 | use zenoh_keyexpr::OwnedKeyExpr; 23 | 24 | /// A `ZenohSourceDescriptor` encapsulates one or more subscriber(s). 25 | /// 26 | /// For each key expression provided, an output with the exact same value will be generated. 27 | /// 28 | /// # Caveats: canonical key expressions 29 | /// 30 | /// Zenoh only works with canonical key expressions. 
Hence, Zenoh-Flow will automatically "convert" the provided key 31 | /// expressions into their canonical form. 32 | /// 33 | /// If two key expressions, for the same sink, match to the same canonical form a warning message will be logged. 34 | /// 35 | /// # Examples 36 | /// 37 | /// ```yaml 38 | /// zenoh-subscribers: 39 | /// "cmd_vel": "rt/*/cmd_vel" 40 | /// "status": "rt/*/status" 41 | /// ``` 42 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 43 | pub(crate) struct ZenohSourceDescriptor { 44 | pub description: Option>, 45 | #[serde(deserialize_with = "deserialize_canon", alias = "zenoh-subscribers")] 46 | pub subscribers: HashMap, 47 | } 48 | 49 | /// A `ZenohSinkDescriptor` encapsulates one or more publisher(s). 50 | /// 51 | /// For each key expression provided, an output with the exact same value will be generated. 52 | /// 53 | /// # Caveats: canonical key expressions 54 | /// 55 | /// Zenoh only works with canonical key expressions. Hence, Zenoh-Flow will automatically "convert" the provided key 56 | /// expressions into their canonical form. 57 | /// 58 | /// If two key expressions, for the same sink, match to the same canonical form a warning message will be logged. 59 | /// 60 | /// # Examples 61 | /// 62 | /// ```yaml 63 | /// description: My zenoh sink 64 | /// zenoh-publishers: 65 | /// cmd_vel: rt/cmd_vel 66 | /// status: rt/status 67 | /// ``` 68 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 69 | pub(crate) struct ZenohSinkDescriptor { 70 | pub description: Option>, 71 | #[serde(deserialize_with = "deserialize_canon", alias = "zenoh-publishers")] 72 | pub publishers: HashMap, 73 | } 74 | 75 | // Transforms a HashMap into a HashMap. 76 | fn deserialize_canon<'de, D>( 77 | deserializer: D, 78 | ) -> std::result::Result, D::Error> 79 | where 80 | D: Deserializer<'de>, 81 | { 82 | let key_expressions: HashMap = 83 | serde::de::Deserialize::deserialize(deserializer)?; 84 | let mut h_map = HashMap::with_capacity(key_expressions.len()); 85 | let mut h_set = HashSet::with_capacity(key_expressions.len()); 86 | 87 | for (port_id, key_expr) in key_expressions { 88 | let owned_canon_ke = OwnedKeyExpr::autocanonize(key_expr.clone()).map_err(|e| { 89 | serde::de::Error::custom(format!( 90 | "Failed to autocanonize key expression < {} >:\n{:?}", 91 | key_expr.clone(), 92 | e 93 | )) 94 | })?; 95 | 96 | if !h_set.insert(owned_canon_ke.clone()) { 97 | let (duplicate, _) = h_map 98 | .iter() 99 | .find(|(_, owned_ke)| owned_canon_ke == **owned_ke) 100 | .unwrap(); 101 | tracing::warn!( 102 | r#" 103 | The following two key expressions share the same canonical form ( {} ): 104 | - {} 105 | - {} 106 | 107 | They will thus **both** receive the same publications. 108 | If this is a desired behaviour, you can safely ignore this message. 
109 | 110 | For more details, see: 111 | https://github.com/eclipse-zenoh/roadmap/blob/main/rfcs/ALL/Key%20Expressions.md#canon-forms 112 | "#, 113 | owned_canon_ke, 114 | key_expr, 115 | duplicate, 116 | ); 117 | } 118 | 119 | h_map.insert(port_id.into(), owned_canon_ke); 120 | } 121 | 122 | Ok(h_map) 123 | } 124 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/nodes/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | pub(crate) mod builtin; 16 | pub(crate) mod operator; 17 | pub(crate) mod sink; 18 | pub(crate) mod source; 19 | 20 | use std::sync::Arc; 21 | 22 | use serde::{Deserialize, Serialize}; 23 | use url::Url; 24 | use zenoh_flow_commons::Configuration; 25 | 26 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 27 | pub(crate) struct RemoteNodeDescriptor { 28 | pub descriptor: Url, 29 | pub description: Option>, 30 | #[serde(default)] 31 | pub configuration: Configuration, 32 | } 33 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/nodes/operator/composite.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | use zenoh_flow_commons::{Configuration, NodeId, PortId}; 19 | 20 | use crate::{ 21 | nodes::operator::OperatorDescriptor, InputDescriptor, LinkDescriptor, OutputDescriptor, 22 | }; 23 | 24 | /// A `CompositeOutputDescriptor` exposes the [Output](OutputDescriptor) of a node. 25 | /// 26 | /// # Example (YAML) 27 | /// 28 | /// ```yaml 29 | /// id: my-composite-output 30 | /// node: my-operator 31 | /// output: out 32 | /// ``` 33 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] 34 | pub(crate) struct CompositeOutputDescriptor { 35 | pub id: PortId, 36 | pub node: NodeId, 37 | pub output: PortId, 38 | } 39 | 40 | impl From for OutputDescriptor { 41 | fn from(value: CompositeOutputDescriptor) -> Self { 42 | Self { 43 | node: value.node, 44 | output: value.output, 45 | } 46 | } 47 | } 48 | 49 | /// A `CompositeInputDescriptor` exposes the [Input](InputDescriptor) of a node. 
50 | /// 51 | /// # Example (YAML) 52 | /// 53 | /// ```yaml 54 | /// id: my-composite-input 55 | /// node: my-operator 56 | /// input: in 57 | /// ``` 58 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 59 | pub(crate) struct CompositeInputDescriptor { 60 | pub id: PortId, 61 | pub node: NodeId, 62 | pub input: PortId, 63 | } 64 | 65 | impl From<CompositeInputDescriptor> for InputDescriptor { 66 | fn from(value: CompositeInputDescriptor) -> Self { 67 | Self { 68 | node: value.node, 69 | input: value.input, 70 | } 71 | } 72 | } 73 | 74 | /// A `CompositeOperatorDescriptor` groups together one or more Operators in a single descriptor. 75 | /// 76 | /// Its main purpose is to simplify the creation of data flow graphs by allowing this form of grouping. 77 | /// 78 | /// # Examples 79 | /// 80 | /// ```yaml 81 | /// description: CompositeOperator 82 | /// 83 | /// configuration: 84 | /// name: foo 85 | /// 86 | /// operators: 87 | /// - id: InnerOperator1 88 | /// descriptor: file:///home/zenoh-flow/nodes/operator1.yaml 89 | /// 90 | /// - id: InnerOperator2 91 | /// descriptor: file:///home/zenoh-flow/nodes/operator2.yaml 92 | /// 93 | /// links: 94 | /// - from: 95 | /// node: InnerOperator1 96 | /// output: out-2 97 | /// to: 98 | /// node: InnerOperator2 99 | /// input: in-1 100 | /// 101 | /// inputs: 102 | /// - id: CompositeOperator-in 103 | /// node: InnerOperator1 104 | /// input: in-1 105 | /// 106 | /// outputs: 107 | /// - id: CompositeOperator-out 108 | /// node: InnerOperator2 109 | /// output: out-1 110 | /// ``` 111 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] 112 | pub(crate) struct CompositeOperatorDescriptor { 113 | pub description: Arc<str>, 114 | pub inputs: Vec<CompositeInputDescriptor>, 115 | pub outputs: Vec<CompositeOutputDescriptor>, 116 | pub operators: Vec<OperatorDescriptor>, 117 | pub links: Vec<LinkDescriptor>, 118 | #[serde(default)] 119 | pub configuration: Configuration, 120 | } 121 | 122 | impl std::fmt::Display for CompositeOperatorDescriptor { 123 | fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 124 | write!(f, "Composite Operator: {}", self.description) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/nodes/operator/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | pub(crate) mod composite; 16 | 17 | use std::sync::Arc; 18 | 19 | use serde::{Deserialize, Serialize}; 20 | use url::Url; 21 | use zenoh_flow_commons::{Configuration, NodeId, PortId}; 22 | 23 | use super::RemoteNodeDescriptor; 24 | 25 | /// An `OperatorDescriptor` uniquely identifies and configures an Operator. 26 | /// 27 | /// Zenoh-Flow supports several ways of declaring an Operator: 28 | /// - by importing a "remote" descriptor (e.g. located in another descriptor file), 29 | /// - with an inline declaration. 30 | /// 31 | /// It is not possible to define an `Operator` inside your code base. This structure was made to be parsed from a 32 | /// configuration file.
You should instead use a [FlattenedOperatorDescriptor](crate::FlattenedOperatorDescriptor). 33 | /// 34 | /// # Remote descriptor: composite or custom 35 | /// 36 | /// Specifying a remote descriptor allows including a [CompositeOperatorDescriptor](crate::CompositeOperatorDescriptor): 37 | /// the composition of several Operators. 38 | /// 39 | /// Manually describing a composite operator is not supported as it defeats its primary purpose: simplifying the 40 | /// creation of a data flow. 41 | /// 42 | /// # ⚠️ Caveat: `NodeId` and `PortId` 43 | /// 44 | /// Zenoh-Flow identifiers cannot contain certain special characters as it could prevent creating valid Zenoh 45 | /// key-expressions. The list can be found [here](zenoh_flow_commons::deserialize_id). 46 | /// 47 | /// 48 | /// # Examples 49 | /// 50 | /// ## Remote descriptor 51 | /// 52 | /// ⚠️ For now only the `file://` schema is supported. We are planning to support other protocols in future releases of 53 | /// Zenoh-Flow. 54 | /// 55 | /// ```yaml 56 | /// id: my-operator-1 57 | /// descriptor: file:///home/zenoh-flow/my-operator.yaml 58 | /// configuration: 59 | /// answer: 1 60 | /// ``` 61 | /// 62 | /// With the file at `/home/zenoh-flow/my-operator.yaml` containing: 63 | /// ```yaml 64 | /// description: This is my Operator 65 | /// library: file:///home/zenoh-flow/libmy_operator.so 66 | /// inputs: 67 | /// - in-1 68 | /// outputs: 69 | /// - out-1 70 | /// ``` 71 | /// 72 | /// ## Inline declaration: custom operator 73 | /// 74 | /// ```yaml 75 | /// id: my-operator-1 76 | /// description: This is my Operator 77 | /// library: file:///home/zenoh-flow/libmy_operator.so 78 | /// inputs: 79 | /// - in-1 80 | /// outputs: 81 | /// - out-1 82 | /// configuration: 83 | /// answer: 1 84 | /// ``` 85 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 86 | pub(crate) struct OperatorDescriptor { 87 | pub id: NodeId, 88 | #[serde(flatten)] 89 | pub variant: OperatorVariants, 90 | } 91 | 92 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 93 | #[serde(untagged)] 94 | pub(crate) enum OperatorVariants { 95 | Remote(RemoteNodeDescriptor), 96 | Custom(CustomOperatorDescriptor), 97 | } 98 | 99 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 100 | pub(crate) struct CustomOperatorDescriptor { 101 | pub description: Option>, 102 | pub library: Url, 103 | pub inputs: Vec, 104 | pub outputs: Vec, 105 | #[serde(default)] 106 | pub configuration: Configuration, 107 | } 108 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/nodes/sink.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | use url::Url; 19 | use zenoh_flow_commons::{Configuration, NodeId, PortId}; 20 | 21 | use super::RemoteNodeDescriptor; 22 | use crate::nodes::builtin::zenoh::ZenohSinkDescriptor; 23 | 24 | /// A `SinkDescriptor` uniquely identifies a Sink. 
25 | /// 26 | /// Zenoh-Flow supports several ways of declaring a Sink: 27 | /// - by importing a "remote" descriptor (e.g. located in another descriptor file), 28 | /// - with an inline declaration, 29 | /// - with an inline declaration of a Zenoh built-in, listing on which key expressions to publish. 30 | /// 31 | /// # ⚠️ Caveat: `NodeId` and `PortId` 32 | /// 33 | /// Zenoh-Flow identifiers cannot contain certain special characters as it could prevent creating valid Zenoh 34 | /// key-expressions. The list can be found [here](zenoh_flow_commons::deserialize_id). 35 | /// 36 | /// # Examples 37 | /// ## Remote descriptor 38 | /// 39 | /// ```yaml 40 | /// id: my-sink-0 41 | /// descriptor: file:///home/zenoh-flow/my-sink.yaml 42 | /// configuration: 43 | /// answer: 0 44 | /// ``` 45 | /// 46 | /// ## Inline declaration 47 | /// ### Custom sink 48 | /// 49 | /// ```yaml 50 | /// id: my-sink-0 51 | /// description: This is my Sink 52 | /// library: file:///home/zenoh-flow/libmy_sink.so 53 | /// inputs: 54 | /// - out-0 55 | /// - out-1 56 | /// configuration: 57 | /// answer: 42 58 | /// ``` 59 | /// 60 | /// ### Zenoh built-in Sink 61 | /// 62 | /// ```yaml 63 | /// id: my-sink-0 64 | /// description: My zenoh sink 65 | /// zenoh-publishers: 66 | /// key_0: key/expr/0 67 | /// key_1: key/expr/1 68 | /// ``` 69 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] 70 | pub(crate) struct SinkDescriptor { 71 | pub id: NodeId, 72 | #[serde(flatten)] 73 | pub variant: SinkVariants, 74 | } 75 | 76 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] 77 | #[serde(untagged)] 78 | pub(crate) enum SinkVariants { 79 | Zenoh(ZenohSinkDescriptor), 80 | Remote(RemoteNodeDescriptor), 81 | Custom(CustomSinkDescriptor), 82 | } 83 | 84 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 85 | pub(crate) struct CustomSinkDescriptor { 86 | pub description: Option>, 87 | pub library: Url, 88 | pub inputs: Vec, 89 | #[serde(default)] 90 | pub configuration: Configuration, 91 | } 92 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/nodes/source.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | use url::Url; 19 | use zenoh_flow_commons::{Configuration, NodeId, PortId}; 20 | 21 | use super::RemoteNodeDescriptor; 22 | use crate::nodes::builtin::zenoh::ZenohSourceDescriptor; 23 | 24 | /// A `SourceDescriptor` uniquely identifies a Source. 25 | /// 26 | /// Zenoh-Flow supports several ways of declaring a Source: 27 | /// - by importing a "remote" descriptor (e.g. located in another descriptor file), 28 | /// - with an inline declaration, 29 | /// - with an inline declaration of a Zenoh built-in. 30 | /// 31 | /// # ⚠️ Caveat: `NodeId` and `PortId` 32 | /// 33 | /// Zenoh-Flow identifiers cannot contain certain special characters as it could prevent creating valid Zenoh 34 | /// key-expressions. 
The list can be found [here](zenoh_flow_commons::deserialize_id). 35 | /// 36 | /// # Examples 37 | /// ## Remote descriptor 38 | /// 39 | /// ```yaml 40 | /// id: my-source-0 41 | /// descriptor: file:///home/zenoh-flow/my-source.yaml 42 | /// configuration: 43 | /// answer: 0 44 | /// ``` 45 | /// 46 | /// ## Inline declaration 47 | /// ### Custom source 48 | /// 49 | /// ```yaml 50 | /// id: my-source-0 51 | /// description: This is my Source 52 | /// library: file:///home/zenoh-flow/libmy_source.so 53 | /// outputs: 54 | /// - out-0 55 | /// - out-1 56 | /// configuration: 57 | /// answer: 42 58 | /// ``` 59 | /// 60 | /// ### Zenoh built-in Source 61 | /// 62 | /// ```yaml 63 | /// id: my-source-0 64 | /// description: My zenoh source 65 | /// zenoh-subscribers: 66 | /// ke-0: key/expr/0 67 | /// ke-1: key/expr/1 68 | /// ``` 69 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] 70 | pub(crate) struct SourceDescriptor { 71 | pub id: NodeId, 72 | #[serde(flatten)] 73 | pub variant: SourceVariants, 74 | } 75 | 76 | #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] 77 | #[serde(untagged)] 78 | pub(crate) enum SourceVariants { 79 | Zenoh(ZenohSourceDescriptor), 80 | Remote(RemoteNodeDescriptor), 81 | Custom(CustomSourceDescriptor), 82 | } 83 | 84 | #[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Eq)] 85 | pub(crate) struct CustomSourceDescriptor { 86 | pub description: Option>, 87 | pub library: Url, 88 | pub outputs: Vec, 89 | #[serde(default)] 90 | pub configuration: Configuration, 91 | } 92 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/src/uri.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use anyhow::{bail, Context}; 16 | use serde::Deserialize; 17 | use url::Url; 18 | use zenoh_flow_commons::{try_parse_from_file, Result, Vars}; 19 | 20 | pub(crate) fn try_load_descriptor(url: &Url, vars: Vars) -> Result<(N, Vars)> 21 | where 22 | N: for<'a> Deserialize<'a>, 23 | { 24 | match url.scheme() { 25 | "file" => try_parse_from_file::(url.path(), vars).context(format!( 26 | "Failed to load descriptor from file:\n{}", 27 | url.path() 28 | )), 29 | _ => bail!("Unsupported URL scheme < {} >", url.scheme(),), 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/composite-nested.yml: -------------------------------------------------------------------------------- 1 | description: composite-nested 2 | 3 | 4 | inputs: 5 | - id: composite-nested-in 6 | node: operator-1 7 | input: operator-1-in-1 8 | 9 | 10 | outputs: 11 | - id: composite-nested-out 12 | node: operator-2 13 | output: operator-2-out 14 | 15 | 16 | operators: 17 | - id: operator-1 18 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/operator-1.yml" 19 | 20 | - id: operator-2 21 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/operator-2.yml" 22 | 23 | 24 | links: 25 | - from: 26 | node: operator-1 27 | output: operator-1-out 28 | to: 29 | node: operator-2 30 | input: operator-2-in 31 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/composite-outer.yml: -------------------------------------------------------------------------------- 1 | description: composite-outer 2 | 3 | uri: file://composite-outer.so 4 | 5 | inputs: [composite-outer-in] 6 | 7 | outputs: [composite-outer-out] 8 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/data-flow-recursion-duplicate-composite.yml: -------------------------------------------------------------------------------- 1 | name: test-recursion-ok 2 | 3 | vars: 4 | SCHEME: 5 | BASE_DIR: 6 | 7 | sources: 8 | - id: source-composite 9 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/source-composite.yml" 10 | 11 | operators: 12 | - id: operator-composite-1 13 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/operator-composite.yml" 14 | 15 | - id: operator-composite-2 16 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/operator-composite.yml" 17 | 18 | sinks: 19 | - id: sink-composite 20 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sink-composite.yml" 21 | 22 | links: 23 | - from: 24 | node: source-composite 25 | output: source-composite-out-1 26 | to: 27 | node: operator-composite-1 28 | input: operator-composite-in-1 29 | 30 | - from: 31 | node: source-composite 32 | output: source-composite-out-2 33 | to: 34 | node: operator-composite-1 35 | input: operator-composite-in-2 36 | 37 | - from: 38 | node: operator-composite-1 39 | output: operator-composite-out-1 40 | to: 41 | node: operator-composite-2 42 | input: operator-composite-in-1 43 | 44 | - from: 45 | node: operator-composite-1 46 | output: operator-composite-out-2 47 | to: 48 | node: operator-composite-2 49 | input: operator-composite-in-2 50 | 51 | - from: 52 | node: operator-composite-2 53 | output: operator-composite-out-1 54 | to: 55 | node: sink-composite 56 | input: sink-composite-in-1 57 | 58 | - from: 59 | node: operator-composite-2 60 | output: operator-composite-out-2 61 | to: 62 | node: 
sink-composite 63 | input: sink-composite-in-2 64 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/data-flow-recursion.yml: -------------------------------------------------------------------------------- 1 | name: test-recursion 2 | 3 | vars: 4 | BASE_DIR: ./src/tests 5 | 6 | sources: 7 | - id: source-1 8 | descriptor: "file://{{ BASE_DIR }}/source-1.yml" 9 | 10 | operators: 11 | - id: operator-1 12 | descriptor: "file://{{ BASE_DIR }}/operator-1.yml" 13 | 14 | - id: operator-infinite 15 | descriptor: "file://{{ BASE_DIR }}/operator-infinite.yml" 16 | 17 | sinks: 18 | - id: sink-1 19 | descriptor: "file://{{ BASE_DIR }}/sink-1.yml" 20 | 21 | links: 22 | - from: 23 | node: source-1 24 | output: source-1-out 25 | to: 26 | node: operator-1 27 | input: operator-1-in 28 | - from: 29 | node: operator-1 30 | output: operator-out-1 31 | to: 32 | node: operator-infinite 33 | input: infinite-input 34 | - from: 35 | node: operator-infinite 36 | output: infinite-output 37 | to: 38 | node: sink-1 39 | input: sink-1-in 40 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/data-flow.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | vars: 4 | SCHEME: file:// 5 | BASE_DIR: 6 | RUNTIME_1: 7 | RUNTIME_2: 8 | RUNTIME_COMPOSITE: 9 | 10 | configuration: 11 | foo: "global-outer" 12 | 13 | sources: 14 | - id: source-1 15 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/source.yml" 16 | 17 | - id: source-2 18 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/source.yml" 19 | 20 | - id: source-composite 21 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/source-composite.yml" 22 | 23 | 24 | operators: 25 | - id: operator-1 26 | description: "Outer description" 27 | descriptor: "{{ SCHEME }}{{BASE_DIR }}/operator.yml" 28 | 29 | - id: operator-2 30 | descriptor: "{{ SCHEME }}{{ BASE_DIR}}/operator.yml" 31 | 32 | - id: operator-composite 33 | descriptor: "{{ SCHEME }}{{BASE_DIR}}/operator-composite.yml" 34 | configuration: 35 | quux: "global-inner" 36 | 37 | 38 | sinks: 39 | - id: sink-1 40 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sink.yml" 41 | 42 | - id: sink-2 43 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sink.yml" 44 | 45 | - id: sink-composite 46 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sink-composite.yml" 47 | 48 | 49 | links: 50 | - from: 51 | node: source-1 52 | output: source-out 53 | to: 54 | node: operator-1 55 | input: operator-in 56 | - from: 57 | node: operator-1 58 | output: operator-out 59 | to: 60 | node: sink-1 61 | input: sink-in 62 | 63 | - from: 64 | node: source-2 65 | output: source-out 66 | to: 67 | node: operator-2 68 | input: operator-in 69 | - from: 70 | node: operator-2 71 | output: operator-out 72 | to: 73 | node: sink-2 74 | input: sink-in 75 | 76 | - from: 77 | node: source-composite 78 | output: source-composite-out-1 79 | to: 80 | node: operator-composite 81 | input: operator-composite-in-1 82 | 83 | - from: 84 | node: source-composite 85 | output: source-composite-out-2 86 | to: 87 | node: operator-composite 88 | input: operator-composite-in-2 89 | 90 | - from: 91 | node: operator-composite 92 | output: operator-composite-out-1 93 | to: 94 | node: sink-composite 95 | input: sink-composite-in-1 96 | 97 | - from: 98 | node: operator-composite 99 | output: operator-composite-out-2 100 | to: 101 | node: sink-composite 102 | input: sink-composite-in-2 103 | 104 | 105 | mapping: 106 | "{{ RUNTIME_1 }}": 
107 | - source-1 108 | "{{ RUNTIME_2 }}": 109 | - sink-2 110 | "{{ RUNTIME_COMPOSITE }}": 111 | - source-composite 112 | - operator-composite 113 | - sink-composite 114 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/operator-1.yml: -------------------------------------------------------------------------------- 1 | description: "operator-1" 2 | library: "file://operator-1.so" 3 | inputs: 4 | - "operator-1-in-1" 5 | - "operator-1-in-2" 6 | outputs: 7 | - "operator-1-out" 8 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/operator-2.yml: -------------------------------------------------------------------------------- 1 | description: operator-2 2 | 3 | library: file://operator-2.so 4 | 5 | inputs: [operator-2-in] 6 | 7 | outputs: [operator-2-out] 8 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/operator-composite.yml: -------------------------------------------------------------------------------- 1 | description: composite 2 | 3 | 4 | vars: 5 | SCHEME: "" 6 | BASE_DIR: "" 7 | 8 | 9 | configuration: 10 | foo: "composite-outer" 11 | bar: "composite-outer" 12 | 13 | 14 | inputs: 15 | - id: operator-composite-in-1 16 | node: sub-operator-1 17 | input: sub-operator-1-in-1 18 | 19 | - id: operator-composite-in-2 20 | node: sub-operator-1 21 | input: sub-operator-1-in-2 22 | 23 | 24 | outputs: 25 | - id: operator-composite-out-1 26 | node: sub-operator-2 27 | output: sub-operator-2-out-1 28 | 29 | - id: operator-composite-out-2 30 | node: sub-operator-2 31 | output: sub-operator-2-out-2 32 | 33 | 34 | operators: 35 | - id: sub-operator-1 36 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sub-operator-1.yml" 37 | 38 | - id: sub-operator-composite 39 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sub-operator-composite.yml" 40 | configuration: 41 | foo: "composite-inner" 42 | buzz: "composite-inner" 43 | 44 | - id: sub-operator-2 45 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sub-operator-2.yml" 46 | 47 | 48 | links: 49 | - from: 50 | node: sub-operator-1 51 | output: sub-operator-1-out 52 | to: 53 | node: sub-operator-composite 54 | input: sub-operator-composite-in 55 | 56 | - from: 57 | node: sub-operator-composite 58 | output: sub-operator-composite-out 59 | to: 60 | node: sub-operator-2 61 | input: sub-operator-2-in 62 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/operator-infinite.yml: -------------------------------------------------------------------------------- 1 | description: operator-infinite 2 | 3 | vars: 4 | SCHEME: "" # set up by the data flow 5 | BASE_DIR: "" # 6 | 7 | inputs: 8 | - id: infinite-input 9 | node: operator-1 10 | input: operator-1-in 11 | 12 | 13 | outputs: 14 | - id: infinite-output 15 | node: operator-2 16 | output: operator-2-out 17 | 18 | 19 | operators: 20 | - id: operator-1 21 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/operator-1.yml" 22 | 23 | - id: operator-infinite 24 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/operator-infinite.yml" 25 | 26 | - id: operator-2 27 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/operator-2.yml" 28 | 29 | 30 | links: 31 | - from: 32 | node: operator-1 33 | output: operator-1-out 34 | to: 35 | node: operator-infinite 36 | input: infinite-input 37 | - from: 38 | node: operator-infinite 39 | output: infinite-output 40 | to: 41 | node: operator-2 42 | 
input: operator-2-out 43 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/operator.yml: -------------------------------------------------------------------------------- 1 | description: operator 2 | 3 | library: "{{ SCHEME }}operator.so" 4 | 5 | inputs: [operator-in] 6 | 7 | outputs: [operator-out] 8 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/sink-composite.yml: -------------------------------------------------------------------------------- 1 | description: composite-sink 2 | 3 | library: "{{ SCHEME }}sink-composite.so" 4 | 5 | configuration: 6 | bar: reverse 7 | 8 | inputs: 9 | - sink-composite-in-1 10 | - sink-composite-in-2 11 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/sink.yml: -------------------------------------------------------------------------------- 1 | description: sink 2 | 3 | library: "{{ SCHEME }}sink.so" 4 | 5 | inputs: [sink-in] 6 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/source-composite.yml: -------------------------------------------------------------------------------- 1 | description: composite-source 2 | 3 | vars: 4 | SCHEME: "" 5 | 6 | configuration: 7 | bar: re-reverse 8 | 9 | library: "{{ SCHEME }}source-composite.so" 10 | 11 | outputs: 12 | - source-composite-out-1 13 | - source-composite-out-2 14 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/source.yml: -------------------------------------------------------------------------------- 1 | description: source 2 | 3 | library: "{{ SCHEME }}source.so" 4 | 5 | outputs: [source-out] 6 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/sub-operator-1.yml: -------------------------------------------------------------------------------- 1 | description: leaf-operator-1 2 | 3 | library: "{{ SCHEME }}sub-operator-1.so" 4 | 5 | inputs: 6 | - sub-operator-1-in-1 7 | - sub-operator-1-in-2 8 | 9 | outputs: [sub-operator-1-out] 10 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/sub-operator-2.yml: -------------------------------------------------------------------------------- 1 | description: leaf-operator-2 2 | 3 | library: "{{ SCHEME }}sub-operator-2.so" 4 | 5 | inputs: [sub-operator-2-in] 6 | 7 | outputs: 8 | - sub-operator-2-out-1 9 | - sub-operator-2-out-2 10 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/sub-operator-composite.yml: -------------------------------------------------------------------------------- 1 | description: sub-operator-composite 2 | 3 | vars: 4 | SCHEME: "" # set up by the data flow 5 | BASE_DIR: "" # 6 | 7 | inputs: 8 | - id: sub-operator-composite-in 9 | node: sub-sub-operator-1 10 | input: sub-sub-operator-1-in 11 | 12 | 13 | outputs: 14 | - id: sub-operator-composite-out 15 | node: sub-sub-operator-2 16 | output: sub-sub-operator-2-out 17 | 18 | 19 | operators: 20 | - id: sub-sub-operator-1 21 | description: sub-leaf-operator-1 22 | library: "{{ SCHEME }}sub-sub-operator-1.so" 23 | configuration: 24 | foo: "leaf" 25 | bar: "leaf" 26 | baz: "leaf" 27 | quux: "leaf" 28 | 
inputs: [sub-sub-operator-1-in] 29 | outputs: [sub-sub-operator-1-out] 30 | 31 | - id: sub-sub-operator-2 32 | descriptor: "{{ SCHEME }}{{ BASE_DIR }}/sub-sub-operator-2.yml" 33 | 34 | 35 | links: 36 | - from: 37 | node: sub-sub-operator-1 38 | output: sub-sub-operator-1-out 39 | to: 40 | node: sub-sub-operator-2 41 | input: sub-sub-operator-2-in 42 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/sub-sub-operator-1.yml: -------------------------------------------------------------------------------- 1 | description: sub-leaf-operator-1 2 | 3 | library: "{{ SCHEME }}sub-sub-operator-1.so" 4 | 5 | configuration: 6 | foo: "leaf" 7 | bar: "leaf" 8 | baz: "leaf" 9 | quux: "leaf" 10 | 11 | inputs: [sub-sub-operator-1-in] 12 | 13 | outputs: [sub-sub-operator-1-out] 14 | -------------------------------------------------------------------------------- /zenoh-flow-descriptors/tests/descriptors/sub-sub-operator-2.yml: -------------------------------------------------------------------------------- 1 | description: sub-leaf-operator-2 2 | 3 | library: "{{ SCHEME }}sub-sub-operator-2.so" 4 | 5 | inputs: [sub-sub-operator-2-in] 6 | 7 | outputs: [sub-sub-operator-2-out] 8 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "Internal crate for Zenoh-Flow." 19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-flow-nodes" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | [dependencies] 27 | anyhow = { workspace = true } 28 | async-trait = { workspace = true } 29 | bincode = { version = "1.3" } 30 | flume = { workspace = true } 31 | futures = { workspace = true } 32 | serde = { workspace = true } 33 | tracing = { workspace = true } 34 | uhlc = { workspace = true } 35 | uuid = { workspace = true } 36 | zenoh-flow-commons = { workspace = true } 37 | zenoh-flow-derive = { path = "../zenoh-flow-derive" } 38 | 39 | [dev-dependencies] 40 | prost = "0.12" 41 | serde_json = { workspace = true } 42 | 43 | [build-dependencies] 44 | rustc_version = "0.4.0" 45 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/build.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | fn main() { 16 | let version = rustc_version::version().unwrap(); 17 | println!("cargo:rustc-env=RUSTC_VERSION={version}"); 18 | } 19 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/src/context.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{path::PathBuf, sync::Arc}; 16 | 17 | use zenoh_flow_commons::{InstanceId, NodeId, RuntimeId}; 18 | 19 | /// The `Context` structure provides information about the data flow and the Zenoh-Flow runtime. 20 | /// 21 | /// In particular, it allows accessing: 22 | /// - the [name](Context::name()) of the data flow, 23 | /// - the [instance id](Context::instance_id()) of this instance of the data flow, 24 | /// - the [runtime id](Context::runtime_id()) of the Zenoh-Flow runtime managing the **node**. 25 | #[derive(Clone, Debug)] 26 | pub struct Context { 27 | pub(crate) node_id: NodeId, 28 | pub(crate) flow_name: Arc<str>, 29 | pub(crate) instance_id: InstanceId, 30 | pub(crate) runtime_id: RuntimeId, 31 | pub(crate) library_path: Arc<PathBuf>, 32 | } 33 | 34 | impl Context { 35 | /// Creates a new node `Context`. 36 | pub fn new( 37 | flow_name: Arc<str>, 38 | instance_id: InstanceId, 39 | runtime_id: RuntimeId, 40 | library_path: Arc<PathBuf>, 41 | node_id: NodeId, 42 | ) -> Self { 43 | Self { 44 | flow_name, 45 | instance_id, 46 | runtime_id, 47 | library_path, 48 | node_id, 49 | } 50 | } 51 | 52 | /// Returns the name of the data flow. 53 | /// 54 | /// Note that all instances of the same data flow will share the same `name`. 55 | pub fn name(&self) -> &str { 56 | self.flow_name.as_ref() 57 | } 58 | 59 | /// Returns the unique identifier of this instance of the data flow. 60 | pub fn instance_id(&self) -> &InstanceId { 61 | &self.instance_id 62 | } 63 | 64 | /// Returns the unique identifier of the Zenoh-Flow runtime managing the **node**. 65 | /// 66 | /// Note that, for the same instance, different nodes might return different runtime identifiers if they are running 67 | /// on different Zenoh-Flow runtimes. 68 | pub fn runtime_id(&self) -> &RuntimeId { 69 | &self.runtime_id 70 | } 71 | 72 | /// Returns the path of the library loaded by the Zenoh-Flow runtime. 73 | /// 74 | /// The path is local to the machine where the Zenoh-Flow runtime is running. 75 | pub fn library_path(&self) -> &PathBuf { 76 | &self.library_path 77 | } 78 | 79 | /// Returns the unique identifier of the node within the data flow.
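///
/// # Example
///
/// An illustrative sketch (not part of the original documentation — it assumes the node stored its `Context` in a
/// field named `context`): the identifier can, for instance, be used to tag log messages during an iteration.
///
/// ```ignore
/// tracing::debug!("[{}] (flow: {}) iteration complete", self.context.node_id(), self.context.name());
/// ```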
80 | pub fn node_id(&self) -> &NodeId { 81 | &self.node_id 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/src/declaration.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{pin::Pin, sync::Arc}; 16 | 17 | use futures::Future; 18 | use zenoh_flow_commons::{Configuration, Result}; 19 | 20 | use crate::prelude::{Context, Inputs, Node, Outputs}; 21 | 22 | /// (⚙️ *internal*) Constant used to check if a node is compatible with the Zenoh-Flow runtime managing it. 23 | /// 24 | /// As nodes are dynamically loaded, this is to prevent (possibly cryptic) runtime errors due to an incompatible API. 25 | /// 26 | /// This constant is used by the procedural macros [export_operator](crate::prelude::export_operator), 27 | /// [export_source](crate::prelude::export_source) and [export_sink](crate::prelude::export_sink). A Zenoh-Flow runtime 28 | /// will compare its value of this constant to the value exposed by every node it dynamically loads. 29 | pub const CORE_VERSION: &str = env!("CARGO_PKG_VERSION"); 30 | 31 | /// (⚙️ *internal*) Constant used to check if a node was compiled with the same version of the Rust compiler as the 32 | /// Zenoh-Flow runtime managing it. 33 | /// 34 | /// As Rust is not ABI stable, this is to prevent (possibly cryptic) runtime errors. 35 | /// 36 | /// This constant is used by the procedural macros [export_operator](crate::prelude::export_operator), 37 | /// [export_source](crate::prelude::export_source) and [export_sink](crate::prelude::export_sink). A Zenoh-Flow runtime 38 | /// will compare its value of this constant to the value exposed by every node it dynamically loads. 39 | pub const RUSTC_VERSION: &str = env!("RUSTC_VERSION"); 40 | 41 | /// (⚙️ *internal*) Declaration expected in the library that will be loaded. 42 | /// 43 | /// This structure is automatically created by the procedural macros 44 | /// [export_operator](crate::prelude::export_operator), [export_source](crate::prelude::export_source) and 45 | /// [export_sink](crate::prelude::export_sink). 46 | pub struct NodeDeclaration<C> { 47 | pub rustc_version: &'static str, 48 | pub core_version: &'static str, 49 | pub constructor: C, 50 | } 51 | 52 | /// (⚙️ *internal*) `SourceFn` is the only signature we accept to construct a [Source](crate::prelude::Source). 53 | /// 54 | /// This function is automatically created by the procedural macro [export_source](crate::prelude::export_source). 55 | pub type SourceFn = fn( 56 | Context, 57 | Configuration, 58 | Outputs, 59 | ) -> Pin<Box<dyn Future<Output = Result<Arc<dyn Node>>> + Send>>; 60 | 61 | /// (⚙️ *internal*) `OperatorFn` is the only signature we accept to construct an [Operator](crate::prelude::Operator). 62 | /// 63 | /// This function is automatically created by the procedural macro [export_operator](crate::prelude::export_operator).
64 | pub type OperatorFn = fn( 65 | Context, 66 | Configuration, 67 | Inputs, 68 | Outputs, 69 | ) -> Pin<Box<dyn Future<Output = Result<Arc<dyn Node>>> + Send>>; 70 | 71 | /// (⚙️ *internal*) `SinkFn` is the only signature we accept to construct a [Sink](crate::prelude::Sink). 72 | /// 73 | /// This function is automatically created by the procedural macro [export_sink](crate::prelude::export_sink). 74 | pub type SinkFn = fn( 75 | Context, 76 | Configuration, 77 | Inputs, 78 | ) -> Pin<Box<dyn Future<Output = Result<Arc<dyn Node>>> + Send>>; 79 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/src/io/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | mod inputs; 16 | mod outputs; 17 | 18 | pub use self::{ 19 | inputs::{Input, InputBuilder, InputRaw, Inputs}, 20 | outputs::{Output, OutputBuilder, OutputRaw, Outputs}, 21 | }; 22 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/src/io/tests/input-tests.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use prost::Message as pMessage; 18 | use serde::{Deserialize, Serialize}; 19 | 20 | use super::{Input, InputRaw}; 21 | use crate::{ 22 | messages::{LinkMessage, Payload}, 23 | traits::SendSyncAny, 24 | }; 25 | 26 | /// Test that the Input behaves as expected for the provided data and deserialiser: 27 | /// 1. when a Payload::Bytes is received the deserialiser is called and produces the correct output, 28 | /// 2. when a Payload::Typed is received the data can correctly be downcast. 29 | /// 30 | /// ## Scenario tested 31 | /// 32 | /// A typed input is created. 33 | /// 34 | /// We send on the associated channel: 35 | /// 1. a Payload::Bytes (the `expected_serialised`), 36 | /// 2. a Payload::Typed (the `expected_data` upcast to `dyn SendSyncAny`). 37 | /// 38 | /// ## Trait bounds on `T` 39 | /// 40 | /// The bounds on `T` are more restrictive than what they are in the code. In particular, `Clone` 41 | /// and `std::fmt::Debug` are not required. This has no impact on the test and mostly helps us debug.
42 | fn test_typed_input( 43 | expected_data: T, 44 | expected_serialized: Vec, 45 | deserializer: impl Fn(&[u8]) -> anyhow::Result + Send + Sync + 'static, 46 | ) { 47 | let hlc = uhlc::HLC::default(); 48 | let (tx, rx) = flume::unbounded::(); 49 | 50 | let input_raw = InputRaw { 51 | port_id: "test-id".into(), 52 | receiver: rx, 53 | }; 54 | 55 | let input = Input { 56 | input_raw, 57 | deserializer: Arc::new(deserializer), 58 | }; 59 | 60 | let message = LinkMessage::new( 61 | Payload::Bytes(Arc::new(expected_serialized)), 62 | hlc.new_timestamp(), 63 | ); 64 | tx.send(message).expect("Failed to send message"); 65 | 66 | let (data, _) = input 67 | .try_recv() 68 | .expect("Message (serialised) was not sent") 69 | .expect("No message was received"); 70 | 71 | assert_eq!(expected_data, *data); 72 | 73 | let message = LinkMessage::new( 74 | Payload::Typed(( 75 | Arc::new(expected_data.clone()) as Arc, 76 | // The serialiser should never be called, hence the panic. 77 | Arc::new(|_buffer, _data| panic!("Unexpected call to serialise the data")), 78 | )), 79 | hlc.new_timestamp(), 80 | ); 81 | tx.send(message).expect("Failed to send message"); 82 | 83 | let (data, _) = input 84 | .try_recv() 85 | .expect("Message (dyn SendSyncAny) was not sent") 86 | .expect("No message was received"); 87 | assert_eq!(expected_data, *data); 88 | } 89 | 90 | //////////////////////////////////////////////////////////////////////////////////////////////////// 91 | /// SERDE JSON 92 | 93 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 94 | struct TestData { 95 | pub field1: u8, 96 | pub field2: String, 97 | pub field3: f64, 98 | } 99 | 100 | #[test] 101 | fn test_serde_json() { 102 | let expected_data = TestData { 103 | field1: 1u8, 104 | field2: "test".to_string(), 105 | field3: 0.2f64, 106 | }; 107 | 108 | let expected_serialized = 109 | serde_json::ser::to_vec(&expected_data).expect("serde_json failed to serialise"); 110 | 111 | test_typed_input(expected_data, expected_serialized, |bytes| { 112 | serde_json::de::from_slice::(bytes).map_err(|e| anyhow::anyhow!(e)) 113 | }) 114 | } 115 | 116 | //////////////////////////////////////////////////////////////////////////////////////////////////// 117 | /// PROTOBUF PROST 118 | 119 | // This structure was generated using the `prost-build` crate. We copied & pasted it here such that 120 | // we do not have to include `prost-build` as a build dependency to Zenoh-Flow. Our only purpose is 121 | // to ensure that at least one implementation of ProtoBuf works, not to suggest to use Prost. 122 | #[allow(clippy::derive_partial_eq_without_eq)] 123 | #[derive(Clone, PartialEq, ::prost::Message)] 124 | pub struct TestProto { 125 | #[prost(int64, tag = "1")] 126 | pub field1: i64, 127 | #[prost(string, tag = "2")] 128 | pub field2: ::prost::alloc::string::String, 129 | #[prost(double, tag = "3")] 130 | pub field3: f64, 131 | } 132 | 133 | #[test] 134 | fn test_protobuf_prost() { 135 | let expected_data = TestProto { 136 | field1: 1i64, 137 | field2: "test".to_string(), 138 | field3: 0.2f64, 139 | }; 140 | 141 | // First test, send data serialised. 
142 | let expected_serialized = expected_data.encode_to_vec(); 143 | 144 | test_typed_input(expected_data, expected_serialized, |bytes| { 145 | ::decode(bytes).map_err(|e| anyhow::anyhow!(e)) 146 | }) 147 | } 148 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/src/io/tests/output-tests.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{collections::HashMap, sync::Arc}; 16 | 17 | use prost::Message; 18 | use serde::{Deserialize, Serialize}; 19 | use zenoh_flow_commons::PortId; 20 | 21 | use super::Outputs; 22 | use crate::messages::{LinkMessage, Payload}; 23 | 24 | /// Test that the Output behaves as expected for the provided data and serialiser: 25 | /// 1. the `serialiser` is correctly type-erased yet still produces the correct output, 26 | /// 2. the `expected_data` is not eagerly serialised and can correctly be downcast. 27 | /// 28 | /// ## Scenario tested 29 | /// 30 | /// A bogus output is generated — see the call to `outputs.take`. We go through the `Outputs` 31 | /// structure such that the transformation on the serialiser is performed (i.e. the type is erased). 32 | /// 33 | /// The provided `expected_data` is sent on the output. 34 | /// 35 | /// A receiver channel ensures that: 36 | /// 1. it is a `Payload::Typed`, 37 | /// 2. we can still downcast it to `T`, 38 | /// 3. the result of the serialisation is correct. 39 | /// 40 | /// ## Traits on T 41 | /// 42 | /// The bounds on `T` are more restrictive than what they are in the code. In particular, `Clone` 43 | /// and `std::fmt::Debug` are not required. This has no impact on the test and mostly help us debug. 
44 | fn test_typed_output( 45 | expected_data: T, 46 | expected_serialized: Vec, 47 | serializer: impl for<'b, 'a> Fn(&'b mut Vec, &'a T) -> anyhow::Result<()> 48 | + Send 49 | + Sync 50 | + 'static, 51 | ) { 52 | let hlc = uhlc::HLC::default(); 53 | let key: PortId = "test".into(); 54 | 55 | let (tx, rx) = flume::unbounded::(); 56 | 57 | let mut outputs = Outputs { 58 | hmap: HashMap::from([(key.clone(), vec![tx])]), 59 | hlc: Arc::new(hlc), 60 | }; 61 | 62 | let output = outputs 63 | .take(key.as_ref()) 64 | .expect("Wrong key provided") 65 | .typed(serializer); 66 | 67 | output 68 | .try_send(expected_data.clone(), None) 69 | .expect("Failed to send the message"); 70 | 71 | let message = rx.recv().expect("Received no message"); 72 | match message.payload { 73 | Payload::Bytes(_) => panic!("Unexpected bytes payload"), 74 | Payload::Typed((dyn_data, serializer)) => { 75 | let mut dyn_serialized = Vec::new(); 76 | (serializer)(&mut dyn_serialized, dyn_data.clone()).expect("Failed to serialise"); 77 | assert_eq!(expected_serialized, dyn_serialized); 78 | 79 | let data = (*dyn_data) 80 | .as_any() 81 | .downcast_ref::() 82 | .expect("Failed to downcast"); 83 | assert_eq!(expected_data, *data); 84 | } 85 | } 86 | } 87 | 88 | //////////////////////////////////////////////////////////////////////////////////////////////////// 89 | /// SERDE JSON 90 | 91 | #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] 92 | struct TestData { 93 | pub field1: u8, 94 | pub field2: String, 95 | pub field3: f64, 96 | } 97 | 98 | #[test] 99 | fn test_serde_json() { 100 | let expected_data = TestData { 101 | field1: 1u8, 102 | field2: "two".into(), 103 | field3: 0.3f64, 104 | }; 105 | 106 | let expected_serialized = 107 | serde_json::ser::to_vec(&expected_data).expect("serde_json failed to serialise"); 108 | 109 | let serializer = |buffer: &mut Vec, data: &TestData| { 110 | serde_json::ser::to_writer(buffer, data).map_err(|e| anyhow::anyhow!(e)) 111 | }; 112 | 113 | test_typed_output(expected_data, expected_serialized, serializer) 114 | } 115 | 116 | //////////////////////////////////////////////////////////////////////////////////////////////////// 117 | /// PROTOBUF PROST 118 | 119 | // This structure was generated using the `prost-build` crate. We copied & pasted it here such that 120 | // we do not have to include `prost-build` as a build dependency to Zenoh-Flow. Our only purpose is 121 | // to ensure that at least one implementation of ProtoBuf works, not to suggest to use Prost. 
122 | #[allow(clippy::derive_partial_eq_without_eq)] 123 | #[derive(Clone, PartialEq, ::prost::Message)] 124 | pub struct TestProto { 125 | #[prost(int64, tag = "1")] 126 | pub field1: i64, 127 | #[prost(string, tag = "2")] 128 | pub field2: ::prost::alloc::string::String, 129 | #[prost(double, tag = "3")] 130 | pub field3: f64, 131 | } 132 | 133 | #[test] 134 | fn test_protobuf_prost() { 135 | let expected_data = TestProto { 136 | field1: 1i64, 137 | field2: "two".into(), 138 | field3: 0.3f64, 139 | }; 140 | 141 | let expected_serialized = expected_data.encode_to_vec(); 142 | 143 | let serializer = |buffer: &mut Vec, data: &TestProto| { 144 | data.encode(buffer).map_err(|e| anyhow::anyhow!(e)) 145 | }; 146 | 147 | test_typed_output(expected_data, expected_serialized, serializer) 148 | } 149 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/src/io/tests/test_types.proto: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | syntax = "proto3"; 16 | 17 | package testtypes.test_types; 18 | 19 | message TestProto { 20 | int64 field1 = 1; 21 | string field2 = 2; 22 | double field3 = 3; 23 | } 24 | -------------------------------------------------------------------------------- /zenoh-flow-nodes/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! This crate exposes the traits and structures necessary to create Zenoh-Flow nodes. 16 | //! 17 | //! Items not exposed in the `prelude` are meant for internal usage within the Zenoh-Flow project. 18 | //! 19 | //! # [prelude] 20 | //! 21 | //! Application developers wishing to create a data flow should include the [prelude] in their code-base as it regroups 22 | //! all the required structures and traits: 23 | //! 24 | //! ``` 25 | //! use zenoh_flow_nodes::prelude::*; 26 | //! ``` 27 | //! 28 | //! Next would be to implement, as different shared libraries, at least a [Source](crate::prelude::Source), a 29 | //! [Sink](crate::prelude::Sink) and possibly some [Operators](crate::prelude::Operator). See their respective 30 | //! documentation for examples. 
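//!
//! # Example (illustrative sketch)
//!
//! The snippet below is **not** taken from this crate's documentation: the struct, the port name `"out"`, the
//! byte serialiser and the reliance on the `async-trait` crate are assumptions made purely for illustration. It only
//! sketches the general shape of a Source; refer to the examples shipped with Zenoh-Flow for an authoritative version.
//!
//! ```ignore
//! use zenoh_flow_nodes::prelude::*;
//!
//! #[export_source]
//! pub struct Ticker {
//!     output: Output<u64>,
//! }
//!
//! #[async_trait::async_trait]
//! impl Source for Ticker {
//!     async fn new(_context: Context, _configuration: Configuration, mut outputs: Outputs) -> Result<Self> {
//!         Ok(Self {
//!             output: outputs
//!                 .take("out")
//!                 .expect("No output named 'out' found")
//!                 // Little-endian bytes are an arbitrary choice of serialisation for this sketch.
//!                 .typed(|buffer: &mut Vec<u8>, data: &u64| {
//!                     buffer.extend_from_slice(&data.to_le_bytes());
//!                     Ok(())
//!                 }),
//!         })
//!     }
//! }
//!
//! #[async_trait::async_trait]
//! impl Node for Ticker {
//!     async fn iteration(&self) -> Result<()> {
//!         // Send the same value at every iteration; passing `None` lets the runtime timestamp the message.
//!         self.output.try_send(42u64, None)
//!     }
//! }
//! ```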
31 | 32 | pub(crate) mod context; 33 | pub(crate) mod declaration; 34 | pub(crate) mod io; 35 | pub(crate) mod messages; 36 | pub(crate) mod traits; 37 | 38 | pub use self::{ 39 | declaration::{NodeDeclaration, OperatorFn, SinkFn, SourceFn, CORE_VERSION, RUSTC_VERSION}, 40 | io::{InputBuilder, OutputBuilder}, 41 | }; 42 | 43 | /// This module expose all the structures required to implement a Zenoh-Flow node. 44 | /// 45 | /// It also re-exposes items from the [anyhow], [zenoh_flow_commons] and [zenoh_flow_derive] crates. 46 | pub mod prelude { 47 | pub use anyhow::{anyhow, bail}; 48 | pub use uhlc::Timestamp; 49 | pub use zenoh_flow_commons::{Configuration, InstanceId, NodeId, Result, RuntimeId}; 50 | pub use zenoh_flow_derive::{export_operator, export_sink, export_source}; 51 | 52 | pub use crate::{ 53 | context::Context, 54 | io::{Input, InputRaw, Inputs, Output, OutputRaw, Outputs}, 55 | messages::{Data, LinkMessage, Payload}, 56 | traits::{Node, Operator, SendSyncAny, Sink, Source}, 57 | }; 58 | } 59 | -------------------------------------------------------------------------------- /zenoh-flow-records/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "Internal crate for Zenoh-Flow." 19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-flow-records" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | [dependencies] 27 | anyhow = { workspace = true } 28 | serde = { workspace = true } 29 | uuid = { workspace = true } 30 | zenoh-flow-commons = { workspace = true } 31 | zenoh-flow-descriptors = { workspace = true } 32 | zenoh-keyexpr = { workspace = true } 33 | 34 | [features] 35 | default = [] 36 | shared-memory = [] 37 | 38 | [dev-dependencies] 39 | serde_json = { workspace = true } 40 | serde_yaml = { workspace = true } 41 | -------------------------------------------------------------------------------- /zenoh-flow-records/src/connectors.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::fmt::Display; 16 | 17 | use serde::{Deserialize, Serialize}; 18 | use zenoh_flow_commons::NodeId; 19 | use zenoh_keyexpr::OwnedKeyExpr; 20 | 21 | /// A `SenderRecord` describes the sending end of a "Zenoh connection" between Zenoh-Flow runtimes. 22 | /// 23 | /// Effectively, a `Sender` performs `put` operations on Zenoh. 
The main difference with an out-of-the-box `put` is 24 | /// that Zenoh-Flow manages when they are done and on which resource. 25 | /// 26 | /// Specifically, Zenoh-Flow ensures that each resource stays unique. This allows deploying the same data flow multiple 27 | /// times on the same infrastructure and keeping them isolated. 28 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 29 | pub struct SenderRecord { 30 | pub(crate) id: NodeId, 31 | pub(crate) resource: OwnedKeyExpr, 32 | } 33 | 34 | impl Display for SenderRecord { 35 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 36 | write!(f, "Sender < {} > publishing on: {}", self.id, self.resource) 37 | } 38 | } 39 | 40 | impl SenderRecord { 41 | pub fn id(&self) -> NodeId { 42 | self.id.clone() 43 | } 44 | 45 | pub fn resource(&self) -> &OwnedKeyExpr { 46 | &self.resource 47 | } 48 | } 49 | 50 | /// A `ReceiverRecord` describes the receiving end of a "Zenoh connection" between Zenoh-Flow runtimes. 51 | /// 52 | /// Effectively, a `Receiver` encapsulates a Zenoh subscriber. The main difference with out-of-the-box subscriber is 53 | /// that Zenoh-Flow manages how it is pulled and the resource it declares. 54 | /// 55 | /// Specifically, Zenoh-Flow ensures that each resource stays unique. This allows deploying the same data flow multiple 56 | /// times on the same infrastructure and keeping them isolated. 57 | #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)] 58 | pub struct ReceiverRecord { 59 | pub(crate) id: NodeId, 60 | pub(crate) resource: OwnedKeyExpr, 61 | } 62 | 63 | impl Display for ReceiverRecord { 64 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 65 | write!( 66 | f, 67 | "Receiver < {} > subscribed on: {}", 68 | self.id, self.resource 69 | ) 70 | } 71 | } 72 | 73 | impl ReceiverRecord { 74 | pub fn id(&self) -> NodeId { 75 | self.id.clone() 76 | } 77 | 78 | pub fn resource(&self) -> &OwnedKeyExpr { 79 | &self.resource 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /zenoh-flow-records/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! This crate exposes `*Record` structures. A *record* in Zenoh-Flow is a description of a data flow (or part of it) 16 | //! that is tied to a specific infrastructure and deployment. 17 | //! 18 | //! In particular, a [DataFlowRecord] represents a single deployment of a 19 | //! [FlattenedDataFlowDescriptor](zenoh_flow_descriptors::FlattenedDataFlowDescriptor) on an infrastructure: all the 20 | //! nodes have been assigned to a Zenoh-Flow runtime. This is why to each [DataFlowRecord] is associated a unique 21 | //! [InstanceId](zenoh_flow_commons::InstanceId) which uniquely identifies it. 22 | //! 23 | //! # ⚠️ Internal usage 24 | //! 25 | //! This crate is (mostly) intended for internal usage within the 26 | //! [Zenoh-Flow](https://github.com/eclipse-zenoh/zenoh-flow) project. 
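//!
//! As a brief illustration, mirroring what the `zfctl` "run local" command does elsewhere in this workspace, a record
//! is obtained from a flattened descriptor and the runtime hosting the nodes. Both `flattened_flow` and `runtime` are
//! assumed to already exist; this is a sketch, not a complete program:
//!
//! ```ignore
//! use zenoh_flow_records::DataFlowRecord;
//!
//! // `flattened_flow`: a FlattenedDataFlowDescriptor, `runtime`: a zenoh-flow-runtime Runtime.
//! let record = DataFlowRecord::try_new(&flattened_flow, runtime.id())?;
//!
//! // Each record carries a unique InstanceId identifying this particular deployment.
//! let instance_id = record.instance_id().clone();
//! ```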
27 | 28 | mod connectors; 29 | mod dataflow; 30 | 31 | pub use self::{ 32 | connectors::{ReceiverRecord, SenderRecord}, 33 | dataflow::DataFlowRecord, 34 | }; 35 | -------------------------------------------------------------------------------- /zenoh-flow-runtime/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "Internal crate for Zenoh-Flow." 19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-flow-runtime" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | 27 | [dependencies] 28 | anyhow = { workspace = true } 29 | async-std = { workspace = true } 30 | async-trait = { workspace = true } 31 | bincode = { version = "1.3" } 32 | flume = { workspace = true } 33 | futures = { workspace = true } 34 | libloading = "0.8" 35 | serde = { workspace = true } 36 | thiserror = "1" 37 | tracing = { workspace = true } 38 | uhlc = { workspace = true } 39 | url = { workspace = true } 40 | uuid = { workspace = true } 41 | zenoh = { workspace = true, optional = true } 42 | zenoh-flow-commons = { workspace = true } 43 | zenoh-flow-descriptors = { workspace = true } 44 | zenoh-flow-nodes = { workspace = true } 45 | zenoh-flow-records = { workspace = true } 46 | 47 | [features] 48 | default = ["zenoh"] 49 | zenoh = ["dep:zenoh"] 50 | shared-memory = ["zenoh"] 51 | test-utils = [] 52 | 53 | [dev-dependencies] 54 | serde_yaml = { workspace = true } 55 | zenoh-flow-runtime = { path = ".", features = ["test-utils"] } 56 | -------------------------------------------------------------------------------- /zenoh-flow-runtime/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | //! This crate exposes the structures driving the execution of a data flow: the [Runtime] and the [DataFlowInstance]. 16 | //! 17 | //! If the feature `zenoh` is enabled (it is by default), this crate additionally re-exports the structures from 18 | //! [Zenoh](zenoh) that allow opening a [Session](zenoh::Session) *asynchronously*. 19 | //! 20 | //! Users interested in exposing a Zenoh-Flow runtime should find everything in the [Runtime] and [RuntimeBuilder]. 21 | //! 22 | //! Users interested in fetching the state of a data flow instance should look into the [DataFlowInstance], 23 | //! [InstanceState] and [InstanceStatus] structures. 
These structures are leveraged by the `zfctl` command line tool. 24 | 25 | mod instance; 26 | pub use self::instance::{DataFlowInstance, InstanceState, InstanceStatus}; 27 | 28 | mod loader; 29 | pub use self::loader::{Extension, Extensions}; 30 | 31 | #[cfg(feature = "shared-memory")] 32 | mod shared_memory; 33 | 34 | mod runners; 35 | 36 | mod runtime; 37 | pub use runtime::{DataFlowErr, Runtime, RuntimeBuilder}; 38 | 39 | /// A re-export of the Zenoh structures needed to open a [Session](zenoh::Session) asynchronously. 40 | #[cfg(feature = "zenoh")] 41 | pub mod zenoh { 42 | pub use zenoh::{open, Config, Session}; 43 | } 44 | -------------------------------------------------------------------------------- /zenoh-flow-runtime/src/runners/builtin/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | #[cfg(feature = "zenoh")] 16 | pub(crate) mod zenoh; 17 | -------------------------------------------------------------------------------- /zenoh-flow-runtime/src/runners/builtin/zenoh/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | pub(crate) mod sink; 16 | pub(crate) mod source; 17 | -------------------------------------------------------------------------------- /zenoh-flow-runtime/src/runners/builtin/zenoh/source.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::{collections::HashMap, pin::Pin, sync::Arc}; 16 | 17 | use anyhow::{anyhow, Context as ac}; 18 | use async_std::sync::Mutex; 19 | use futures::{future::select_all, Future}; 20 | use zenoh::{ 21 | handlers::FifoChannelHandler, key_expr::OwnedKeyExpr, pubsub::Subscriber, sample::Sample, 22 | Session, 23 | }; 24 | use zenoh_flow_commons::{NodeId, PortId, Result}; 25 | use zenoh_flow_nodes::prelude::{Node, OutputRaw, Outputs}; 26 | 27 | /// Internal type of pending futures for the ZenohSource 28 | pub(crate) type ZSubFut = Pin)> + Send + Sync>>; 29 | 30 | fn wait_zenoh_sub(id: PortId, sub: &Subscriber>) -> ZSubFut { 31 | let sub = sub.handler().clone(); 32 | Box::pin(async move { (id, sub.recv_async().await.map_err(|e| anyhow!("{e:?}"))) }) 33 | } 34 | 35 | pub(crate) struct ZenohSource { 36 | id: NodeId, 37 | session: Session, 38 | outputs: HashMap, 39 | key_exprs: HashMap, 40 | subscribers: Mutex>>>, 41 | futs: Arc>>, 42 | } 43 | 44 | impl ZenohSource { 45 | pub(crate) async fn try_new( 46 | id: &NodeId, 47 | session: Session, 48 | key_exprs: &HashMap, 49 | mut outputs: Outputs, 50 | ) -> Result { 51 | let mut raw_outputs = HashMap::with_capacity(key_exprs.len()); 52 | 53 | for (port, key_expr) in key_exprs.iter() { 54 | raw_outputs.insert( 55 | port.clone(), 56 | outputs 57 | .take(port.as_ref()) 58 | .with_context(|| { 59 | format!( 60 | "{id}: fatal internal error: no channel was created for key \ 61 | expression < {} >", 62 | key_expr 63 | ) 64 | })? 65 | .raw(), 66 | ); 67 | } 68 | 69 | let zenoh_source = Self { 70 | id: id.clone(), 71 | session, 72 | outputs: raw_outputs, 73 | key_exprs: key_exprs.clone(), 74 | subscribers: Mutex::new(HashMap::with_capacity(key_exprs.len())), 75 | futs: Arc::new(Mutex::new(Vec::with_capacity(key_exprs.len()))), 76 | }; 77 | 78 | Ok(zenoh_source) 79 | } 80 | } 81 | 82 | #[async_trait::async_trait] 83 | impl Node for ZenohSource { 84 | // When we resume an aborted Zenoh Source, we have to re-subscribe to the key expressions and, possibly, recreate 85 | // the futures awaiting publications. 86 | async fn on_resume(&self) -> Result<()> { 87 | let mut futures = self.futs.lock().await; 88 | let futures_were_empty = futures.is_empty(); 89 | 90 | let mut subscribers = self.subscribers.lock().await; 91 | for (port, key_expr) in self.key_exprs.iter() { 92 | let subscriber = self 93 | .session 94 | .declare_subscriber(key_expr) 95 | .await 96 | .map_err(|e| { 97 | anyhow!( 98 | r#"fatal internal error: failed to declare a subscriber on < {} > 99 | Caused by: 100 | {:?}"#, 101 | key_expr, 102 | e 103 | ) 104 | })?; 105 | 106 | // NOTE: Even though it is more likely that the node was aborted while the `futures` were swapped (and thus 107 | // empty), there is still a possibility that the `abort` happened outside of this scenario. 108 | // In this rare case, we should not push new futures. 109 | if futures_were_empty { 110 | futures.push(wait_zenoh_sub(port.clone(), &subscriber)); 111 | } 112 | 113 | subscribers.insert(port.clone(), subscriber); 114 | } 115 | 116 | Ok(()) 117 | } 118 | 119 | // When we abort a Zenoh Source we drop the subscribers to remove them from the Zenoh network. 120 | // 121 | // This action is motivated by two factors: 122 | // 1. It prevents receiving publications that happened while the Zenoh Source was not active. 123 | // 2. 
It prevents impacting the other subscribers / publishers on the same resource. 124 | async fn on_abort(&self) { 125 | let mut subscribers = self.subscribers.lock().await; 126 | subscribers.clear(); 127 | } 128 | 129 | // The iteration of a Zenoh Source polls, concurrently, the subscribers and forwards the first publication received 130 | // on the associated port. 131 | async fn iteration(&self) -> Result<()> { 132 | let mut subscribers_futures = self.futs.lock().await; 133 | let subs = std::mem::take(&mut (*subscribers_futures)); 134 | 135 | let ((id, result), _index, mut remaining) = select_all(subs).await; 136 | 137 | match result { 138 | Ok(sample) => { 139 | let payload = sample.payload().to_bytes(); 140 | let ke = sample.key_expr(); 141 | tracing::trace!("received subscription on {ke}"); 142 | let output = self.outputs.get(&id).ok_or(anyhow!( 143 | "{}: internal error, unable to find output < {} >", 144 | self.id, 145 | id 146 | ))?; 147 | output.send(&*payload, None).await?; 148 | } 149 | Err(e) => tracing::error!("subscriber for output {id} failed with: {e:?}"), 150 | } 151 | 152 | // Add back the subscriber that got polled 153 | let subscribers = self.subscribers.lock().await; 154 | let sub = subscribers 155 | .get(&id) 156 | .ok_or_else(|| anyhow!("[{}] Cannot find port < {} >", self.id, id))?; 157 | 158 | remaining.push(wait_zenoh_sub(id.clone(), sub)); 159 | 160 | // Setting back a complete list for the next iteration 161 | *subscribers_futures = remaining; 162 | 163 | Ok(()) 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /zenoh-flow-runtime/src/runners/mod.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | pub(crate) mod builtin; 16 | 17 | #[cfg(feature = "zenoh")] 18 | pub(crate) mod connectors; 19 | 20 | use std::{sync::Arc, time::Instant}; 21 | 22 | use anyhow::Context; 23 | use async_std::task::JoinHandle; 24 | use libloading::Library; 25 | use tracing::Instrument; 26 | use zenoh_flow_commons::{NodeId, Result}; 27 | use zenoh_flow_nodes::prelude::Node; 28 | 29 | /// A `Runner` takes care of running a `Node`. 30 | /// 31 | /// Each Runner runs in a separate task. 32 | pub(crate) struct Runner { 33 | pub(crate) id: NodeId, 34 | node: Arc, 35 | handle: Option>, 36 | // The `_library` field is used solely for its `Arc`. We need to keep track of how many `Runners` are using the 37 | // `Library` such that once that number reaches 0, we drop the library. 38 | // 39 | // The `Option` exists because only user-implemented nodes have a `Library`. For example, built-in Zenoh Source / 40 | // Sink and the connectors have no `Library`. 41 | _library: Option>, 42 | } 43 | 44 | impl Runner { 45 | /// Creates a runner, spawning a [Task](async_std::task::Task) that will poll, in a loop, the `iteration` of the 46 | /// underlying [Node]. 47 | // 48 | // The control logic is relatively simple: 49 | // 1. 
When `start` is called, create a task that will poll indefinitely, in a loop, the `iteration` method of the 50 | // Node. 51 | // 2. Keep a reference to that task through its JoinHandle so we can call `cancel` to stop it. 52 | pub(crate) fn new(id: NodeId, node: Arc, library: Option>) -> Self { 53 | Self { 54 | id, 55 | node, 56 | handle: None, 57 | _library: library, 58 | } 59 | } 60 | 61 | /// Returns `true` if the Runner is running, i.e. if the `iteration` of the [Node] it wraps is being polled in a 62 | /// loop. 63 | pub(crate) fn is_running(&self) -> bool { 64 | self.handle.is_some() 65 | } 66 | 67 | /// Starts the runner: run the `iteration` method of the [Node] it wraps in a loop. 68 | /// 69 | /// This method is also idempotent: if the runner is already running, nothing will happen. 70 | pub(crate) async fn start(&mut self) -> Result<()> { 71 | if self.is_running() { 72 | return Ok(()); 73 | } 74 | 75 | self.node 76 | .on_resume() 77 | .await 78 | .with_context(|| format!("{}: call to `on_resume` failed", self.id))?; 79 | 80 | let id = self.id.clone(); 81 | let node = self.node.clone(); 82 | let iteration_span = tracing::trace_span!("iteration", node = %id); 83 | 84 | self.handle = Some(async_std::task::spawn( 85 | async move { 86 | let mut instant; 87 | let mut iteration; 88 | loop { 89 | instant = Instant::now(); 90 | iteration = node.iteration().await; 91 | tracing::trace!("duration: {}µs", instant.elapsed().as_micros()); 92 | if let Err(e) = iteration { 93 | tracing::error!("{:?}", e); 94 | } 95 | 96 | async_std::task::yield_now().await; 97 | } 98 | } 99 | .instrument(iteration_span), 100 | )); 101 | 102 | Ok(()) 103 | } 104 | 105 | /// Aborts the runner: stop the execution of its `iteration` method at its nearest `await` point. 106 | /// 107 | /// This method is idempotent: if the runner is not running, nothing will happen. 108 | /// 109 | /// # Warning 110 | /// 111 | /// This method will *drop* the future driving the execution of the current `iteration`. This will effectively 112 | /// cancel it at its nearest next `.await` point in its code. 113 | /// 114 | /// What this also means is that there is a possibility to leave the node in an **inconsistent state**. For 115 | /// instance, modified values that are not saved between several `.await` points would be lost if the node is 116 | /// aborted. 117 | pub(crate) async fn abort(&mut self) { 118 | if let Some(handle) = self.handle.take() { 119 | handle.cancel().await; 120 | self.node.on_abort().await; 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /zenoh-flow-runtime/src/shared_memory.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use std::sync::Arc; 16 | 17 | use anyhow::anyhow; 18 | use zenoh::{ 19 | prelude::r#async::*, 20 | shm::{SharedMemoryBuf, SharedMemoryManager}, 21 | }; 22 | use zenoh_flow_commons::{NodeId, Result, SharedMemoryConfiguration}; 23 | use zenoh_flow_nodes::prelude::{DataMessage, LinkMessage}; 24 | 25 | pub(crate) struct SharedMemory { 26 | session: Arc, 27 | manager: SharedMemoryManager, 28 | configuration: SharedMemoryConfiguration, 29 | } 30 | 31 | impl SharedMemory { 32 | pub(crate) fn new( 33 | id: &NodeId, 34 | session: Arc, 35 | shm_configuration: &SharedMemoryConfiguration, 36 | ) -> Self { 37 | Self { 38 | session, 39 | manager: SharedMemoryManager::make( 40 | format!("{}>shared-memory-manager", id), 41 | shm_configuration.size, 42 | ) 43 | .unwrap(), 44 | configuration: *shm_configuration, 45 | } 46 | } 47 | 48 | /// This method tries to send the [LinkMessage] via Zenoh's shared memory. 49 | /// 50 | /// # Errors 51 | /// 52 | /// This method can fail for multiple reasons: 53 | /// 1. Zenoh's [SharedMemoryManager] did not manage to allocate a buffer. 54 | /// 2. The serialization of the message into the buffer failed. 55 | /// 3. Zenoh failed to send the message via shared memory. 56 | pub(crate) async fn try_send_message( 57 | &mut self, 58 | key_expr: &str, 59 | message: LinkMessage, 60 | message_buffer: &mut Vec, 61 | payload_buffer: &mut Vec, 62 | ) -> Result<()> { 63 | message.serialize_bincode_into(message_buffer, payload_buffer)?; 64 | self.try_put_buffer(key_expr, message_buffer).await 65 | } 66 | 67 | pub(crate) async fn try_send_payload( 68 | &mut self, 69 | key_expr: &str, 70 | data: DataMessage, 71 | payload_buffer: &mut Vec, 72 | ) -> Result<()> { 73 | data.try_as_bytes_into(payload_buffer)?; 74 | self.try_put_buffer(key_expr, payload_buffer).await 75 | } 76 | 77 | async fn try_put_buffer(&mut self, key_expr: &str, buffer: &mut Vec) -> Result<()> { 78 | let mut shm_buffer = self.try_allocate_buffer(buffer.len()).await?; 79 | let slice = unsafe { shm_buffer.as_mut_slice() }; 80 | slice.clone_from_slice(buffer.as_mut_slice()); 81 | 82 | self.session 83 | .put(key_expr, shm_buffer) 84 | .congestion_control(CongestionControl::Block) 85 | .res() 86 | .await 87 | .map_err(|e| { 88 | anyhow!( 89 | r#"shared memory: Put on < {:?} > failed 90 | 91 | Caused by: 92 | 93 | {:?}"#, 94 | &key_expr, 95 | e 96 | ) 97 | }) 98 | } 99 | 100 | /// This methods attempts, twice, to allocate memory leveraging Zenoh's [SharedMemoryManager]. 101 | /// 102 | /// # Errors 103 | /// 104 | /// If the first call fails, we wait for `backoff` nanoseconds (as configured) and then perform (i) a garbage 105 | /// collection followed by (ii) a defragmentation. Once these two operations have finished, we try once more to 106 | /// allocate memory. 107 | /// 108 | /// If it fails again, we return the error. 
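    /// For instance, a configured `backoff` of 500_000 (the value is interpreted as nanoseconds, see the call to
    /// `Duration::from_nanos` below) amounts to waiting roughly 0.5 millisecond before garbage collecting,
    /// defragmenting and retrying.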
109 | pub(crate) async fn try_allocate_buffer(&mut self, size: usize) -> Result { 110 | if let Ok(buffer) = self.try_alloc(size) { 111 | return Ok(buffer); 112 | } 113 | tracing::trace!( 114 | "shared memory: backing off for {} nanoseconds", 115 | self.configuration.backoff 116 | ); 117 | async_std::task::sleep(std::time::Duration::from_nanos(self.configuration.backoff)).await; 118 | 119 | tracing::trace!( 120 | "shared memory: garbage collect recovered {} bytes", 121 | self.manager.garbage_collect() 122 | ); 123 | tracing::trace!( 124 | "shared memory: defragmented {} bytes", 125 | self.manager.defragment() 126 | ); 127 | 128 | self.try_alloc(size) 129 | } 130 | 131 | // Utility method that logs the error if the shared memory manager failed to allocate and converts the error 132 | // into one that is "compatible" with `anyhow`. 133 | fn try_alloc(&mut self, size: usize) -> Result { 134 | let buffer = self.manager.alloc(size); 135 | 136 | buffer.map_err(|e| { 137 | tracing::trace!( 138 | r#" 139 | shared memory: allocation of {} bytes failed 140 | 141 | Caused by: 142 | {:?} 143 | "#, 144 | size, 145 | e 146 | ); 147 | anyhow!("{:?}", e) 148 | }) 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /zenoh-plugin-zenoh-flow/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors = { workspace = true } 17 | categories = { workspace = true } 18 | description = "A Zenoh plugin embedding a Zenoh-Flow daemon." 19 | edition = { workspace = true } 20 | homepage = { workspace = true } 21 | license = { workspace = true } 22 | name = "zenoh-plugin-zenoh-flow" 23 | repository = { workspace = true } 24 | version = { workspace = true } 25 | 26 | [features] 27 | dynamic_plugin = [] 28 | default = ["dynamic_plugin"] 29 | 30 | [lib] 31 | crate-type = ["cdylib"] 32 | name = "zenoh_plugin_zenoh_flow" 33 | 34 | [dependencies] 35 | async-std = { workspace = true } 36 | flume = { workspace = true } 37 | git-version = { workspace = true } 38 | serde_json = { workspace = true } 39 | tracing = { workspace = true } 40 | tracing-subscriber = { workspace = true } 41 | uhlc = { workspace = true } 42 | zenoh = { workspace = true, features = ["unstable"] } 43 | zenoh-flow-daemon = { workspace = true } 44 | zenoh-plugin-trait = { workspace = true } 45 | -------------------------------------------------------------------------------- /zenoh-plugin-zenoh-flow/src/lib.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use flume::Sender; 16 | use zenoh::internal::{ 17 | plugins::{RunningPluginTrait, ZenohPlugin}, 18 | zerror, 19 | }; 20 | use zenoh_flow_daemon::daemon::*; 21 | use zenoh_plugin_trait::{plugin_long_version, plugin_version, Plugin, PluginControl}; 22 | 23 | pub struct ZenohFlowPlugin(Sender<()>); 24 | 25 | pub const GIT_VERSION: &str = git_version::git_version!(prefix = "v", cargo_prefix = "v"); 26 | 27 | #[cfg(feature = "dynamic_plugin")] 28 | zenoh_plugin_trait::declare_plugin!(ZenohFlowPlugin); 29 | 30 | impl ZenohPlugin for ZenohFlowPlugin {} 31 | impl Plugin for ZenohFlowPlugin { 32 | type StartArgs = zenoh::internal::runtime::Runtime; 33 | type Instance = zenoh::internal::plugins::RunningPlugin; 34 | 35 | const DEFAULT_NAME: &'static str = "zenoh-flow"; 36 | const PLUGIN_VERSION: &'static str = plugin_version!(); 37 | const PLUGIN_LONG_VERSION: &'static str = plugin_long_version!(); 38 | 39 | fn start( 40 | name: &str, 41 | zenoh_runtime: &Self::StartArgs, 42 | ) -> zenoh::Result { 43 | let _ = tracing_subscriber::fmt::try_init(); 44 | 45 | let zenoh_config = zenoh_runtime.config().lock(); 46 | let zenoh_flow_config = zenoh_config 47 | .plugin(name) 48 | .cloned() 49 | .ok_or_else(|| zerror!("Plugin '{}': missing configuration", name))?; 50 | 51 | let (abort_tx, abort_rx) = flume::bounded(1); 52 | let zenoh_runtime = zenoh_runtime.clone(); 53 | async_std::task::spawn(async move { 54 | let zenoh_session = zenoh::session::init(zenoh_runtime).await.unwrap(); 55 | 56 | let zenoh_flow_config: ZenohFlowConfiguration = 57 | match serde_json::from_value(zenoh_flow_config) { 58 | Ok(config) => config, 59 | Err(e) => { 60 | tracing::error!("Failed to parse configuration: {e:?}"); 61 | return; 62 | } 63 | }; 64 | 65 | let daemon = match Daemon::spawn_from_config(zenoh_session, zenoh_flow_config).await { 66 | Ok(daemon) => daemon, 67 | Err(e) => { 68 | tracing::error!("Failed to spawn the Daemon from a configuration: {e:?}"); 69 | return; 70 | } 71 | }; 72 | 73 | if let Err(e) = abort_rx.recv_async().await { 74 | tracing::error!("Abort channel failed with: {e:?}"); 75 | } 76 | 77 | daemon.stop().await; 78 | }); 79 | 80 | Ok(Box::new(ZenohFlowPlugin(abort_tx))) 81 | } 82 | } 83 | 84 | impl PluginControl for ZenohFlowPlugin {} 85 | impl RunningPluginTrait for ZenohFlowPlugin { 86 | fn adminspace_getter( 87 | &self, 88 | selector: &zenoh::key_expr::KeyExpr, 89 | plugin_status_key: &str, 90 | ) -> zenoh::Result> { 91 | let mut responses = Vec::new(); 92 | let version_key = [plugin_status_key, "/__version__"].concat(); 93 | let ke = zenoh::key_expr::KeyExpr::new(&version_key)?; 94 | 95 | if selector.intersects(&ke) { 96 | responses.push(zenoh::internal::plugins::Response::new( 97 | version_key, 98 | GIT_VERSION.into(), 99 | )); 100 | } 101 | Ok(responses) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /zfctl/Cargo.toml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright © 2021 ZettaScale Technology 3 | # 4 | # This program and the accompanying materials are made available under the 5 | # terms of the Eclipse Public License 2.0 which is available at 6 | # http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | # which is available at https://www.apache.org/licenses/LICENSE-2.0. 
8 | # 9 | # SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | # 11 | # Contributors: 12 | # ZettaScale Zenoh Team, 13 | # 14 | 15 | [package] 16 | authors.workspace = true 17 | categories.workspace = true 18 | description.workspace = true 19 | edition.workspace = true 20 | homepage.workspace = true 21 | license.workspace = true 22 | name = "zfctl" 23 | readme.workspace = true 24 | repository.workspace = true 25 | version.workspace = true 26 | 27 | [dependencies] 28 | anyhow = { workspace = true } 29 | async-std = { workspace = true, features = ["attributes"] } 30 | base64 = { workspace = true } 31 | bytesize = { workspace = true } 32 | clap = { workspace = true, features = ["derive", "wrap_help"] } 33 | comfy-table = "7.1.0" 34 | derive_more = "0.99.10" 35 | dirs = "5.0" 36 | signal-hook = "0.3" 37 | signal-hook-async-std = "0.2" 38 | git-version = { workspace = true } 39 | itertools = "0.12" 40 | log = { workspace = true } 41 | rand = "0.8.3" 42 | semver = { version = "1.0.4", features = ["serde"] } 43 | serde = { workspace = true, features = ["derive"] } 44 | serde_derive = { workspace = true } 45 | serde_json = { workspace = true } 46 | serde_yaml = { workspace = true } 47 | tracing = { workspace = true } 48 | tracing-subscriber = { workspace = true, features = ["env-filter"] } 49 | uuid = { workspace = true, features = ["serde", "v4"] } 50 | zenoh = { workspace = true } 51 | zenoh-flow-commons = { workspace = true } 52 | zenoh-flow-daemon = { workspace = true } 53 | zenoh-flow-runtime = { workspace = true } 54 | zenoh-flow-descriptors = { workspace = true } 55 | zenoh-flow-records = { workspace = true } 56 | -------------------------------------------------------------------------------- /zfctl/src/main.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | mod instance_command; 16 | use instance_command::InstanceCommand; 17 | 18 | mod daemon_command; 19 | use daemon_command::DaemonCommand; 20 | 21 | mod run_local_command; 22 | use run_local_command::RunLocalCommand; 23 | use tracing::Instrument; 24 | use tracing_subscriber::EnvFilter; 25 | 26 | mod utils; 27 | use std::path::PathBuf; 28 | 29 | use clap::{ArgGroup, Parser, Subcommand}; 30 | use utils::{get_random_runtime, get_runtime_by_name}; 31 | use zenoh_flow_commons::RuntimeId; 32 | 33 | const ZENOH_FLOW_INTERNAL_ERROR: &str = r#" 34 | `zfctl` encountered a fatal internal error. 35 | 36 | If the above error log does not help you troubleshoot the reason, you can contact us on: 37 | - Discord: https://discord.gg/CeJB5rxk9x 38 | - GitHub: https://github.com/eclipse-zenoh/zenoh-flow 39 | "#; 40 | 41 | /// Macro to facilitate the creation of a [Row](comfy_table::Row) where its contents are not of the same type. 42 | #[macro_export] 43 | macro_rules! 
row { 44 | ( 45 | $( $cell: expr ),* 46 | ) => { 47 | comfy_table::Row::from(vec![ $( &$cell as &dyn std::fmt::Display ),*]) 48 | }; 49 | } 50 | 51 | #[derive(Parser)] 52 | struct Zfctl { 53 | /// The path to a Zenoh configuration to manage the connection to the Zenoh 54 | /// network. 55 | /// 56 | /// If no configuration is provided, `zfctl` will default to connecting as 57 | /// a peer with multicast scouting enabled. 58 | #[arg(short = 'z', long, verbatim_doc_comment)] 59 | zenoh_configuration: Option, 60 | 61 | #[command(subcommand)] 62 | command: Command, 63 | } 64 | 65 | #[derive(Subcommand)] 66 | enum Command { 67 | /// To manage a data flow instance. 68 | /// 69 | /// This command accepts an optional `name` or `id` of a Zenoh-Flow Daemon 70 | /// to contact. If no name or id is provided, one is randomly selected. 71 | #[command(group( 72 | ArgGroup::new("exclusive") 73 | .args(&["daemon_id", "daemon_name"]) 74 | .required(false) 75 | .multiple(false) 76 | ))] 77 | Instance { 78 | #[command(subcommand)] 79 | command: InstanceCommand, 80 | /// The unique identifier of the Zenoh-Flow daemon to contact. 81 | #[arg(short = 'i', long = "id", verbatim_doc_comment)] 82 | daemon_id: Option, 83 | /// The name of the Zenoh-Flow daemon to contact. 84 | /// 85 | /// If several daemons share the same name, `zfctl` will abort 86 | /// its execution asking you to instead use their `id`. 87 | #[arg(short = 'n', long = "name", verbatim_doc_comment)] 88 | daemon_name: Option, 89 | }, 90 | 91 | /// To interact with a Zenoh-Flow daemon. 92 | #[command(subcommand)] 93 | Daemon(DaemonCommand), 94 | 95 | /// Run a dataflow locally. 96 | #[command(verbatim_doc_comment)] 97 | RunLocal(RunLocalCommand), 98 | } 99 | 100 | #[async_std::main] 101 | async fn main() { 102 | let env_filter = EnvFilter::try_from_default_env() 103 | .unwrap_or_else(|_| EnvFilter::new("zfctl=info,zenoh_flow=info")); 104 | let subscriber = tracing_subscriber::fmt() 105 | .with_env_filter(env_filter) 106 | .with_target(false) 107 | .finish(); 108 | let _ = tracing::subscriber::set_global_default(subscriber); 109 | 110 | let span = tracing::info_span!("zfctl"); 111 | let _guard = span.enter(); 112 | 113 | let zfctl = Zfctl::parse(); 114 | 115 | let zenoh_config = match zfctl.zenoh_configuration { 116 | Some(path) => match zenoh::Config::from_file(path.clone()) { 117 | Ok(path) => path, 118 | Err(e) => { 119 | tracing::error!( 120 | "Failed to parse Zenoh configuration from < {} >: {e:?}", 121 | path.display() 122 | ); 123 | return; 124 | } 125 | }, 126 | None => zenoh::Config::default(), 127 | }; 128 | 129 | async { 130 | let session = match zenoh::open(zenoh_config).await { 131 | Ok(session) => session, 132 | Err(e) => { 133 | tracing::error!("Failed to open Zenoh session: {e:?}"); 134 | return; 135 | } 136 | }; 137 | 138 | tracing::info!("Using ZID: {}", session.zid()); 139 | 140 | let result = match zfctl.command { 141 | Command::Instance { 142 | command, 143 | daemon_id, 144 | daemon_name, 145 | } => { 146 | let orchestrator_id = match (daemon_id, daemon_name) { 147 | (Some(id), _) => id, 148 | (None, Some(name)) => match get_runtime_by_name(&session, &name).await { 149 | Ok(id) => id, 150 | Err(e) => { 151 | tracing::error!("{e:?}"); 152 | return; 153 | } 154 | }, 155 | (None, None) => match get_random_runtime(&session).await { 156 | Ok(id) => id, 157 | Err(e) => { 158 | tracing::error!("{e:?}"); 159 | return; 160 | } 161 | }, 162 | }; 163 | 164 | command.run(session, orchestrator_id).await 165 | } 166 | Command::Daemon(command) 
=> command.run(session).await, 167 | Command::RunLocal(command) => command.run(session).await, 168 | }; 169 | 170 | if let Err(e) = result { 171 | tracing::error!("{e:?}"); 172 | } 173 | } 174 | .in_current_span() 175 | .await; 176 | } 177 | -------------------------------------------------------------------------------- /zfctl/src/run_local_command.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use anyhow::Context; 4 | use async_std::io::ReadExt; 5 | use clap::Parser; 6 | use zenoh::Session; 7 | use zenoh_flow_commons::{parse_vars, Result, Vars}; 8 | use zenoh_flow_descriptors::{DataFlowDescriptor, FlattenedDataFlowDescriptor}; 9 | use zenoh_flow_records::DataFlowRecord; 10 | use zenoh_flow_runtime::{Extensions, Runtime}; 11 | 12 | #[derive(Parser)] 13 | pub struct RunLocalCommand { 14 | /// The data flow to execute. 15 | flow: PathBuf, 16 | /// The, optional, location of the configuration to load nodes implemented not in Rust. 17 | #[arg(short, long, value_name = "path")] 18 | extensions: Option, 19 | /// Variables to add / overwrite in the `vars` section of your data 20 | /// flow, with the form `KEY=VALUE`. Can be repeated multiple times. 21 | /// 22 | /// Example: 23 | /// --vars HOME_DIR=/home/zenoh-flow --vars BUILD=debug 24 | #[arg(long, value_parser = parse_vars::, verbatim_doc_comment)] 25 | vars: Option>, 26 | } 27 | 28 | impl RunLocalCommand { 29 | pub async fn run(self, session: Session) -> Result<()> { 30 | let extensions = match self.extensions { 31 | Some(extensions_path) => { 32 | let (extensions, _) = zenoh_flow_commons::try_parse_from_file::( 33 | extensions_path.as_os_str(), 34 | Vars::default(), 35 | ) 36 | .context(format!( 37 | "Failed to load Loader configuration from < {} >", 38 | &extensions_path.display() 39 | ))?; 40 | 41 | extensions 42 | } 43 | None => Extensions::default(), 44 | }; 45 | 46 | let vars = match self.vars { 47 | Some(v) => Vars::from(v), 48 | None => Vars::default(), 49 | }; 50 | 51 | let (data_flow, vars) = zenoh_flow_commons::try_parse_from_file::( 52 | self.flow.as_os_str(), 53 | vars, 54 | ) 55 | .context(format!( 56 | "Failed to load data flow descriptor from < {} >", 57 | &self.flow.display() 58 | ))?; 59 | 60 | let flattened_flow = 61 | FlattenedDataFlowDescriptor::try_flatten(data_flow, vars).context(format!( 62 | "Failed to flattened data flow extracted from < {} >", 63 | &self.flow.display() 64 | ))?; 65 | 66 | let runtime_builder = Runtime::builder("zenoh-flow-standalone-runtime") 67 | .session(session) 68 | .add_extensions(extensions) 69 | .context("Failed to add extensions")?; 70 | 71 | let runtime = runtime_builder 72 | .build() 73 | .await 74 | .context("Failed to build the Zenoh-Flow runtime")?; 75 | 76 | let record = DataFlowRecord::try_new(&flattened_flow, runtime.id()) 77 | .context("Failed to create a Record from the flattened data flow descriptor")?; 78 | 79 | let instance_id = record.instance_id().clone(); 80 | let record_name = record.name().clone(); 81 | runtime 82 | .try_load_data_flow(record) 83 | .await 84 | .context("Failed to load Record")?; 85 | 86 | runtime 87 | .try_start_instance(&instance_id) 88 | .await 89 | .context(format!("Failed to start data flow < {} >", &instance_id))?; 90 | 91 | let mut stdin = async_std::io::stdin(); 92 | let mut input = [0_u8]; 93 | println!( 94 | r#" 95 | The flow ({}) < {} > was successfully started. 96 | To abort its execution, simply enter 'q'. 
97 | "#, 98 | record_name, instance_id 99 | ); 100 | 101 | loop { 102 | let _ = stdin.read_exact(&mut input).await; 103 | if input[0] == b'q' { 104 | break; 105 | } 106 | } 107 | 108 | runtime 109 | .try_delete_instance(&instance_id) 110 | .await 111 | .context(format!("Failed to delete data flow < {} >:", &instance_id))?; 112 | 113 | Ok(()) 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /zfctl/src/utils.rs: -------------------------------------------------------------------------------- 1 | // 2 | // Copyright © 2021 ZettaScale Technology 3 | // 4 | // This program and the accompanying materials are made available under the 5 | // terms of the Eclipse Public License 2.0 which is available at 6 | // http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 7 | // which is available at https://www.apache.org/licenses/LICENSE-2.0. 8 | // 9 | // SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 10 | // 11 | // Contributors: 12 | // ZettaScale Zenoh Team, 13 | // 14 | 15 | use itertools::Itertools; 16 | use rand::Rng; 17 | use zenoh::{query::ConsolidationMode, Session}; 18 | use zenoh_flow_commons::{Result, RuntimeId}; 19 | use zenoh_flow_daemon::queries::{selector_all_runtimes, RuntimeInfo, RuntimesQuery}; 20 | 21 | /// Returns the list of [RuntimeInfo] of the reachable Zenoh-Flow Daemon(s). 22 | /// 23 | /// # Panic 24 | /// 25 | /// This function will panic if: 26 | /// - (internal error) the query to list the Zenoh-Flow Daemons could not be serialised by `serde_json`, 27 | /// - the query on the Zenoh network failed, 28 | /// - no Zenoh-Flow Daemon is reachable. 29 | pub(crate) async fn get_all_runtimes(session: &Session) -> Result> { 30 | tracing::debug!("Fetching available Zenoh-Flow Daemon(s)"); 31 | let value = match serde_json::to_vec(&RuntimesQuery::List) { 32 | Ok(value) => value, 33 | Err(e) => { 34 | anyhow::bail!("`serde_json` failed to serialize `RuntimeQuery::List`: {e:?}"); 35 | } 36 | }; 37 | 38 | let runtime_replies = match session 39 | .get(selector_all_runtimes()) 40 | .payload(value) 41 | // We want to address all the Zenoh-Flow daemons that are reachable on the Zenoh network. 42 | .consolidation(ConsolidationMode::None) 43 | .await 44 | { 45 | Ok(replies) => replies, 46 | Err(e) => { 47 | anyhow::bail!("Failed to send Query to Zenoh-Flow Daemon(s): {:?}", e); 48 | } 49 | }; 50 | 51 | let mut runtimes = Vec::new(); 52 | while let Ok(reply) = runtime_replies.recv_async().await { 53 | match reply.result() { 54 | Ok(sample) => { 55 | match serde_json::from_slice::(&sample.payload().to_bytes()) { 56 | Ok(runtime_info) => runtimes.push(runtime_info), 57 | Err(e) => { 58 | tracing::error!("Failed to parse a reply as a `RuntimeId`: {:?}", e) 59 | } 60 | } 61 | } 62 | Err(e) => tracing::warn!("A reply returned an error: {:?}", e), 63 | } 64 | } 65 | 66 | if runtimes.is_empty() { 67 | anyhow::bail!("Found no Zenoh-Flow Daemon on the network"); 68 | } 69 | 70 | tracing::debug!("Found {} Zenoh-Flow Daemon(s)", runtimes.len()); 71 | 72 | Ok(runtimes) 73 | } 74 | 75 | /// Returns the unique identifier of the Zenoh-Flow Daemon that has the provided `name`. 76 | /// 77 | /// # Panic 78 | /// 79 | /// This function will panic if: 80 | /// - there is no Zenoh-Flow Daemon that has the provided name, 81 | /// - there are more than 1 Zenoh-Flow Daemon with the provided name. 
82 | pub(crate) async fn get_runtime_by_name(session: &Session, name: &str) -> Result<RuntimeId> {
83 |     let runtimes = get_all_runtimes(session).await?;
84 |     let mut matching_runtimes = runtimes
85 |         .iter()
86 |         .filter(|&r_info| r_info.name.as_ref() == name)
87 |         .collect_vec();
88 | 
89 |     if matching_runtimes.is_empty() {
90 |         anyhow::bail!("Found no Zenoh-Flow Daemon with name < {name} >");
91 |     } else if matching_runtimes.len() > 1 {
92 |         tracing::error!("Found multiple Zenoh-Flow Daemons named < {name} >:");
93 |         matching_runtimes.iter().for_each(|&r_info| {
94 |             tracing::error!("- {} - (id) {}", r_info.name, r_info.id);
95 |         });
96 |         anyhow::bail!(
97 |             "There are multiple Zenoh-Flow Daemons named < {name} >, please use their 'zid' \
98 |              instead"
99 |         );
100 |     } else {
101 |         Ok(matching_runtimes.pop().unwrap().id.clone())
102 |     }
103 | }
104 | 
105 | /// Returns the unique identifier of a reachable Zenoh-Flow Daemon.
106 | ///
107 | /// # Panic
108 | ///
109 | /// This function will panic if:
110 | /// - (internal error) the query to list the Zenoh-Flow Daemons could not be serialised by `serde_json`,
111 | /// - the query on the Zenoh network failed,
112 | /// - no Zenoh-Flow Daemon is reachable.
113 | pub(crate) async fn get_random_runtime(session: &Session) -> Result<RuntimeId> {
114 |     let mut runtimes = get_all_runtimes(session).await?;
115 |     let orchestrator = runtimes.remove(rand::thread_rng().gen_range(0..runtimes.len()));
116 | 
117 |     tracing::info!(
118 |         "Selected Zenoh-Flow Daemon < {}: {} > as Orchestrator",
119 |         orchestrator.name,
120 |         orchestrator.id
121 |     );
122 | 
123 |     Ok(orchestrator.id)
124 | }
125 | 
--------------------------------------------------------------------------------