├── .github ├── release-drafter.yml └── workflows │ ├── ci.yml │ └── release-drafter.yml ├── .gitignore ├── Cargo.toml ├── README.md ├── download └── Workload │ ├── config.json │ └── rootfs.tar ├── hawkbit ├── Cargo.toml ├── README.md ├── examples │ └── polling.rs ├── src │ ├── ddi.rs │ ├── ddi │ │ ├── cancel_action.rs │ │ ├── client.rs │ │ ├── common.rs │ │ ├── config_data.rs │ │ ├── deployment_base.rs │ │ ├── feedback.rs │ │ └── poll.rs │ └── lib.rs └── tests │ ├── data │ └── test.txt │ └── tests.rs └── hawkbit_mock ├── Cargo.toml ├── README.md └── src ├── ddi.rs └── lib.rs /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | template: | 2 | ## What’s Changed 3 | 4 | $CHANGES 5 | 6 | categories: 7 | - title: '🚀 Features' 8 | label: 'feature' 9 | - title: '🐛 Bug Fixes' 10 | labels: 11 | - 'fix' 12 | - 'bugfix' 13 | - 'bug' 14 | 15 | exclude-labels: 16 | - 'skip-changelog' -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: CI 4 | 5 | jobs: 6 | check: 7 | name: Check 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - uses: actions-rs/toolchain@v1 12 | with: 13 | profile: minimal 14 | toolchain: stable 15 | override: true 16 | - uses: actions-rs/cargo@v1 17 | with: 18 | command: check 19 | 20 | test: 21 | name: Test Suite 22 | runs-on: ${{ matrix.os }} 23 | strategy: 24 | matrix: 25 | os: [ubuntu-latest, windows-latest, macOS-latest] 26 | steps: 27 | - uses: actions/checkout@v2 28 | - uses: actions-rs/toolchain@v1 29 | with: 30 | profile: minimal 31 | toolchain: stable 32 | override: true 33 | - uses: actions-rs/cargo@v1 34 | with: 35 | command: test 36 | args: --all-features 37 | 38 | fmt: 39 | name: Rustfmt 40 | runs-on: ubuntu-latest 41 | steps: 42 | - uses: actions/checkout@v2 43 | - uses: actions-rs/toolchain@v1 44 | with: 45 | profile: minimal 46 | toolchain: stable 47 | override: true 48 | - run: rustup component add rustfmt 49 | - uses: actions-rs/cargo@v1 50 | with: 51 | command: fmt 52 | args: --all -- --check 53 | 54 | clippy: 55 | name: Clippy 56 | runs-on: ubuntu-latest 57 | steps: 58 | - uses: actions/checkout@v2 59 | - uses: actions-rs/toolchain@v1 60 | with: 61 | profile: minimal 62 | toolchain: stable 63 | override: true 64 | - run: rustup component add clippy 65 | - uses: actions-rs/cargo@v1 66 | with: 67 | command: clippy 68 | args: -- -D warnings 69 | 70 | coverage: 71 | name: Coverage 72 | runs-on: ubuntu-latest 73 | steps: 74 | - uses: actions/checkout@v1 75 | - uses: actions-rs/toolchain@v1 76 | with: 77 | toolchain: nightly 78 | override: true 79 | components: llvm-tools-preview 80 | - uses: actions-rs/cargo@v1 81 | with: 82 | command: test 83 | args: --all-features --no-fail-fast 84 | env: 85 | RUSTFLAGS: "-Zinstrument-coverage" 86 | LLVM_PROFILE_FILE: "hawkbitrs-%p-%m.profraw" 87 | - name: Install grcov 88 | run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi 89 | - name: Run grcov 90 | run: grcov . --binary-path ./target/debug/ -s . 
-t lcov --branch --ignore-not-existing --ignore "hawkbit/examples/*" --ignore "*target*" -o coverage.lcov 91 | - name: Upload coverage to Codecov 92 | uses: codecov/codecov-action@v1 93 | with: 94 | files: ./coverage.lcov -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | # branches to consider in the event; optional, defaults to all 6 | branches: 7 | - main 8 | 9 | jobs: 10 | update_release_draft: 11 | runs-on: ubuntu-latest 12 | steps: 13 | # Drafts your next Release notes as Pull Requests are merged into "master" 14 | - uses: release-drafter/release-drafter@v5 15 | env: 16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "hawkbit", 5 | "hawkbit_mock", 6 | ] -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # hawkbitrs [![codecov](https://codecov.io/gh/collabora/hawkbit-rs/branch/main/graph/badge.svg?token=GRPKKE2XT5)](https://codecov.io/gh/collabora/hawkbit-rs) [![CI](https://github.com/collabora/hawkbit-rs/workflows/CI/badge.svg)](https://github.com/collabora/hawkbit-rs/actions) 2 | 3 | This module contains a couple of [Rust](https://www.rust-lang.org) crates 4 | to implement [Eclipse hawkBit](https://www.eclipse.org/hawkbit/) clients: 5 | 6 | - [hawkbit](hawkbit/): high-level client-side API [![](https://img.shields.io/crates/v/hawkbit.svg)](https://crates.io/crates/hawkbit) [![](https://docs.rs/hawkbit/badge.svg)](https://docs.rs/hawkbit/) 7 | - [hawkbit_mock](hawkbit_mock/): mock server to use for testing [![](https://img.shields.io/crates/v/hawkbit_mock.svg)](https://crates.io/crates/hawkbit_mock) [![](https://docs.rs/hawkbit_mock/badge.svg)](https://docs.rs/hawkbit_mock/) -------------------------------------------------------------------------------- /download/Workload/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "ociVersion": "1.0.2-dev", 3 | "process": { 4 | "terminal": false, 5 | "user": { 6 | "uid": 0, 7 | "gid": 0 8 | }, 9 | "args": [ 10 | "ls" 11 | ], 12 | "env": [ 13 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", 14 | "TERM=xterm" 15 | ], 16 | "cwd": "/", 17 | "capabilities": { 18 | "bounding": [ 19 | "CAP_AUDIT_WRITE", 20 | "CAP_KILL", 21 | "CAP_NET_BIND_SERVICE" 22 | ], 23 | "effective": [ 24 | "CAP_AUDIT_WRITE", 25 | "CAP_KILL", 26 | "CAP_NET_BIND_SERVICE" 27 | ], 28 | "inheritable": [ 29 | "CAP_AUDIT_WRITE", 30 | "CAP_KILL", 31 | "CAP_NET_BIND_SERVICE" 32 | ], 33 | "permitted": [ 34 | "CAP_AUDIT_WRITE", 35 | "CAP_KILL", 36 | "CAP_NET_BIND_SERVICE" 37 | ], 38 | "ambient": [ 39 | "CAP_AUDIT_WRITE", 40 | "CAP_KILL", 41 | "CAP_NET_BIND_SERVICE" 42 | ] 43 | }, 44 | "rlimits": [ 45 | { 46 | "type": "RLIMIT_NOFILE", 47 | "hard": 1024, 48 | "soft": 1024 49 | } 50 | ], 51 | "noNewPrivileges": true 52 | }, 53 | "root": { 54 | "path": "rootfs", 55 | "readonly": true 
56 | }, 57 | "hostname": "runc", 58 | "mounts": [ 59 | { 60 | "destination": "/proc", 61 | "type": "proc", 62 | "source": "proc" 63 | }, 64 | { 65 | "destination": "/dev", 66 | "type": "tmpfs", 67 | "source": "tmpfs", 68 | "options": [ 69 | "nosuid", 70 | "strictatime", 71 | "mode=755", 72 | "size=65536k" 73 | ] 74 | }, 75 | { 76 | "destination": "/dev/pts", 77 | "type": "devpts", 78 | "source": "devpts", 79 | "options": [ 80 | "nosuid", 81 | "noexec", 82 | "newinstance", 83 | "ptmxmode=0666", 84 | "mode=0620" 85 | ] 86 | }, 87 | { 88 | "destination": "/dev/shm", 89 | "type": "tmpfs", 90 | "source": "shm", 91 | "options": [ 92 | "nosuid", 93 | "noexec", 94 | "nodev", 95 | "mode=1777", 96 | "size=65536k" 97 | ] 98 | }, 99 | { 100 | "destination": "/dev/mqueue", 101 | "type": "mqueue", 102 | "source": "mqueue", 103 | "options": [ 104 | "nosuid", 105 | "noexec", 106 | "nodev" 107 | ] 108 | }, 109 | { 110 | "destination": "/sys", 111 | "type": "none", 112 | "source": "/sys", 113 | "options": [ 114 | "rbind", 115 | "nosuid", 116 | "noexec", 117 | "nodev", 118 | "ro" 119 | ] 120 | } 121 | ], 122 | "linux": { 123 | "uidMappings": [ 124 | { 125 | "containerID": 0, 126 | "hostID": 1000, 127 | "size": 1 128 | } 129 | ], 130 | "gidMappings": [ 131 | { 132 | "containerID": 0, 133 | "hostID": 1000, 134 | "size": 1 135 | } 136 | ], 137 | "namespaces": [ 138 | { 139 | "type": "pid" 140 | }, 141 | { 142 | "type": "ipc" 143 | }, 144 | { 145 | "type": "uts" 146 | }, 147 | { 148 | "type": "mount" 149 | }, 150 | { 151 | "type": "cgroup" 152 | }, 153 | { 154 | "type": "user" 155 | } 156 | ], 157 | "maskedPaths": [ 158 | "/proc/acpi", 159 | "/proc/asound", 160 | "/proc/kcore", 161 | "/proc/keys", 162 | "/proc/latency_stats", 163 | "/proc/timer_list", 164 | "/proc/timer_stats", 165 | "/proc/sched_debug", 166 | "/sys/firmware", 167 | "/proc/scsi" 168 | ], 169 | "readonlyPaths": [ 170 | "/proc/bus", 171 | "/proc/fs", 172 | "/proc/irq", 173 | "/proc/sys", 174 | "/proc/sysrq-trigger" 175 | ] 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /download/Workload/rootfs.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/collabora/hawkbit-rs/8654cd8468de40b1839261ee66c8585abf6ca4b5/download/Workload/rootfs.tar -------------------------------------------------------------------------------- /hawkbit/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hawkbit" 3 | version = "0.6.0" 4 | authors = ["Guillaume Desmottes "] 5 | edition = "2018" 6 | categories = ["api-bindings"] 7 | description = "Client side API to interact with Eclipse hawkBit" 8 | license = "MIT OR Apache-2.0" 9 | readme = "README.md" 10 | repository = "https://github.com/collabora/hawkbit-rs" 11 | documentation = "https://docs.rs/hawkbit_mock/" 12 | 13 | [dependencies] 14 | reqwest = { version = "0.11", features = ["json", "stream"] } 15 | tokio = { version = "1.1", features = ["time", "fs"] } 16 | serde = { version = "1.0", features = ["derive"] } 17 | serde_json = "1.0" 18 | thiserror = "1.0" 19 | url = "2.2" 20 | strum = { version = "0.21", features = ["derive"] } 21 | cfg-if = "1.0" 22 | digest = { version = "0.9", optional = true } 23 | md-5 = { version = "0.9", optional = true } 24 | sha-1 = { version = "0.9", optional = true } 25 | sha2 = { version = "0.9", optional = true } 26 | generic-array = {version = "0.14", optional = true } 27 | futures = "0.3" 28 | bytes = 
"1.0" 29 | 30 | [dev-dependencies] 31 | hawkbit_mock = { path = "../hawkbit_mock/" } 32 | structopt = "0.3" 33 | anyhow = "1.0" 34 | log = "0.4" 35 | env_logger = "0.8" 36 | tempdir = "0.3" 37 | assert_matches = "1.4" 38 | 39 | [features] 40 | hash-digest= ["digest", "generic-array"] 41 | hash-md5 = ["md-5", "hash-digest"] 42 | hash-sha1 = ["sha-1", "hash-digest"] 43 | hash-sha256 = ["sha2", "hash-digest"] 44 | -------------------------------------------------------------------------------- /hawkbit/README.md: -------------------------------------------------------------------------------- 1 | # hawkbit 2 | 3 | Client side API to interact with [Eclipse hawkBit](https://www.eclipse.org/hawkbit/). 4 | 5 | So far only the [Direct Device Integration API](https://www.eclipse.org/hawkbit/apis/ddi_api/) 6 | is implemented. See [this example](https://github.com/collabora/hawkbit-rs/blob/main/hawkbit/examples/polling.rs) 7 | demonstrating how to use it. 8 | 9 | ## Documentation 10 | 11 | See the [crate documentation](https://docs.rs/hawkbit/). -------------------------------------------------------------------------------- /hawkbit/examples/polling.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | use std::path::Path; 5 | 6 | use anyhow::Result; 7 | use hawkbit::ddi::{Client, Execution, Finished}; 8 | use serde::Serialize; 9 | use structopt::StructOpt; 10 | use tokio::time::sleep; 11 | 12 | #[derive(StructOpt, Debug)] 13 | #[structopt(name = "polling example")] 14 | struct Opt { 15 | url: String, 16 | controller: String, 17 | key: String, 18 | #[structopt(short, long, default_value = "DEFAULT")] 19 | tenant: String, 20 | } 21 | 22 | #[derive(Debug, Serialize)] 23 | pub(crate) struct ConfigData { 24 | #[serde(rename = "HwRevision")] 25 | hw_revision: String, 26 | } 27 | 28 | #[tokio::main] 29 | async fn main() -> Result<()> { 30 | let opt = Opt::from_args(); 31 | 32 | let ddi = Client::new(&opt.url, &opt.tenant, &opt.controller, &opt.key)?; 33 | 34 | loop { 35 | let reply = ddi.poll().await?; 36 | dbg!(&reply); 37 | 38 | if let Some(request) = reply.config_data_request() { 39 | println!("Uploading config data"); 40 | let data = ConfigData { 41 | hw_revision: "1.0".to_string(), 42 | }; 43 | 44 | request 45 | .upload(Execution::Closed, Finished::Success, None, data, vec![]) 46 | .await?; 47 | } 48 | 49 | if let Some(update) = reply.update() { 50 | println!("Pending update"); 51 | 52 | let update = update.fetch().await?; 53 | dbg!(&update); 54 | 55 | update 56 | .send_feedback(Execution::Proceeding, Finished::None, vec!["Downloading"]) 57 | .await?; 58 | 59 | let artifacts = update.download(Path::new("./download/")).await?; 60 | dbg!(&artifacts); 61 | 62 | #[cfg(feature = "hash-digest")] 63 | for artifact in artifacts { 64 | #[cfg(feature = "hash-md5")] 65 | artifact.check_md5().await?; 66 | #[cfg(feature = "hash-sha1")] 67 | artifact.check_sha1().await?; 68 | #[cfg(feature = "hash-sha256")] 69 | artifact.check_sha256().await?; 70 | } 71 | 72 | update 73 | .send_feedback(Execution::Closed, Finished::Success, vec![]) 74 | .await?; 75 | } 76 | 77 | if let Some(cancel_action) = reply.cancel_action() { 78 | println!("Action to cancel: {}", cancel_action.id().await?); 79 | 80 | cancel_action 81 | .send_feedback(Execution::Proceeding, Finished::None, vec!["Cancelling"]) 82 | .await?; 83 | 84 | cancel_action 85 | .send_feedback(Execution::Closed, Finished::Success, vec![]) 86 | 
.await?; 87 | 88 | println!("Action cancelled"); 89 | } 90 | 91 | let t = reply.polling_sleep()?; 92 | sleep(t).await; 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /hawkbit/src/ddi.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | //! [Direct Device Integration](https://www.eclipse.org/hawkbit/apis/ddi_api/) API 5 | //! 6 | //! This module provides API for devices to poll their hawkBit server, upload their configuration 7 | //! and download updates. 8 | //! 9 | //! Devices would typically create a [`Client`] using [`Client::new`] 10 | //! and would then regularly call [`Client::poll`] checking for updates. 11 | //! 12 | //! See `examples/polling.rs` demonstrating how to use it. 13 | 14 | // FIXME: set link to hawbit/examples/polling.rs once we have the final public repo 15 | 16 | mod cancel_action; 17 | mod client; 18 | mod common; 19 | mod config_data; 20 | mod deployment_base; 21 | mod feedback; 22 | mod poll; 23 | 24 | pub use cancel_action::CancelAction; 25 | pub use client::{Client, Error}; 26 | pub use common::{Execution, Finished}; 27 | pub use config_data::{ConfigRequest, Mode}; 28 | #[cfg(feature = "hash-digest")] 29 | pub use deployment_base::ChecksumType; 30 | pub use deployment_base::{ 31 | Artifact, Chunk, DownloadedArtifact, MaintenanceWindow, Type, Update, UpdatePreFetch, 32 | }; 33 | pub use poll::Reply; 34 | -------------------------------------------------------------------------------- /hawkbit/src/ddi/cancel_action.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2021, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | // Cancelled operation 5 | 6 | use reqwest::Client; 7 | use serde::Deserialize; 8 | 9 | use crate::ddi::client::Error; 10 | use crate::ddi::common::{send_feedback_internal, Execution, Finished}; 11 | 12 | /// A request from the server to cancel an update. 13 | /// 14 | /// Call [`CancelAction::id()`] to retrieve the ID of the action to cancel. 15 | /// 16 | /// Cancel actions need to be closed by sending feedback to the server using 17 | /// [`CancelAction::send_feedback`] with either 18 | /// [`Finished::Success`] or [`Finished::Failure`]. 19 | #[derive(Debug)] 20 | pub struct CancelAction { 21 | client: Client, 22 | url: String, 23 | } 24 | 25 | impl CancelAction { 26 | pub(crate) fn new(client: Client, url: String) -> Self { 27 | Self { client, url } 28 | } 29 | 30 | /// Retrieve the id of the action to cancel. 31 | pub async fn id(&self) -> Result { 32 | let reply = self.client.get(&self.url).send().await?; 33 | reply.error_for_status_ref()?; 34 | 35 | let reply = reply.json::().await?; 36 | Ok(reply.cancel_action.stop_id) 37 | } 38 | 39 | /// Send feedback to server about this cancel action. 40 | /// 41 | /// # Arguments 42 | /// * `execution`: status of the action execution. 43 | /// * `finished`: defined status of the result. The action will be kept open on the server until the controller on the device reports either [`Finished::Success`] or [`Finished::Failure`]. 44 | /// * `details`: list of details message information. 
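// Illustrative handling of a cancel action (a sketch mirroring examples/polling.rs;
// `reply` is assumed to come from `Client::poll` and errors are propagated with `?`):
//
//     if let Some(cancel_action) = reply.cancel_action() {
//         let action_id = cancel_action.id().await?;
//         println!("Cancelling action {}", action_id);
//         cancel_action
//             .send_feedback(Execution::Closed, Finished::Success, vec!["Cancelled"])
//             .await?;
//     }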
45 | pub async fn send_feedback( 46 | &self, 47 | execution: Execution, 48 | finished: Finished, 49 | details: Vec<&str>, 50 | ) -> Result<(), Error> { 51 | let id = self.id().await?; 52 | 53 | send_feedback_internal::( 54 | &self.client, 55 | &self.url, 56 | &id, 57 | execution, 58 | finished, 59 | None, 60 | details, 61 | ) 62 | .await 63 | } 64 | } 65 | 66 | #[derive(Debug, Deserialize)] 67 | struct CancelReply { 68 | id: String, 69 | #[serde(rename = "cancelAction")] 70 | cancel_action: CancelActionReply, 71 | } 72 | 73 | #[derive(Debug, Deserialize)] 74 | struct CancelActionReply { 75 | #[serde(rename = "stopId")] 76 | stop_id: String, 77 | } 78 | -------------------------------------------------------------------------------- /hawkbit/src/ddi/client.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | use std::convert::TryInto; 5 | 6 | use thiserror::Error; 7 | use url::Url; 8 | 9 | use crate::ddi::poll; 10 | 11 | /// [Direct Device Integration](https://www.eclipse.org/hawkbit/apis/ddi_api/) client. 12 | #[derive(Debug, Clone)] 13 | pub struct Client { 14 | base_url: Url, 15 | client: reqwest::Client, 16 | } 17 | 18 | /// DDI errors 19 | #[non_exhaustive] 20 | #[derive(Error, Debug)] 21 | pub enum Error { 22 | /// URL error 23 | #[error("Could not parse url")] 24 | ParseUrlError(#[from] url::ParseError), 25 | /// Token error 26 | #[error("Invalid token format")] 27 | InvalidToken(#[from] reqwest::header::InvalidHeaderValue), 28 | /// HTTP error 29 | #[error("Failed to process request")] 30 | ReqwestError(#[from] reqwest::Error), 31 | /// Error parsing sleep field from server 32 | #[error("Failed to parse polling sleep")] 33 | InvalidSleep, 34 | /// IO error 35 | #[error("Failed to download update")] 36 | Io(#[from] std::io::Error), 37 | /// Invalid checksum 38 | #[cfg(feature = "hash-digest")] 39 | #[error("Invalid Checksum")] 40 | ChecksumError(crate::ddi::deployment_base::ChecksumType), 41 | } 42 | 43 | impl Client { 44 | /// Create a new DDI client. 45 | /// 46 | /// # Arguments 47 | /// * `url`: the URL of the hawkBit server, such as `http://my-server.com:8080` 48 | /// * `tenant`: the server tenant 49 | /// * `controller_id`: the id of the controller 50 | /// * `key_token`: the secret authentification token of the controller 51 | pub fn new( 52 | url: &str, 53 | tenant: &str, 54 | controller_id: &str, 55 | key_token: &str, 56 | ) -> Result { 57 | let host: Url = url.parse()?; 58 | let path = format!("{}/controller/v1/{}", tenant, controller_id); 59 | let base_url = host.join(&path)?; 60 | 61 | let mut headers = reqwest::header::HeaderMap::new(); 62 | headers.insert( 63 | reqwest::header::AUTHORIZATION, 64 | format!("TargetToken {}", key_token).try_into()?, 65 | ); 66 | 67 | let client = reqwest::Client::builder() 68 | .default_headers(headers) 69 | .build()?; 70 | Ok(Self { base_url, client }) 71 | } 72 | 73 | /// Poll the server for updates 74 | pub async fn poll(&self) -> Result { 75 | let reply = self.client.get(self.base_url.clone()).send().await?; 76 | reply.error_for_status_ref()?; 77 | 78 | let reply = reply.json::().await?; 79 | Ok(poll::Reply::new(reply, self.client.clone())) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /hawkbit/src/ddi/common.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 
2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | use std::fmt; 5 | 6 | use reqwest::Client; 7 | use serde::{Deserialize, Serialize}; 8 | use url::Url; 9 | 10 | use crate::ddi::client::Error; 11 | use crate::ddi::feedback::Feedback; 12 | 13 | #[derive(Debug, Deserialize)] 14 | pub struct Link { 15 | href: String, 16 | } 17 | 18 | impl fmt::Display for Link { 19 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 20 | write!(f, "{}", self.href) 21 | } 22 | } 23 | 24 | #[derive(Debug, Serialize)] 25 | #[serde(rename_all = "lowercase")] 26 | /// Sent by the target to the server informing it about the execution state of a pending request, 27 | /// see the [DDI API reference](https://www.eclipse.org/hawkbit/apis/ddi_api/) for details. 28 | pub enum Execution { 29 | /// Target completes the action either with `Finished::Success` or `Finished::Failure` as result. 30 | Closed, 31 | /// This can be used by the target to inform that it is working on the action. 32 | Proceeding, 33 | /// This is send by the target as confirmation of a cancellation request by the update server. 34 | Canceled, 35 | /// This can be used by the target to inform that it scheduled on the action. 36 | Scheduled, 37 | /// This is send by the target in case an update of a cancellation is rejected, i.e. cannot be fulfilled at this point in time. 38 | Rejected, 39 | /// This can be used by the target to inform that it continued to work on the action. 40 | Resumed, 41 | } 42 | 43 | #[derive(Debug, Serialize)] 44 | #[serde(rename_all = "lowercase")] 45 | /// Status of a pending operation 46 | pub enum Finished { 47 | /// Operation suceeded 48 | Success, 49 | /// Operation failed 50 | Failure, 51 | /// Operation is still in-progress 52 | None, 53 | } 54 | 55 | pub(crate) async fn send_feedback_internal( 56 | client: &Client, 57 | url: &str, 58 | id: &str, 59 | execution: Execution, 60 | finished: Finished, 61 | progress: Option, 62 | details: Vec<&str>, 63 | ) -> Result<(), Error> { 64 | let mut url: Url = url.parse()?; 65 | { 66 | let mut paths = url 67 | .path_segments_mut() 68 | .map_err(|_| url::ParseError::SetHostOnCannotBeABaseUrl)?; 69 | paths.push("feedback"); 70 | } 71 | url.set_query(None); 72 | 73 | let details = details.iter().map(|m| m.to_string()).collect(); 74 | let feedback = Feedback::new(id, execution, finished, progress, details); 75 | 76 | let reply = client.post(&url.to_string()).json(&feedback).send().await?; 77 | reply.error_for_status()?; 78 | 79 | Ok(()) 80 | } 81 | -------------------------------------------------------------------------------- /hawkbit/src/ddi/config_data.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | // Structures used to send config data 5 | 6 | use reqwest::Client; 7 | use serde::Serialize; 8 | 9 | use crate::ddi::{Error, Execution, Finished}; 10 | 11 | /// A request from the server asking to upload the device configuration. 12 | #[derive(Debug)] 13 | pub struct ConfigRequest { 14 | client: Client, 15 | url: String, 16 | } 17 | 18 | impl ConfigRequest { 19 | pub(crate) fn new(client: Client, url: String) -> Self { 20 | Self { client, url } 21 | } 22 | 23 | /// Send the requested device configuration to the server. 24 | /// 25 | /// The configuration is represented as the `data` argument which 26 | /// need to be serializable. 
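// Illustrative upload of configuration data (a sketch mirroring examples/polling.rs;
// the `ConfigData` struct is caller-defined and only needs to implement `Serialize`):
//
//     #[derive(serde::Serialize)]
//     struct ConfigData {
//         #[serde(rename = "HwRevision")]
//         hw_revision: String,
//     }
//
//     if let Some(request) = reply.config_data_request() {
//         let data = ConfigData { hw_revision: "1.0".to_string() };
//         request
//             .upload(Execution::Closed, Finished::Success, None, data, vec![])
//             .await?;
//     }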
27 | pub async fn upload( 28 | &self, 29 | execution: Execution, 30 | finished: Finished, 31 | mode: Option, 32 | data: T, 33 | details: Vec<&str>, 34 | ) -> Result<(), Error> { 35 | let details = details.iter().map(|m| m.to_string()).collect(); 36 | let data = ConfigData::new(execution, finished, mode, data, details); 37 | let reply = self.client.put(&self.url).json(&data).send().await?; 38 | 39 | reply.error_for_status()?; 40 | Ok(()) 41 | } 42 | } 43 | 44 | #[derive(Debug, Serialize)] 45 | pub(crate) struct ConfigData { 46 | status: Status, 47 | mode: Option, 48 | data: T, 49 | // skip 'id' as its semantic is unclear and it's left empty in the doc 50 | } 51 | #[derive(Debug, Serialize)] 52 | struct Status { 53 | execution: Execution, 54 | result: ResultT, 55 | details: Vec, 56 | } 57 | 58 | #[derive(Debug, Serialize)] 59 | pub(crate) struct ResultT { 60 | finished: Finished, 61 | } 62 | 63 | /// Update mode that should be applied when updating target 64 | // FIXME: would be good to have better documentation of the fields but the spec does not say much 65 | #[derive(Debug, Serialize)] 66 | #[serde(rename_all = "lowercase")] 67 | pub enum Mode { 68 | /// Merge 69 | Merge, 70 | /// Replace 71 | Replace, 72 | /// Remove 73 | Remove, 74 | } 75 | 76 | impl ConfigData { 77 | pub(crate) fn new( 78 | execution: Execution, 79 | finished: Finished, 80 | mode: Option, 81 | data: T, 82 | details: Vec, 83 | ) -> Self { 84 | Self { 85 | data, 86 | status: Status { 87 | execution, 88 | result: ResultT { finished }, 89 | details, 90 | }, 91 | mode, 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /hawkbit/src/ddi/deployment_base.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | // Structures when querying deployment 5 | 6 | use std::path::{Path, PathBuf}; 7 | 8 | use bytes::Bytes; 9 | use futures::{prelude::*, TryStreamExt}; 10 | use reqwest::{Client, Response}; 11 | use serde::de::{Deserializer, Error as _, IgnoredAny, MapAccess, Visitor}; 12 | use serde::{Deserialize, Serialize}; 13 | 14 | use tokio::{ 15 | fs::{DirBuilder, File}, 16 | io::AsyncWriteExt, 17 | }; 18 | 19 | use crate::ddi::client::Error; 20 | use crate::ddi::common::{send_feedback_internal, Execution, Finished, Link}; 21 | 22 | #[derive(Debug)] 23 | /// A pending update whose details have not been retrieved yet. 24 | /// 25 | /// Call [`UpdatePreFetch::fetch()`] to retrieve the details from server. 26 | pub struct UpdatePreFetch { 27 | client: Client, 28 | url: String, 29 | } 30 | 31 | impl UpdatePreFetch { 32 | pub(crate) fn new(client: Client, url: String) -> Self { 33 | Self { client, url } 34 | } 35 | 36 | /// Retrieve details about the update. 37 | pub async fn fetch(self) -> Result { 38 | let reply = self.client.get(&self.url).send().await?; 39 | reply.error_for_status_ref()?; 40 | 41 | let reply = reply.json::().await?; 42 | Ok(Update::new(self.client, reply, self.url)) 43 | } 44 | } 45 | 46 | #[derive(Debug, Deserialize)] 47 | struct Reply { 48 | id: String, 49 | deployment: Deployment, 50 | #[serde(rename = "actionHistory")] 51 | action_history: Option, 52 | } 53 | 54 | #[derive(Debug, Deserialize)] 55 | struct Deployment { 56 | download: Type, 57 | update: Type, 58 | #[serde(rename = "maintenanceWindow")] 59 | maintenance_window: Option, 60 | chunks: Vec, 61 | } 62 | 63 | /// How the download or update should be processed by the target. 
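// Illustrative reaction to the deployment type (a sketch; how a device treats
// Skip/Attempt/Forced is device policy, the server only expresses its intent):
//
//     match update.download_type() {
//         Type::Forced => { /* start downloading right away */ }
//         Type::Attempt => { /* download when convenient, e.g. on an idle link */ }
//         Type::Skip => { /* do not download yet */ }
//     }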
64 | #[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq)] 65 | #[serde(rename_all = "lowercase")] 66 | pub enum Type { 67 | /// Do not process yet 68 | Skip, 69 | /// Server asks to process 70 | Attempt, 71 | /// Server requests immediate processing 72 | Forced, 73 | } 74 | 75 | /// Separation of download and installation by defining a maintenance window for the installation. 76 | #[derive(Debug, Deserialize, Serialize, Copy, Clone, PartialEq)] 77 | #[serde(rename_all = "lowercase")] 78 | pub enum MaintenanceWindow { 79 | /// Maintenance window is available 80 | Available, 81 | /// Maintenance window is unavailable 82 | Unavailable, 83 | } 84 | 85 | #[derive(Debug, Deserialize)] 86 | struct ChunkInternal { 87 | #[serde(default)] 88 | metadata: Vec, 89 | part: String, 90 | name: String, 91 | version: String, 92 | artifacts: Vec, 93 | } 94 | 95 | #[derive(Debug, Deserialize)] 96 | struct Metadata { 97 | key: String, 98 | value: String, 99 | } 100 | 101 | #[derive(Debug, Deserialize)] 102 | struct ArtifactInternal { 103 | filename: String, 104 | hashes: Hashes, 105 | size: u32, 106 | #[serde(rename = "_links")] 107 | links: Links, 108 | } 109 | 110 | #[derive(Debug, Deserialize, Clone)] 111 | struct Hashes { 112 | sha1: String, 113 | md5: String, 114 | sha256: String, 115 | } 116 | 117 | impl<'de> Deserialize<'de> for Links { 118 | fn deserialize(deserializer: D) -> Result 119 | where 120 | D: Deserializer<'de>, 121 | { 122 | struct V; 123 | 124 | impl<'de> Visitor<'de> for V { 125 | type Value = Links; 126 | 127 | fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { 128 | write!(f, "a map") 129 | } 130 | 131 | fn visit_map(self, mut map: A) -> Result 132 | where 133 | A: MapAccess<'de>, 134 | { 135 | let mut download: Option = None; 136 | let mut md5sum: Option = None; 137 | let mut download_http: Option = None; 138 | let mut md5sum_http: Option = None; 139 | 140 | while let Some(key) = map.next_key()? 
{ 141 | match key { 142 | "download" => { 143 | download = match download { 144 | Some(_) => return Err(A::Error::duplicate_field("download")), 145 | None => Some(map.next_value()?), 146 | }; 147 | } 148 | "md5sum" => { 149 | md5sum = match md5sum { 150 | Some(_) => return Err(A::Error::duplicate_field("md5sum")), 151 | None => Some(map.next_value()?), 152 | }; 153 | } 154 | "download-http" => { 155 | download_http = match download_http { 156 | Some(_) => return Err(A::Error::duplicate_field("download-http")), 157 | None => Some(map.next_value()?), 158 | }; 159 | } 160 | "md5sum-http" => { 161 | md5sum_http = match md5sum_http { 162 | Some(_) => return Err(A::Error::duplicate_field("md5sum-http")), 163 | None => Some(map.next_value()?), 164 | }; 165 | } 166 | _ => { 167 | map.next_value::()?; 168 | } 169 | } 170 | } 171 | 172 | let https = download.map(|content| Download { content, md5sum }); 173 | let http = download_http.map(|content| Download { 174 | content, 175 | md5sum: md5sum_http, 176 | }); 177 | 178 | if http.is_none() && https.is_none() { 179 | Err(A::Error::missing_field("download or download-http")) 180 | } else { 181 | Ok(Links { http, https }) 182 | } 183 | } 184 | } 185 | 186 | let visitor = V; 187 | 188 | deserializer.deserialize_map(visitor) 189 | } 190 | } 191 | 192 | #[derive(Debug)] 193 | struct Download { 194 | content: Link, 195 | md5sum: Option, 196 | } 197 | 198 | /// Download links a single artifact, at least one of http or https will be 199 | /// Some 200 | #[derive(Debug)] 201 | struct Links { 202 | http: Option, 203 | https: Option, 204 | } 205 | 206 | #[derive(Debug, Deserialize)] 207 | struct ActionHistory { 208 | status: String, 209 | #[serde(default)] 210 | messages: Vec, 211 | } 212 | 213 | /// A pending update to deploy. 214 | #[derive(Debug)] 215 | pub struct Update { 216 | client: Client, 217 | info: Reply, 218 | url: String, 219 | } 220 | 221 | impl Update { 222 | fn new(client: Client, info: Reply, url: String) -> Self { 223 | Self { client, info, url } 224 | } 225 | 226 | /// Handling for the download part of the provisioning process. 227 | pub fn download_type(&self) -> Type { 228 | self.info.deployment.download 229 | } 230 | 231 | /// Handling for the update part of the provisioning process. 232 | pub fn update_type(&self) -> Type { 233 | self.info.deployment.update 234 | } 235 | 236 | /// If set, the update is part of a maintenance window. 237 | pub fn maintenance_window(&self) -> Option { 238 | self.info.deployment.maintenance_window 239 | } 240 | 241 | /// An iterator on all the software chunks of the update. 242 | pub fn chunks(&self) -> impl Iterator { 243 | let client = self.client.clone(); 244 | 245 | self.info 246 | .deployment 247 | .chunks 248 | .iter() 249 | .map(move |c| Chunk::new(c, client.clone())) 250 | } 251 | 252 | /// Download all software chunks to the directory defined in `dir`. 253 | pub async fn download(&self, dir: &Path) -> Result, Error> { 254 | let mut result = Vec::new(); 255 | for c in self.chunks() { 256 | let downloaded = c.download(dir).await?; 257 | result.extend(downloaded); 258 | } 259 | 260 | Ok(result) 261 | } 262 | 263 | /// Send feedback to server about this update, with custom progress information. 264 | /// 265 | /// # Arguments 266 | /// * `execution`: status of the action execution. 267 | /// * `finished`: defined status of the result. The action will be kept open on the server until the controller on the device reports either [`Finished::Success`] or [`Finished::Failure`]. 
268 | /// * `progress`: progress assumption of the device. 269 | /// * `details`: list of details message information. 270 | pub async fn send_feedback_with_progress( 271 | &self, 272 | execution: Execution, 273 | finished: Finished, 274 | progress: T, 275 | details: Vec<&str>, 276 | ) -> Result<(), Error> { 277 | send_feedback_internal( 278 | &self.client, 279 | &self.url, 280 | &self.info.id, 281 | execution, 282 | finished, 283 | Some(progress), 284 | details, 285 | ) 286 | .await 287 | } 288 | 289 | /// Send feedback to server about this update. 290 | /// 291 | /// Same as [`Update::send_feedback_with_progress`] but without passing custom progress information about the update. 292 | pub async fn send_feedback( 293 | &self, 294 | execution: Execution, 295 | finished: Finished, 296 | details: Vec<&str>, 297 | ) -> Result<(), Error> { 298 | send_feedback_internal::( 299 | &self.client, 300 | &self.url, 301 | &self.info.id, 302 | execution, 303 | finished, 304 | None, 305 | details, 306 | ) 307 | .await 308 | } 309 | } 310 | 311 | /// Software chunk of an update. 312 | #[derive(Debug)] 313 | pub struct Chunk<'a> { 314 | chunk: &'a ChunkInternal, 315 | client: Client, 316 | } 317 | 318 | impl<'a> Chunk<'a> { 319 | fn new(chunk: &'a ChunkInternal, client: Client) -> Self { 320 | Self { chunk, client } 321 | } 322 | 323 | /// Type of the chunk. 324 | pub fn part(&self) -> &str { 325 | &self.chunk.part 326 | } 327 | 328 | /// Name of the chunk. 329 | pub fn name(&self) -> &str { 330 | &self.chunk.name 331 | } 332 | 333 | /// Software version of the chunk. 334 | pub fn version(&self) -> &str { 335 | &self.chunk.version 336 | } 337 | 338 | /// An iterator on all the artifacts of the chunk. 339 | pub fn artifacts(&self) -> impl Iterator { 340 | let client = self.client.clone(); 341 | 342 | self.chunk 343 | .artifacts 344 | .iter() 345 | .map(move |a| Artifact::new(a, client.clone())) 346 | } 347 | 348 | /// An iterator on all the metadata of the chunk. 349 | pub fn metadata(&self) -> impl Iterator { 350 | self.chunk 351 | .metadata 352 | .iter() 353 | .map(|a| (a.key.as_str(), a.value.as_str())) 354 | } 355 | 356 | /// Download all artifacts of the chunk to the directory defined in `dir`. 357 | pub async fn download(&'a self, dir: &Path) -> Result, Error> { 358 | let mut dir = dir.to_path_buf(); 359 | dir.push(self.name()); 360 | let mut result = Vec::new(); 361 | 362 | for a in self.artifacts() { 363 | let downloaded = a.download(&dir).await?; 364 | result.push(downloaded); 365 | } 366 | 367 | Ok(result) 368 | } 369 | } 370 | 371 | /// A single file part of a [`Chunk`] to download. 372 | #[derive(Debug)] 373 | pub struct Artifact<'a> { 374 | artifact: &'a ArtifactInternal, 375 | client: Client, 376 | } 377 | 378 | impl<'a> Artifact<'a> { 379 | fn new(artifact: &'a ArtifactInternal, client: Client) -> Self { 380 | Self { artifact, client } 381 | } 382 | 383 | /// The name of the file. 384 | pub fn filename(&self) -> &str { 385 | &self.artifact.filename 386 | } 387 | 388 | /// The size of the file. 
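// Illustrative progress feedback (a sketch mirroring tests/tests.rs; the `Progress`
// struct is caller-defined and only needs to implement `Serialize`):
//
//     #[derive(serde::Serialize)]
//     struct Progress { percent: u8 }
//
//     update
//         .send_feedback_with_progress(
//             Execution::Proceeding,
//             Finished::None,
//             Some(Progress { percent: 50 }),
//             vec!["Installing"],
//         )
//         .await?;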
389 | pub fn size(&self) -> u32 { 390 | self.artifact.size 391 | } 392 | 393 | async fn download_response(&'a self) -> Result { 394 | let download = self 395 | .artifact 396 | .links 397 | .https 398 | .as_ref() 399 | .or_else(|| self.artifact.links.http.as_ref()) 400 | .expect("Missing content link in for artifact"); 401 | 402 | let resp = self 403 | .client 404 | .get(&download.content.to_string()) 405 | .send() 406 | .await?; 407 | 408 | resp.error_for_status_ref()?; 409 | Ok(resp) 410 | } 411 | 412 | /// Download the artifact file to the directory defined in `dir`. 413 | pub async fn download(&'a self, dir: &Path) -> Result { 414 | let mut resp = self.download_response().await?; 415 | 416 | if !dir.exists() { 417 | DirBuilder::new().recursive(true).create(dir).await?; 418 | } 419 | 420 | let mut file_name = dir.to_path_buf(); 421 | file_name.push(self.filename()); 422 | let mut dest = File::create(&file_name).await?; 423 | 424 | while let Some(chunk) = resp.chunk().await? { 425 | dest.write_all(&chunk).await?; 426 | } 427 | 428 | Ok(DownloadedArtifact::new( 429 | file_name, 430 | self.artifact.hashes.clone(), 431 | )) 432 | } 433 | 434 | /// Provide a `Stream` of `Bytes` to download the artifact. 435 | /// 436 | /// This can be used as an alternative to [`Artifact::download`], 437 | /// for example, to extract an archive while it's being downloaded, 438 | /// saving the need to store the archive file on disk. 439 | pub async fn download_stream( 440 | &'a self, 441 | ) -> Result>, Error> { 442 | let resp = self.download_response().await?; 443 | 444 | Ok(resp.bytes_stream().map_err(|e| e.into())) 445 | } 446 | 447 | /// Provide a `Stream` of `Bytes` to download the artifact while checking md5 checksum. 448 | /// 449 | /// The stream will yield the same data as [`Artifact::download_stream`] but will raise 450 | /// an error if the md5sum of the downloaded data does not match the one provided by the server. 451 | #[cfg(feature = "hash-md5")] 452 | pub async fn download_stream_with_md5_check( 453 | &'a self, 454 | ) -> Result>, Error> { 455 | let stream = self.download_stream().await?; 456 | let hasher = DownloadHasher::new_md5(self.artifact.hashes.md5.clone()); 457 | 458 | let stream = DownloadStreamHash { 459 | stream: Box::new(stream), 460 | hasher, 461 | }; 462 | 463 | Ok(stream) 464 | } 465 | 466 | /// Provide a `Stream` of `Bytes` to download the artifact while checking sha1 checksum. 467 | /// 468 | /// The stream will yield the same data as [`Artifact::download_stream`] but will raise 469 | /// an error if the sha1sum of the downloaded data does not match the one provided by the server. 470 | #[cfg(feature = "hash-sha1")] 471 | pub async fn download_stream_with_sha1_check( 472 | &'a self, 473 | ) -> Result>, Error> { 474 | let stream = self.download_stream().await?; 475 | let hasher = DownloadHasher::new_sha1(self.artifact.hashes.sha1.clone()); 476 | 477 | let stream = DownloadStreamHash { 478 | stream: Box::new(stream), 479 | hasher, 480 | }; 481 | 482 | Ok(stream) 483 | } 484 | 485 | /// Provide a `Stream` of `Bytes` to download the artifact while checking sha256 checksum. 486 | /// 487 | /// The stream will yield the same data as [`Artifact::download_stream`] but will raise 488 | /// an error if the sha256sum of the downloaded data does not match the one provided by the server. 
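// Illustrative consumption of a checksum-checked stream (a sketch based on
// tests/tests.rs; `StreamExt::next` comes from the `futures` crate, and the final
// stream item yields `Error::ChecksumError` if the hash does not match):
//
//     use futures::StreamExt;
//
//     let mut stream = artifact.download_stream_with_sha256_check().await?;
//     while let Some(chunk) = stream.next().await {
//         let bytes = chunk?;
//         // feed `bytes` to an unpacker instead of keeping the archive on disk
//     }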
489 | #[cfg(feature = "hash-sha256")] 490 | pub async fn download_stream_with_sha256_check( 491 | &'a self, 492 | ) -> Result>, Error> { 493 | let stream = self.download_stream().await?; 494 | let hasher = DownloadHasher::new_sha256(self.artifact.hashes.sha256.clone()); 495 | 496 | let stream = DownloadStreamHash { 497 | stream: Box::new(stream), 498 | hasher, 499 | }; 500 | 501 | Ok(stream) 502 | } 503 | } 504 | 505 | /// A downloaded file part of a [`Chunk`]. 506 | #[derive(Debug)] 507 | pub struct DownloadedArtifact { 508 | file: PathBuf, 509 | hashes: Hashes, 510 | } 511 | 512 | cfg_if::cfg_if! { 513 | if #[cfg(feature = "hash-digest")] { 514 | use std::{ 515 | pin::Pin, 516 | task::Poll, 517 | }; 518 | use digest::Digest; 519 | 520 | const HASH_BUFFER_SIZE: usize = 4096; 521 | 522 | /// Enum representing the different type of supported checksums 523 | #[derive(Debug, strum::Display, Clone)] 524 | pub enum ChecksumType { 525 | /// md5 526 | #[cfg(feature = "hash-md5")] 527 | Md5, 528 | /// sha1 529 | #[cfg(feature = "hash-sha1")] 530 | Sha1, 531 | /// sha256 532 | #[cfg(feature = "hash-sha256")] 533 | Sha256, 534 | } 535 | 536 | // quite complex trait bounds because of requirements so LowerHex is implemented on the output 537 | #[derive(Clone)] 538 | struct DownloadHasher 539 | where 540 | T: Digest, 541 | ::OutputSize: core::ops::Add, 542 | <::OutputSize as core::ops::Add>::Output: generic_array::ArrayLength, 543 | { 544 | hasher: T, 545 | expected: String, 546 | error: ChecksumType, 547 | } 548 | 549 | impl DownloadHasher 550 | where 551 | T: Digest, 552 | ::OutputSize: core::ops::Add, 553 | <::OutputSize as core::ops::Add>::Output: generic_array::ArrayLength 554 | { 555 | fn update(&mut self, data: impl AsRef<[u8]>) { 556 | self.hasher.update(data); 557 | } 558 | 559 | fn finalize(self) -> Result<(), Error> { 560 | let digest = self.hasher.finalize(); 561 | 562 | if format!("{:x}", digest) == self.expected { 563 | Ok(()) 564 | } else { 565 | Err(Error::ChecksumError(self.error)) 566 | } 567 | } 568 | } 569 | 570 | #[cfg(feature = "hash-md5")] 571 | impl DownloadHasher { 572 | fn new_md5(expected: String) -> Self { 573 | Self { 574 | hasher: md5::Md5::new(), 575 | expected, 576 | error: ChecksumType::Md5, 577 | } 578 | } 579 | } 580 | 581 | #[cfg(feature = "hash-sha1")] 582 | impl DownloadHasher { 583 | fn new_sha1(expected: String) -> Self { 584 | Self { 585 | hasher: sha1::Sha1::new(), 586 | expected, 587 | error: ChecksumType::Sha1, 588 | } 589 | } 590 | } 591 | 592 | #[cfg(feature = "hash-sha256")] 593 | impl DownloadHasher { 594 | fn new_sha256(expected: String) -> Self { 595 | Self { 596 | hasher: sha2::Sha256::new(), 597 | expected, 598 | error: ChecksumType::Sha256, 599 | } 600 | } 601 | } 602 | 603 | struct DownloadStreamHash 604 | where 605 | T: Digest, 606 | ::OutputSize: core::ops::Add, 607 | <::OutputSize as core::ops::Add>::Output: generic_array::ArrayLength, 608 | { 609 | stream: Box> + Unpin + Send + Sync>, 610 | hasher: DownloadHasher, 611 | } 612 | 613 | impl Stream for DownloadStreamHash 614 | where 615 | T: Digest, 616 | ::OutputSize: core::ops::Add, 617 | <::OutputSize as core::ops::Add>::Output: generic_array::ArrayLength, 618 | T: Unpin, 619 | T: Clone, 620 | { 621 | type Item = Result; 622 | 623 | fn poll_next( 624 | self: std::pin::Pin<&mut Self>, 625 | cx: &mut std::task::Context<'_>, 626 | ) -> std::task::Poll> { 627 | let me = Pin::into_inner(self); 628 | 629 | match Pin::new(&mut me.stream).poll_next(cx) { 630 | Poll::Ready(Some(Ok(data))) => { 631 | // 
feed data to the hasher and then pass them back to the stream 632 | me.hasher.update(&data); 633 | Poll::Ready(Some(Ok(data))) 634 | } 635 | Poll::Ready(None) => { 636 | // download is done, check the hash 637 | match me.hasher.clone().finalize() { 638 | Ok(_) => Poll::Ready(None), 639 | Err(e) => Poll::Ready(Some(Err(e))), 640 | } 641 | } 642 | // passthrough on errors and pendings 643 | Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), 644 | Poll::Pending => Poll::Pending, 645 | } 646 | } 647 | } 648 | } 649 | } 650 | 651 | impl<'a> DownloadedArtifact { 652 | fn new(file: PathBuf, hashes: Hashes) -> Self { 653 | Self { file, hashes } 654 | } 655 | 656 | /// Path of the downloaded file. 657 | pub fn file(&self) -> &PathBuf { 658 | &self.file 659 | } 660 | 661 | #[cfg(feature = "hash-digest")] 662 | async fn hash(&self, mut hasher: DownloadHasher) -> Result<(), Error> 663 | where 664 | T: Digest, 665 | ::OutputSize: core::ops::Add, 666 | <::OutputSize as core::ops::Add>::Output: generic_array::ArrayLength, 667 | { 668 | use tokio::io::AsyncReadExt; 669 | 670 | let mut file = File::open(&self.file).await?; 671 | let mut buffer = [0; HASH_BUFFER_SIZE]; 672 | 673 | loop { 674 | let n = file.read(&mut buffer[..]).await?; 675 | if n == 0 { 676 | break; 677 | } 678 | hasher.update(&buffer[..n]); 679 | } 680 | 681 | hasher.finalize() 682 | } 683 | 684 | /// Check if the md5sum of the downloaded file matches the one provided by the server. 685 | #[cfg(feature = "hash-md5")] 686 | pub async fn check_md5(&self) -> Result<(), Error> { 687 | let hasher = DownloadHasher::new_md5(self.hashes.md5.clone()); 688 | self.hash(hasher).await 689 | } 690 | 691 | /// Check if the sha1sum of the downloaded file matches the one provided by the server. 692 | #[cfg(feature = "hash-sha1")] 693 | pub async fn check_sha1(&self) -> Result<(), Error> { 694 | let hasher = DownloadHasher::new_sha1(self.hashes.sha1.clone()); 695 | self.hash(hasher).await 696 | } 697 | 698 | /// Check if the sha256sum of the downloaded file matches the one provided by the server. 699 | #[cfg(feature = "hash-sha256")] 700 | pub async fn check_sha256(&self) -> Result<(), Error> { 701 | let hasher = DownloadHasher::new_sha256(self.hashes.sha256.clone()); 702 | self.hash(hasher).await 703 | } 704 | } 705 | -------------------------------------------------------------------------------- /hawkbit/src/ddi/feedback.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 
2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | // Structures used to send feedback on upgrades 5 | 6 | use serde::Serialize; 7 | 8 | use crate::ddi::common::{Execution, Finished}; 9 | 10 | #[derive(Debug, Serialize)] 11 | pub(crate) struct Feedback { 12 | id: String, 13 | status: Status, 14 | } 15 | #[derive(Debug, Serialize)] 16 | struct Status { 17 | execution: Execution, 18 | result: ResultT, 19 | details: Vec, 20 | } 21 | 22 | #[derive(Debug, Serialize)] 23 | pub struct ResultT { 24 | finished: Finished, 25 | #[serde(skip_serializing_if = "Option::is_none")] 26 | progress: Option, 27 | } 28 | 29 | impl Feedback { 30 | pub(crate) fn new( 31 | id: &str, 32 | execution: Execution, 33 | finished: Finished, 34 | progress: Option, 35 | details: Vec, 36 | ) -> Self { 37 | Self { 38 | id: id.to_string(), 39 | status: Status { 40 | execution, 41 | details, 42 | result: ResultT { finished, progress }, 43 | }, 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /hawkbit/src/ddi/poll.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | // Structures used to poll the status 5 | 6 | use std::time::Duration; 7 | 8 | use reqwest::Client; 9 | use serde::Deserialize; 10 | 11 | use crate::ddi::cancel_action::CancelAction; 12 | use crate::ddi::client::Error; 13 | use crate::ddi::common::Link; 14 | use crate::ddi::config_data::ConfigRequest; 15 | use crate::ddi::deployment_base::UpdatePreFetch; 16 | 17 | #[derive(Debug, Deserialize)] 18 | pub(crate) struct ReplyInternal { 19 | config: Config, 20 | #[serde(rename = "_links")] 21 | links: Option, 22 | } 23 | #[derive(Debug, Deserialize)] 24 | pub struct Config { 25 | polling: Polling, 26 | } 27 | #[derive(Debug, Deserialize)] 28 | pub struct Polling { 29 | sleep: String, 30 | } 31 | #[derive(Debug, Deserialize)] 32 | pub struct Links { 33 | #[serde(rename = "configData")] 34 | config_data: Option, 35 | #[serde(rename = "deploymentBase")] 36 | deployment_base: Option, 37 | #[serde(rename = "cancelAction")] 38 | cancel_action: Option, 39 | } 40 | 41 | /// Polling reply from the server 42 | #[derive(Debug)] 43 | pub struct Reply { 44 | reply: ReplyInternal, 45 | client: Client, 46 | } 47 | 48 | impl Reply { 49 | pub(crate) fn new(reply: ReplyInternal, client: Client) -> Self { 50 | Self { reply, client } 51 | } 52 | 53 | /// Suggested sleeping time between two polling requests to the server. 54 | pub fn polling_sleep(&self) -> Result { 55 | self.reply.config.polling.as_duration() 56 | } 57 | 58 | /// Returns pending configuration data request from the server, if any. 59 | pub fn config_data_request(&self) -> Option { 60 | match &self.reply.links { 61 | Some(links) => links 62 | .config_data 63 | .as_ref() 64 | .map(|l| ConfigRequest::new(self.client.clone(), l.to_string())), 65 | None => None, 66 | } 67 | } 68 | 69 | /// Returns pending update to deploy, if any. 70 | pub fn update(&self) -> Option { 71 | match &self.reply.links { 72 | Some(links) => links 73 | .deployment_base 74 | .as_ref() 75 | .map(|l| UpdatePreFetch::new(self.client.clone(), l.to_string())), 76 | None => None, 77 | } 78 | } 79 | 80 | /// Returns pending cancel action, if any. 
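// Illustrative poll loop (a sketch mirroring examples/polling.rs; update handling
// and feedback reporting are elided):
//
//     loop {
//         let reply = client.poll().await?;
//         if let Some(update) = reply.update() {
//             let update = update.fetch().await?;
//             // download artifacts and send feedback here
//         }
//         tokio::time::sleep(reply.polling_sleep()?).await;
//     }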
81 | pub fn cancel_action(&self) -> Option { 82 | match &self.reply.links { 83 | Some(links) => links 84 | .cancel_action 85 | .as_ref() 86 | .map(|l| CancelAction::new(self.client.clone(), l.to_string())), 87 | None => None, 88 | } 89 | } 90 | } 91 | 92 | impl Polling { 93 | fn as_duration(&self) -> Result { 94 | let times: Vec> = self.sleep.split(':').map(|s| s.parse()).collect(); 95 | if times.len() != 3 { 96 | return Err(Error::InvalidSleep); 97 | } 98 | 99 | match times[..] { 100 | [Ok(h), Ok(m), Ok(s)] => Ok(Duration::new(h * 60 * 60 + m * 60 + s, 0)), 101 | _ => Ok(Duration::new(0, 0)), 102 | } 103 | } 104 | } 105 | 106 | #[cfg(test)] 107 | mod tests { 108 | use super::*; 109 | 110 | #[test] 111 | fn sleep_duration() { 112 | let polling = Polling { 113 | sleep: "00:00:05".to_string(), 114 | }; 115 | assert_eq!(polling.as_duration().unwrap(), Duration::new(5, 0)); 116 | 117 | let polling = Polling { 118 | sleep: "00:05:05".to_string(), 119 | }; 120 | assert_eq!(polling.as_duration().unwrap(), Duration::new(305, 0)); 121 | 122 | let polling = Polling { 123 | sleep: "01:05:05".to_string(), 124 | }; 125 | assert_eq!(polling.as_duration().unwrap(), Duration::new(3905, 0)); 126 | 127 | let polling = Polling { 128 | sleep: "05:05".to_string(), 129 | }; 130 | assert!(polling.as_duration().is_err()); 131 | 132 | let polling = Polling { 133 | sleep: "invalid".to_string(), 134 | }; 135 | assert!(polling.as_duration().is_err()); 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /hawkbit/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | #![warn(missing_docs)] 5 | 6 | //! # hawkbit 7 | //! 8 | //! The `hawkbit` crate provides high-level client-side API to interact with 9 | //! [Eclipse hawkBit](https://www.eclipse.org/hawkbit/). 10 | //! 11 | //! So far only the [Direct Device Integration API](https://www.eclipse.org/hawkbit/apis/ddi_api/) 12 | //! is implemented, see the [`ddi`] module. 13 | 14 | pub mod ddi; 15 | -------------------------------------------------------------------------------- /hawkbit/tests/data/test.txt: -------------------------------------------------------------------------------- 1 | hello world -------------------------------------------------------------------------------- /hawkbit/tests/tests.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 
2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | use std::fs::File; 5 | use std::io::prelude::*; 6 | use std::{path::PathBuf, time::Duration}; 7 | 8 | use bytes::Bytes; 9 | use futures::prelude::*; 10 | use hawkbit::ddi::{Client, Error, Execution, Finished, MaintenanceWindow, Mode, Type}; 11 | use serde::Serialize; 12 | use serde_json::json; 13 | use tempdir::TempDir; 14 | 15 | use hawkbit_mock::ddi::{ 16 | ChunkProtocol, Deployment, DeploymentBuilder, Server, ServerBuilder, Target, 17 | }; 18 | 19 | fn init() { 20 | let _ = env_logger::builder().is_test(true).try_init(); 21 | } 22 | 23 | fn add_target(server: &Server, name: &str) -> (Client, Target) { 24 | let target = server.add_target(name); 25 | 26 | let client = Client::new( 27 | &server.base_url(), 28 | &server.tenant, 29 | &target.name, 30 | &target.key, 31 | ) 32 | .expect("DDI creation failed"); 33 | 34 | (client, target) 35 | } 36 | 37 | #[tokio::test] 38 | async fn poll() { 39 | init(); 40 | 41 | let server = ServerBuilder::default().tenant("my-tenant").build(); 42 | let (client, target) = add_target(&server, "Target1"); 43 | 44 | assert_eq!(target.poll_hits(), 0); 45 | 46 | // Try polling twice 47 | for i in 0..2 { 48 | let reply = client.poll().await.expect("poll failed"); 49 | assert_eq!(reply.polling_sleep().unwrap(), Duration::from_secs(60)); 50 | assert!(reply.config_data_request().is_none()); 51 | assert!(reply.update().is_none()); 52 | assert_eq!(target.poll_hits(), i + 1); 53 | } 54 | } 55 | 56 | #[tokio::test] 57 | async fn upload_config() { 58 | init(); 59 | 60 | let server = ServerBuilder::default().build(); 61 | let (client, target) = add_target(&server, "Target1"); 62 | 63 | let expected_config_data = json!({ 64 | "mode" : "merge", 65 | "data" : { 66 | "awesome" : true, 67 | }, 68 | "status" : { 69 | "result" : { 70 | "finished" : "success" 71 | }, 72 | "execution" : "closed", 73 | "details" : [ "Some stuffs" ] 74 | } 75 | }); 76 | target.request_config(expected_config_data); 77 | 78 | let reply = client.poll().await.expect("poll failed"); 79 | let config_data_req = reply 80 | .config_data_request() 81 | .expect("missing config data request"); 82 | assert!(reply.update().is_none()); 83 | 84 | #[derive(Serialize)] 85 | struct Config { 86 | awesome: bool, 87 | } 88 | 89 | let config = Config { awesome: true }; 90 | 91 | config_data_req 92 | .upload( 93 | Execution::Closed, 94 | Finished::Success, 95 | Some(Mode::Merge), 96 | config, 97 | vec!["Some stuffs"], 98 | ) 99 | .await 100 | .expect("upload config failed"); 101 | 102 | assert_eq!(target.poll_hits(), 1); 103 | assert_eq!(target.config_data_hits(), 1); 104 | } 105 | 106 | fn artifact_path() -> PathBuf { 107 | let mut test_artifact = PathBuf::new(); 108 | test_artifact.push("tests"); 109 | test_artifact.push("data"); 110 | test_artifact.push("test.txt"); 111 | 112 | test_artifact 113 | } 114 | 115 | fn get_deployment(valid_checksums: bool) -> Deployment { 116 | let test_artifact = artifact_path(); 117 | 118 | let artifacts = if valid_checksums { 119 | vec![( 120 | test_artifact, 121 | "5eb63bbbe01eeed093cb22bb8f5acdc3", 122 | "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed", 123 | "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9", 124 | )] 125 | } else { 126 | vec![(test_artifact, "badger", "badger", "badger")] 127 | }; 128 | 129 | DeploymentBuilder::new("10", Type::Forced, Type::Attempt) 130 | .maintenance_window(MaintenanceWindow::Available) 131 | .chunk( 132 | ChunkProtocol::BOTH, 133 | "app-both", 134 | "1.0", 135 | "some-chunk", 136 | 
artifacts.clone(), 137 | ) 138 | .chunk( 139 | ChunkProtocol::HTTP, 140 | "app-http", 141 | "1.0", 142 | "some-chunk", 143 | artifacts.clone(), 144 | ) 145 | .chunk( 146 | ChunkProtocol::HTTPS, 147 | "app-https", 148 | "1.0", 149 | "some-chunk", 150 | artifacts, 151 | ) 152 | .build() 153 | } 154 | 155 | #[tokio::test] 156 | async fn deployment() { 157 | init(); 158 | 159 | let server = ServerBuilder::default().build(); 160 | let (client, target) = add_target(&server, "Target1"); 161 | target.push_deployment(get_deployment(true)); 162 | 163 | let reply = client.poll().await.expect("poll failed"); 164 | assert!(reply.config_data_request().is_none()); 165 | assert_eq!(target.deployment_hits(), 0); 166 | 167 | let update = reply.update().expect("missing update"); 168 | let update = update.fetch().await.expect("failed to fetch update info"); 169 | assert_eq!(target.deployment_hits(), 1); 170 | assert_eq!(update.download_type(), Type::Forced); 171 | assert_eq!(update.update_type(), Type::Attempt); 172 | assert_eq!( 173 | update.maintenance_window(), 174 | Some(MaintenanceWindow::Available) 175 | ); 176 | assert_eq!(update.chunks().count(), 3); 177 | 178 | let mut chunks = update.chunks(); 179 | for p in &[ 180 | ChunkProtocol::BOTH, 181 | ChunkProtocol::HTTP, 182 | ChunkProtocol::HTTPS, 183 | ] { 184 | // Check chunk 185 | let chunk = chunks.next().unwrap(); 186 | let name = match p { 187 | ChunkProtocol::BOTH => "app-both", 188 | ChunkProtocol::HTTP => "app-http", 189 | ChunkProtocol::HTTPS => "app-https", 190 | }; 191 | assert_eq!(chunk.part(), name); 192 | assert_eq!(chunk.version(), "1.0"); 193 | assert_eq!(chunk.name(), "some-chunk"); 194 | assert_eq!(chunk.artifacts().count(), 1); 195 | 196 | let art = chunk.artifacts().next().unwrap(); 197 | assert_eq!(art.filename(), "test.txt"); 198 | assert_eq!(art.size(), 11); 199 | 200 | let out_dir = TempDir::new("test-hawkbitrs").expect("Failed to create temp dir"); 201 | let artifacts = chunk 202 | .download(out_dir.path()) 203 | .await 204 | .expect("Failed to download update"); 205 | 206 | // Check artifact 207 | assert_eq!(artifacts.len(), 1); 208 | let p = artifacts[0].file(); 209 | assert_eq!(p.file_name().unwrap(), "test.txt"); 210 | assert!(p.exists()); 211 | 212 | #[cfg(feature = "hash-md5")] 213 | artifacts[0].check_md5().await.expect("invalid md5"); 214 | #[cfg(feature = "hash-sha1")] 215 | artifacts[0].check_sha1().await.expect("invalid sha1"); 216 | #[cfg(feature = "hash-sha256")] 217 | artifacts[0].check_sha256().await.expect("invalid sha256"); 218 | } 219 | } 220 | 221 | #[tokio::test] 222 | async fn send_deployment_feedback() { 223 | init(); 224 | 225 | let server = ServerBuilder::default().build(); 226 | let deploy = get_deployment(true); 227 | let deploy_id = deploy.id.clone(); 228 | let (client, target) = add_target(&server, "Target1"); 229 | target.push_deployment(deploy); 230 | 231 | let reply = client.poll().await.expect("poll failed"); 232 | let update = reply.update().expect("missing update"); 233 | let update = update.fetch().await.expect("failed to fetch update info"); 234 | 235 | // Send feedback without progress 236 | let mut mock = target.expect_deployment_feedback( 237 | &deploy_id, 238 | Execution::Proceeding, 239 | Finished::None, 240 | None, 241 | vec!["Downloading"], 242 | ); 243 | assert_eq!(mock.hits(), 0); 244 | 245 | update 246 | .send_feedback(Execution::Proceeding, Finished::None, vec!["Downloading"]) 247 | .await 248 | .expect("Failed to send feedback"); 249 | assert_eq!(mock.hits(), 1); 250 | 
mock.delete(); 251 | 252 | // Send feedback with progress 253 | let mut mock = target.expect_deployment_feedback( 254 | &deploy_id, 255 | Execution::Closed, 256 | Finished::Success, 257 | Some(json!({"awesome": true})), 258 | vec!["Done"], 259 | ); 260 | assert_eq!(mock.hits(), 0); 261 | 262 | #[derive(Serialize)] 263 | struct Progress { 264 | awesome: bool, 265 | } 266 | let progress = Progress { awesome: true }; 267 | 268 | update 269 | .send_feedback_with_progress( 270 | Execution::Closed, 271 | Finished::Success, 272 | Some(progress), 273 | vec!["Done"], 274 | ) 275 | .await 276 | .expect("Failed to send feedback"); 277 | assert_eq!(mock.hits(), 1); 278 | mock.delete(); 279 | } 280 | 281 | #[tokio::test] 282 | async fn config_then_deploy() { 283 | init(); 284 | 285 | let server = ServerBuilder::default().build(); 286 | let (client, target) = add_target(&server, "Target1"); 287 | 288 | let reply = client.poll().await.expect("poll failed"); 289 | assert!(reply.config_data_request().is_none()); 290 | assert!(reply.update().is_none()); 291 | 292 | // server requests config 293 | let expected_config_data = json!({ 294 | "mode" : "merge", 295 | "data" : { 296 | "awesome" : true, 297 | }, 298 | "status" : { 299 | "result" : { 300 | "finished" : "success" 301 | }, 302 | "execution" : "closed", 303 | "details" : [ "Some stuffs" ] 304 | } 305 | }); 306 | target.request_config(expected_config_data); 307 | 308 | let reply = client.poll().await.expect("poll failed"); 309 | assert!(reply.config_data_request().is_some()); 310 | assert!(reply.update().is_none()); 311 | 312 | // server pushes an update 313 | target.push_deployment(get_deployment(true)); 314 | 315 | let reply = client.poll().await.expect("poll failed"); 316 | assert!(reply.config_data_request().is_some()); 317 | assert!(reply.update().is_some()); 318 | } 319 | 320 | #[tokio::test] 321 | async fn download_stream() { 322 | init(); 323 | 324 | let server = ServerBuilder::default().build(); 325 | let (client, target) = add_target(&server, "Target1"); 326 | 327 | target.push_deployment(get_deployment(true)); 328 | let reply = client.poll().await.expect("poll failed"); 329 | 330 | let update = reply.update().expect("missing update"); 331 | let update = update.fetch().await.expect("failed to fetch update info"); 332 | let chunk = update.chunks().next().unwrap(); 333 | let art = chunk.artifacts().next().unwrap(); 334 | 335 | async fn check_download(mut stream: Box<dyn Stream<Item = Result<Bytes, Error>> + Unpin>) { 336 | let mut downloaded: Vec<u8> = Vec::new(); 337 | while let Some(b) = stream.next().await { 338 | downloaded.extend(b.unwrap().as_ref()); 339 | } 340 | 341 | // Compare downloaded content with the actual file 342 | let mut art_file = File::open(&artifact_path()).expect("failed to open artifact"); 343 | let mut expected = Vec::new(); 344 | art_file 345 | .read_to_end(&mut expected) 346 | .expect("failed to read artifact"); 347 | 348 | assert_eq!(downloaded, expected); 349 | } 350 | 351 | // Download artifact using the stream API 352 | let stream = art 353 | .download_stream() 354 | .await 355 | .expect("failed to get download stream"); 356 | check_download(Box::new(stream)).await; 357 | 358 | cfg_if::cfg_if! { 359 | if #[cfg(feature = "hash-md5")] { 360 | let stream = art 361 | .download_stream_with_md5_check() 362 | .await 363 | .expect("failed to get download stream"); 364 | check_download(Box::new(stream)).await; 365 | } 366 | } 367 | 368 | cfg_if::cfg_if!
{ 369 | if #[cfg(feature = "hash-sha1")] { 370 | let stream = art 371 | .download_stream_with_sha1_check() 372 | .await 373 | .expect("failed to get download stream"); 374 | check_download(Box::new(stream)).await; 375 | } 376 | } 377 | 378 | cfg_if::cfg_if! { 379 | if #[cfg(feature = "hash-sha256")] { 380 | let stream = art 381 | .download_stream_with_sha256_check() 382 | .await 383 | .expect("failed to get download stream"); 384 | check_download(Box::new(stream)).await; 385 | } 386 | } 387 | } 388 | 389 | #[cfg(feature = "hash-digest")] 390 | #[tokio::test] 391 | async fn wrong_checksums() { 392 | use assert_matches::assert_matches; 393 | use hawkbit::ddi::ChecksumType; 394 | 395 | init(); 396 | 397 | let server = ServerBuilder::default().build(); 398 | let (client, target) = add_target(&server, "Target1"); 399 | 400 | target.push_deployment(get_deployment(false)); 401 | let reply = client.poll().await.expect("poll failed"); 402 | 403 | let update = reply.update().expect("missing update"); 404 | let update = update.fetch().await.expect("failed to fetch update info"); 405 | let chunk = update.chunks().next().unwrap(); 406 | let art = chunk.artifacts().next().unwrap(); 407 | 408 | let out_dir = TempDir::new("test-hawkbitrs").expect("Failed to create temp dir"); 409 | let downloaded = art 410 | .download(out_dir.path()) 411 | .await 412 | .expect("failed to download artifact"); 413 | 414 | #[cfg(feature = "hash-md5")] 415 | assert_matches!( 416 | downloaded.check_md5().await, 417 | Err(Error::ChecksumError(ChecksumType::Md5)) 418 | ); 419 | #[cfg(feature = "hash-sha1")] 420 | assert_matches!( 421 | downloaded.check_sha1().await, 422 | Err(Error::ChecksumError(ChecksumType::Sha1)) 423 | ); 424 | #[cfg(feature = "hash-sha256")] 425 | assert_matches!( 426 | downloaded.check_sha256().await, 427 | Err(Error::ChecksumError(ChecksumType::Sha256)) 428 | ); 429 | 430 | cfg_if::cfg_if! { 431 | if #[cfg(feature = "hash-md5")] { 432 | let stream = art 433 | .download_stream_with_md5_check() 434 | .await 435 | .expect("failed to get download stream"); 436 | let end = stream.skip_while(|b| future::ready(b.is_ok())).next().await; 437 | assert_matches!(end, Some(Err(Error::ChecksumError(ChecksumType::Md5)))); 438 | } 439 | } 440 | 441 | cfg_if::cfg_if! { 442 | if #[cfg(feature = "hash-sha1")] { 443 | let stream = art 444 | .download_stream_with_sha1_check() 445 | .await 446 | .expect("failed to get download stream"); 447 | let end = stream.skip_while(|b| future::ready(b.is_ok())).next().await; 448 | assert_matches!(end, Some(Err(Error::ChecksumError(ChecksumType::Sha1)))); 449 | } 450 | } 451 | 452 | cfg_if::cfg_if! 
{ 453 | if #[cfg(feature = "hash-sha256")] { 454 | let stream = art 455 | .download_stream_with_sha256_check() 456 | .await 457 | .expect("failed to get download stream"); 458 | let end = stream.skip_while(|b| future::ready(b.is_ok())).next().await; 459 | assert_matches!(end, Some(Err(Error::ChecksumError(ChecksumType::Sha256)))); 460 | } 461 | } 462 | } 463 | 464 | #[tokio::test] 465 | async fn cancel_action() { 466 | init(); 467 | 468 | let server = ServerBuilder::default().build(); 469 | let (client, target) = add_target(&server, "Target1"); 470 | target.cancel_action("10"); 471 | 472 | let reply = client.poll().await.expect("poll failed"); 473 | assert!(reply.config_data_request().is_none()); 474 | assert!(reply.update().is_none()); 475 | let cancel_action = reply.cancel_action().expect("missing cancel action"); 476 | 477 | let id = cancel_action 478 | .id() 479 | .await 480 | .expect("failed to fetch cancel action id"); 481 | assert_eq!(id, "10"); 482 | 483 | assert_eq!(target.poll_hits(), 1); 484 | assert_eq!(target.cancel_action_hits(), 1); 485 | 486 | let mut mock = target.expect_cancel_feedback( 487 | &id, 488 | Execution::Proceeding, 489 | Finished::None, 490 | vec!["Cancelling"], 491 | ); 492 | assert_eq!(mock.hits(), 0); 493 | 494 | cancel_action 495 | .send_feedback(Execution::Proceeding, Finished::None, vec!["Cancelling"]) 496 | .await 497 | .expect("Failed to send feedback"); 498 | assert_eq!(mock.hits(), 1); 499 | mock.delete(); 500 | } 501 | -------------------------------------------------------------------------------- /hawkbit_mock/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "hawkbit_mock" 3 | version = "0.6.0" 4 | authors = ["Guillaume Desmottes "] 5 | edition = "2018" 6 | categories = ["development-tools::testing"] 7 | description = "Mock server implementation of Eclipse hawkBit" 8 | license = "MIT OR Apache-2.0" 9 | readme = "README.md" 10 | repository = "https://github.com/collabora/hawkbit-rs" 11 | documentation = "https://docs.rs/hawkbit/" 12 | 13 | [dependencies] 14 | hawkbit = { version = "0.6.0", path = "../hawkbit/" } 15 | httpmock = "0.5.4" 16 | serde = { version = "1.0", features = ["derive"] } 17 | serde_json = "1.0" 18 | -------------------------------------------------------------------------------- /hawkbit_mock/README.md: -------------------------------------------------------------------------------- 1 | # hawkbit_mock 2 | 3 | Mock server implementation of [Eclipse hawkBit](https://www.eclipse.org/hawkbit/) 4 | using [httpmock](https://crates.io/crates/httpmock). 5 | 6 | This mock is used to test the [hawkbit crate](https://crates.io/crates/hawkbit) 7 | but can also be useful to test any `hawkBit` client. 8 | So far only the [Direct Device Integration API](https://www.eclipse.org/hawkbit/apis/ddi_api/) 9 | is implemented. 10 | 11 | ## Documentation 12 | 13 | See the [crate documentation](https://docs.rs/hawkbit_mock/). -------------------------------------------------------------------------------- /hawkbit_mock/src/ddi.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | //! [Direct Device Integration](https://www.eclipse.org/hawkbit/apis/ddi_api/) mock server. 5 | //! 6 | //! This module provides a hawkBit mock server implementing the [DDI API](https://www.eclipse.org/hawkbit/apis/ddi_api/). 7 | //! It can be instrumented to test any hawkbit client. 
8 | //! 9 | //! # Examples 10 | //! 11 | //! ``` 12 | //! use hawkbit_mock::ddi::ServerBuilder; 13 | //! 14 | //! let server = ServerBuilder::default().build(); 15 | //! let target = server.add_target("Target1"); 16 | //! ``` 17 | //! 18 | //! You can then call [`Target::request_config`] or [`Target::push_deployment`] 19 | //! to interact with the server. 20 | //! 21 | //! Check the hawkbit crate for actual tests using this mock server. 22 | 23 | // FIXME: set link to hawkbit/tests/tests.rs once we have the final public repo 24 | 25 | use std::rc::Rc; 26 | use std::{ 27 | cell::{Cell, RefCell}, 28 | path::PathBuf, 29 | }; 30 | 31 | use httpmock::{ 32 | Method::{GET, POST, PUT}, 33 | MockRef, MockRefExt, MockServer, 34 | }; 35 | use serde_json::{json, Map, Value}; 36 | 37 | use hawkbit::ddi::{Execution, Finished, MaintenanceWindow, Type}; 38 | 39 | /// Builder of [`Server`]. 40 | /// 41 | /// # Examples 42 | /// 43 | /// ``` 44 | /// use hawkbit_mock::ddi::ServerBuilder; 45 | /// 46 | /// let server = ServerBuilder::default().build(); 47 | /// ``` 48 | pub struct ServerBuilder { 49 | tenant: String, 50 | } 51 | 52 | impl Default for ServerBuilder { 53 | fn default() -> Self { 54 | Self { 55 | tenant: "DEFAULT".into(), 56 | } 57 | } 58 | } 59 | 60 | impl ServerBuilder { 61 | /// Set the tenant of the server, defaults to `DEFAULT`. 62 | pub fn tenant(self, tenant: &str) -> Self { 63 | let mut builder = self; 64 | builder.tenant = tenant.to_string(); 65 | builder 66 | } 67 | 68 | /// Create the [`Server`]. 69 | pub fn build(self) -> Server { 70 | Server { 71 | server: Rc::new(MockServer::start()), 72 | tenant: self.tenant, 73 | } 74 | } 75 | } 76 | 77 | /// Mock DDI server instance. 78 | pub struct Server { 79 | /// The tenant of the server. 80 | pub tenant: String, 81 | server: Rc<MockServer>, 82 | } 83 | 84 | impl Server { 85 | /// The base URL of the server, such as `http://my-server.com:8080` 86 | pub fn base_url(&self) -> String { 87 | self.server.base_url() 88 | } 89 | 90 | /// Add a new target named `name` to the server. 91 | pub fn add_target(&self, name: &str) -> Target { 92 | Target::new(name, &self.server, &self.tenant) 93 | } 94 | } 95 | 96 | /// A configured device the server can request configuration for and push updates to. 97 | pub struct Target { 98 | /// The name of the target. 99 | pub name: String, 100 | /// The secret authentication token used to identify the target on the server.
101 | pub key: String, 102 | server: Rc<MockServer>, 103 | tenant: String, 104 | poll: Cell<usize>, 105 | config_data: RefCell<Option<PendingAction>>, 106 | deployment: RefCell<Option<PendingAction>>, 107 | cancel_action: RefCell<Option<PendingAction>>, 108 | } 109 | 110 | impl Target { 111 | fn new(name: &str, server: &Rc<MockServer>, tenant: &str) -> Self { 112 | let key = format!("Key{}", name); 113 | 114 | let poll = Self::create_poll(server, tenant, name, &key, None, None, None); 115 | Target { 116 | name: name.to_string(), 117 | key, 118 | server: server.clone(), 119 | tenant: tenant.to_string(), 120 | poll: Cell::new(poll), 121 | config_data: RefCell::new(None), 122 | deployment: RefCell::new(None), 123 | cancel_action: RefCell::new(None), 124 | } 125 | } 126 | 127 | fn create_poll( 128 | server: &MockServer, 129 | tenant: &str, 130 | name: &str, 131 | key: &str, 132 | expected_config_data: Option<&PendingAction>, 133 | deployment: Option<&PendingAction>, 134 | cancel_action: Option<&PendingAction>, 135 | ) -> usize { 136 | let mut links = Map::new(); 137 | 138 | if let Some(pending) = expected_config_data { 139 | links.insert("configData".into(), json!({ "href": pending.path })); 140 | } 141 | if let Some(pending) = deployment { 142 | links.insert("deploymentBase".into(), json!({ "href": pending.path })); 143 | } 144 | if let Some(pending) = cancel_action { 145 | links.insert("cancelAction".into(), json!({ "href": pending.path })); 146 | } 147 | 148 | let response = json!({ 149 | "config": { 150 | "polling": { 151 | "sleep": "00:01:00" 152 | } 153 | }, 154 | "_links": links 155 | }); 156 | 157 | let mock = server.mock(|when, then| { 158 | when.method(GET) 159 | .path(format!("/{}/controller/v1/{}", tenant, name)) 160 | .header("Authorization", &format!("TargetToken {}", key)); 161 | 162 | then.status(200) 163 | .header("Content-Type", "application/json") 164 | .json_body(response); 165 | }); 166 | 167 | mock.id() 168 | } 169 | 170 | fn update_poll(&self) { 171 | let old = self.poll.replace(Self::create_poll( 172 | &self.server, 173 | &self.tenant, 174 | &self.name, 175 | &self.key, 176 | self.config_data.borrow().as_ref(), 177 | self.deployment.borrow().as_ref(), 178 | self.cancel_action.borrow().as_ref(), 179 | )); 180 | 181 | let mut old = MockRef::new(old, &self.server); 182 | old.delete(); 183 | } 184 | 185 | /// Request the target to upload its configuration to the server. 186 | /// One can then use [`Target::config_data_hits`] to check that the client 187 | /// uploaded its configuration and that it matches the one passed as `expected_config_data`.
188 | /// 189 | /// # Examples 190 | /// 191 | /// ``` 192 | /// use hawkbit_mock::ddi::ServerBuilder; 193 | /// use serde_json::json; 194 | /// 195 | /// let server = ServerBuilder::default().build(); 196 | /// let target = server.add_target("Target1"); 197 | /// let expected_config_data = json!({ 198 | /// "mode" : "merge", 199 | /// "data" : { 200 | /// "awesome" : true, 201 | /// }, 202 | /// "status" : { 203 | /// "result" : { 204 | /// "finished" : "success" 205 | /// }, 206 | /// "execution" : "closed", 207 | /// "details" : [ "Some stuffs" ] 208 | /// } 209 | /// }); 210 | /// target.request_config(expected_config_data); 211 | /// 212 | /// // Client handles the request and uploads its configuration 213 | /// //assert_eq!(target.config_data_hits(), 1); 214 | /// ``` 215 | pub fn request_config(&self, expected_config_data: Value) { 216 | let config_path = self 217 | .server 218 | .url(format!("/DEFAULT/controller/v1/{}/configData", self.name)); 219 | 220 | let config_data = self.server.mock(|when, then| { 221 | when.method(PUT) 222 | .path(format!("/DEFAULT/controller/v1/{}/configData", self.name)) 223 | .header("Content-Type", "application/json") 224 | .header("Authorization", &format!("TargetToken {}", self.key)) 225 | .json_body(expected_config_data); 226 | 227 | then.status(200); 228 | }); 229 | 230 | self.config_data.replace(Some(PendingAction { 231 | server: self.server.clone(), 232 | path: config_path, 233 | mock: config_data.id(), 234 | })); 235 | 236 | self.update_poll(); 237 | } 238 | 239 | /// Push a deployment update to the target. 240 | /// One can then use [`Target::deployment_hits`] to check that the client 241 | /// retrieved the deployment details as expected. 242 | /// 243 | /// # Examples 244 | /// 245 | /// ``` 246 | /// use std::path::Path; 247 | /// use hawkbit_mock::ddi::{ChunkProtocol, ServerBuilder, DeploymentBuilder}; 248 | /// use hawkbit::ddi::{Type, MaintenanceWindow}; 249 | /// 250 | /// let server = ServerBuilder::default().build(); 251 | /// let target = server.add_target("Target1"); 252 | /// 253 | /// let deployment = DeploymentBuilder::new("10", Type::Forced, Type::Attempt) 254 | /// .maintenance_window(MaintenanceWindow::Available) 255 | /// .chunk( 256 | /// ChunkProtocol::BOTH, 257 | /// "app", 258 | /// "1.0", 259 | /// "some-chunk", 260 | /// vec![( 261 | /// Path::new("README.md").to_path_buf(), 262 | /// "42cf69051362d8fa2883cc9b56799fa4", 263 | /// "16da060b7ff443a6b3a7662ad21a9b3023c12627", 264 | /// "5010fbc2769bfc655d15aa9a883703d5b19a320732d37f70703ab3e3b416a602", 265 | /// )], 266 | /// ) 267 | /// .build(); 268 | /// target.push_deployment(deployment); 269 | /// 270 | /// // Client handles the update and fetches the details 271 | /// //assert_eq!(target.deployment_hits(), 1); 272 | /// ``` 273 | pub fn push_deployment(&self, deploy: Deployment) { 274 | let deploy_path = self.server.url(format!( 275 | "/DEFAULT/controller/v1/{}/deploymentBase/{}", 276 | self.name, deploy.id 277 | )); 278 | 279 | let base_url = self.server.url("/download"); 280 | let response = deploy.json(&base_url); 281 | 282 | let deploy_mock = self.server.mock(|when, then| { 283 | when.method(GET) 284 | .path(format!( 285 | "/DEFAULT/controller/v1/{}/deploymentBase/{}", 286 | self.name, deploy.id 287 | )) 288 | .header("Authorization", &format!("TargetToken {}", self.key)); 289 | 290 | then.status(200) 291 | .header("Content-Type", "application/json") 292 | .json_body(response); 293 | }); 294 | 295 | // Serve the artifacts 296 | for chunk in deploy.chunks.iter() { 297 |
for (artifact, _md5, _sha1, _sha256) in chunk.artifacts.iter() { 298 | let file_name = artifact.file_name().unwrap().to_str().unwrap(); 299 | let path = format!("/download/{}", file_name); 300 | 301 | self.server.mock(|when, then| { 302 | when.method(GET) 303 | .path(path) 304 | .header("Authorization", &format!("TargetToken {}", self.key)); 305 | 306 | then.status(200).body_from_file(artifact.to_str().unwrap()); 307 | }); 308 | } 309 | } 310 | 311 | self.deployment.replace(Some(PendingAction { 312 | server: self.server.clone(), 313 | path: deploy_path, 314 | mock: deploy_mock.id(), 315 | })); 316 | 317 | self.update_poll(); 318 | } 319 | 320 | /// Configure the server to expect deployment feedback from the target. 321 | /// One can then check the feedback has actually been received using 322 | /// `hits()` on the returned object. 323 | /// 324 | /// # Examples 325 | /// 326 | /// ``` 327 | /// use hawkbit_mock::ddi::{ServerBuilder, DeploymentBuilder}; 328 | /// use hawkbit::ddi::{Execution, Finished}; 329 | /// use serde_json::json; 330 | /// 331 | /// let server = ServerBuilder::default().build(); 332 | /// let target = server.add_target("Target1"); 333 | /// let mut mock = target.expect_deployment_feedback( 334 | /// "10", 335 | /// Execution::Closed, 336 | /// Finished::Success, 337 | /// Some(json!({"awesome": true})), 338 | /// vec!["Done"], 339 | /// ); 340 | /// assert_eq!(mock.hits(), 0); 341 | /// 342 | /// // Client sends the feedback 343 | /// //assert_eq!(mock.hits(), 1); 344 | /// ``` 345 | pub fn expect_deployment_feedback( 346 | &self, 347 | deployment_id: &str, 348 | execution: Execution, 349 | finished: Finished, 350 | progress: Option<Value>, 351 | details: Vec<&str>, 352 | ) -> MockRef<'_> { 353 | self.server.mock(|when, then| { 354 | let expected = match progress { 355 | Some(progress) => json!({ 356 | "id": deployment_id, 357 | "status": { 358 | "result": { 359 | "progress": progress, 360 | "finished": finished 361 | }, 362 | "execution": execution, 363 | "details": details, 364 | }, 365 | }), 366 | None => json!({ 367 | "id": deployment_id, 368 | "status": { 369 | "result": { 370 | "finished": finished 371 | }, 372 | "execution": execution, 373 | "details": details, 374 | }, 375 | }), 376 | }; 377 | 378 | when.method(POST) 379 | .path(format!( 380 | "/{}/controller/v1/{}/deploymentBase/{}/feedback", 381 | self.tenant, self.name, deployment_id 382 | )) 383 | .header("Authorization", &format!("TargetToken {}", self.key)) 384 | .header("Content-Type", "application/json") 385 | .json_body(expected); 386 | 387 | then.status(200); 388 | }) 389 | } 390 | 391 | /// Push a cancel action to the target. 392 | /// One can then use [`Target::cancel_action_hits`] to check that the client 393 | /// fetched the details about the cancel action.
394 | /// 395 | /// # Examples 396 | /// 397 | /// ``` 398 | /// use hawkbit_mock::ddi::ServerBuilder; 399 | /// 400 | /// let server = ServerBuilder::default().build(); 401 | /// let target = server.add_target("Target1"); 402 | /// target.cancel_action("5"); 403 | /// 404 | /// // Client fetches details about the cancel action 405 | /// //assert_eq!(target.cancel_action_hits(), 1); 406 | /// ``` 407 | pub fn cancel_action(&self, id: &str) { 408 | let cancel_path = self.server.url(format!( 409 | "/DEFAULT/controller/v1/{}/cancelAction/{}", 410 | self.name, id 411 | )); 412 | 413 | let response = json!({ 414 | "id": id, 415 | "cancelAction": { 416 | "stopId": id 417 | } 418 | }); 419 | 420 | let cancel_mock = self.server.mock(|when, then| { 421 | when.method(GET) 422 | .path(format!( 423 | "/DEFAULT/controller/v1/{}/cancelAction/{}", 424 | self.name, id 425 | )) 426 | .header("Authorization", &format!("TargetToken {}", self.key)); 427 | 428 | then.status(200) 429 | .header("Content-Type", "application/json") 430 | .json_body(response); 431 | }); 432 | 433 | self.cancel_action.replace(Some(PendingAction { 434 | server: self.server.clone(), 435 | path: cancel_path, 436 | mock: cancel_mock.id(), 437 | })); 438 | 439 | self.update_poll(); 440 | } 441 | 442 | /// Configure the server to expect cancel feedback from the target. 443 | /// One can then check the feedback has actually been received using 444 | /// `hits()` on the returned object. 445 | /// 446 | /// # Examples 447 | /// 448 | /// ``` 449 | /// use hawkbit_mock::ddi::{ServerBuilder, DeploymentBuilder}; 450 | /// use hawkbit::ddi::{Execution, Finished}; 451 | /// use serde_json::json; 452 | /// 453 | /// let server = ServerBuilder::default().build(); 454 | /// let target = server.add_target("Target1"); 455 | /// target.cancel_action("10"); 456 | /// 457 | /// let mut mock = target.expect_cancel_feedback( 458 | /// "10", 459 | /// Execution::Closed, 460 | /// Finished::Success, 461 | /// vec!["Cancelled"], 462 | /// ); 463 | /// assert_eq!(mock.hits(), 0); 464 | /// 465 | /// //Client send the feedback 466 | /// //assert_eq!(mock.hits(), 1); 467 | /// ``` 468 | pub fn expect_cancel_feedback( 469 | &self, 470 | cancel_id: &str, 471 | execution: Execution, 472 | finished: Finished, 473 | details: Vec<&str>, 474 | ) -> MockRef<'_> { 475 | self.server.mock(|when, then| { 476 | let expected = json!({ 477 | "id": cancel_id, 478 | "status": { 479 | "result": { 480 | "finished": finished 481 | }, 482 | "execution": execution, 483 | "details": details, 484 | }, 485 | }); 486 | 487 | when.method(POST) 488 | .path(format!( 489 | "/{}/controller/v1/{}/cancelAction/{}/feedback", 490 | self.tenant, self.name, cancel_id 491 | )) 492 | .header("Authorization", &format!("TargetToken {}", self.key)) 493 | .header("Content-Type", "application/json") 494 | .json_body(expected); 495 | 496 | then.status(200); 497 | }) 498 | } 499 | 500 | /// Return the number of times the poll API has been called by the client. 501 | pub fn poll_hits(&self) -> usize { 502 | let mock = MockRef::new(self.poll.get(), &self.server); 503 | mock.hits() 504 | } 505 | 506 | /// Return the number of times the target configuration has been uploaded by the client. 507 | pub fn config_data_hits(&self) -> usize { 508 | self.config_data.borrow().as_ref().map_or(0, |m| { 509 | let mock = MockRef::new(m.mock, &self.server); 510 | mock.hits() 511 | }) 512 | } 513 | 514 | /// Return the number of times the deployment details have been fetched by the client. 
515 | pub fn deployment_hits(&self) -> usize { 516 | self.deployment.borrow().as_ref().map_or(0, |m| { 517 | let mock = MockRef::new(m.mock, &self.server); 518 | mock.hits() 519 | }) 520 | } 521 | 522 | /// Return the number of times the cancel action URL has been fetched by the client. 523 | pub fn cancel_action_hits(&self) -> usize { 524 | self.cancel_action.borrow().as_ref().map_or(0, |m| { 525 | let mock = MockRef::new(m.mock, &self.server); 526 | mock.hits() 527 | }) 528 | } 529 | } 530 | 531 | struct PendingAction { 532 | server: Rc<MockServer>, 533 | mock: usize, 534 | path: String, 535 | } 536 | 537 | impl Drop for PendingAction { 538 | fn drop(&mut self) { 539 | let mut mock = MockRef::new(self.mock, &self.server); 540 | mock.delete(); 541 | } 542 | } 543 | 544 | /// Builder of [`Deployment`]. 545 | pub struct DeploymentBuilder { 546 | id: String, 547 | download_type: Type, 548 | update_type: Type, 549 | maintenance_window: Option<MaintenanceWindow>, 550 | chunks: Vec<Chunk>, 551 | } 552 | 553 | /// A pending deployment update pushed to the target. 554 | pub struct Deployment { 555 | /// The id of the deployment 556 | pub id: String, 557 | download_type: Type, 558 | update_type: Type, 559 | maintenance_window: Option<MaintenanceWindow>, 560 | chunks: Vec<Chunk>, 561 | } 562 | 563 | impl DeploymentBuilder { 564 | /// Start building a new [`Deployment`]. 565 | pub fn new(id: &str, download_type: Type, update_type: Type) -> Self { 566 | Self { 567 | id: id.to_string(), 568 | download_type, 569 | update_type, 570 | maintenance_window: None, 571 | chunks: Vec::new(), 572 | } 573 | } 574 | 575 | /// Set the maintenance window status of the deployment. 576 | pub fn maintenance_window(self, maintenance_window: MaintenanceWindow) -> Self { 577 | let mut builder = self; 578 | builder.maintenance_window = Some(maintenance_window); 579 | builder 580 | } 581 | 582 | /// Add a new software chunk to the deployment. 583 | /// # Arguments 584 | /// * `protocol`: The protocols over which chunks are downloadable 585 | /// * `part`: the type of chunk, e.g. `firmware`, `bundle`, `app` 586 | /// * `version`: software version of the chunk 587 | /// * `name`: name of the chunk 588 | /// * `artifacts`: a [`Vec`] of tuples containing: 589 | /// * the local path of the file; 590 | /// * the `md5sum` of the file; 591 | /// * the `sha1sum` of the file; 592 | /// * the `sha256sum` of the file. 593 | pub fn chunk( 594 | self, 595 | protocol: ChunkProtocol, 596 | part: &str, 597 | version: &str, 598 | name: &str, 599 | artifacts: Vec<(PathBuf, &str, &str, &str)>, 600 | ) -> Self { 601 | let mut builder = self; 602 | 603 | let artifacts = artifacts 604 | .into_iter() 605 | .map(|(path, md5, sha1, sha256)| { 606 | assert!(path.exists()); 607 | (path, md5.to_string(), sha1.to_string(), sha256.to_string()) 608 | }) 609 | .collect(); 610 | 611 | let chunk = Chunk { 612 | protocol, 613 | part: part.to_string(), 614 | version: version.to_string(), 615 | name: name.to_string(), 616 | artifacts, 617 | }; 618 | builder.chunks.push(chunk); 619 | 620 | builder 621 | } 622 | 623 | /// Create the [`Deployment`].
624 | pub fn build(self) -> Deployment { 625 | Deployment { 626 | id: self.id, 627 | download_type: self.download_type, 628 | update_type: self.update_type, 629 | maintenance_window: self.maintenance_window, 630 | chunks: self.chunks, 631 | } 632 | } 633 | } 634 | 635 | /// Protocol(s) over which chunks are served 636 | pub enum ChunkProtocol { 637 | /// both http and https 638 | BOTH, 639 | /// http only 640 | HTTP, 641 | /// https only 642 | HTTPS, 643 | } 644 | 645 | impl ChunkProtocol { 646 | /// Return whether the http protocol is used for downloads 647 | pub fn http(&self) -> bool { 648 | matches!(self, Self::BOTH | Self::HTTP) 649 | } 650 | 651 | /// Return whether the https protocol is used for downloads 652 | pub fn https(&self) -> bool { 653 | matches!(self, Self::BOTH | Self::HTTPS) 654 | } 655 | } 656 | 657 | /// Software chunk of an update. 658 | pub struct Chunk { 659 | protocol: ChunkProtocol, 660 | part: String, 661 | version: String, 662 | name: String, 663 | artifacts: Vec<(PathBuf, String, String, String)>, // (path, md5, sha1, sha256) 664 | } 665 | 666 | impl Chunk { 667 | fn json(&self, base_url: &str) -> serde_json::Value { 668 | let artifacts: Vec<serde_json::Value> = self 669 | .artifacts 670 | .iter() 671 | .map(|(path, md5, sha1, sha256)| { 672 | let meta = path.metadata().unwrap(); 673 | let file_name = path.file_name().unwrap().to_str().unwrap(); 674 | let download_url = format!("{}/{}", base_url, file_name); 675 | // TODO: the md5 url is not served by the http server 676 | let md5_url = format!("{}.MD5SUM", download_url); 677 | 678 | let mut links = serde_json::Map::new(); 679 | 680 | if self.protocol.https() { 681 | links.insert("download".to_string(), json!({ "href": download_url })); 682 | links.insert("md5sum".to_string(), json!({ "href": md5_url })); 683 | } 684 | if self.protocol.http() { 685 | links.insert("download-http".to_string(), json!({ "href": download_url })); 686 | links.insert("md5sum-http".to_string(), json!({ "href": md5_url })); 687 | } 688 | 689 | json!({ 690 | "filename": file_name, 691 | "hashes": { 692 | "sha1": sha1, 693 | "md5": md5, 694 | "sha256": sha256, 695 | }, 696 | "size": meta.len(), 697 | "_links": links, 698 | }) 699 | }) 700 | .collect(); 701 | 702 | json!({ 703 | "part": self.part, 704 | "version": self.version, 705 | "name": self.name, 706 | "artifacts": artifacts, 707 | }) 708 | } 709 | } 710 | 711 | impl Deployment { 712 | fn json(&self, base_url: &str) -> serde_json::Value { 713 | let chunks: Vec<serde_json::Value> = self.chunks.iter().map(|c| c.json(base_url)).collect(); 714 | 715 | let mut j = json!({ 716 | "id": self.id, 717 | "deployment": { 718 | "download": self.download_type, 719 | "update": self.update_type, 720 | "chunks": chunks, 721 | } 722 | }); 723 | 724 | if let Some(maintenance_window) = &self.maintenance_window { 725 | let d = j.get_mut("deployment").unwrap().as_object_mut().unwrap(); 726 | d.insert("maintenanceWindow".to_string(), json!(maintenance_window)); 727 | } 728 | 729 | j 730 | } 731 | } 732 | -------------------------------------------------------------------------------- /hawkbit_mock/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2020, Collabora Ltd. 2 | // SPDX-License-Identifier: MIT OR Apache-2.0 3 | 4 | #![warn(missing_docs)] 5 | 6 | //! # hawkbit_mock 7 | //! 8 | //! Mock server implementation of [Eclipse hawkBit](https://www.eclipse.org/hawkbit/) 9 | //! using [httpmock](https://crates.io/crates/httpmock). 10 | 11 | //!
This mock is used to test the `hawkbit` crate but can also be useful to test any `hawkBit` client. 12 | //! So far only the [Direct Device Integration API](https://www.eclipse.org/hawkbit/apis/ddi_api/) 13 | //! is implemented, see the [`ddi`] module. 14 | 15 | pub mod ddi; 16 | --------------------------------------------------------------------------------