├── .dockerignore ├── .github └── workflows │ └── flowzone.yml ├── .gitignore ├── .rustc_info.json ├── .versionbot └── CHANGELOG.yml ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── Cross.toml ├── Dockerfile ├── README.md ├── compile-rpi-zero ├── config └── sample-backup.yaml ├── package-lock.json ├── src ├── common.rs ├── common │ ├── api_calls.rs │ ├── debug.rs │ ├── defs.rs │ ├── disk_util.rs │ ├── disk_util │ │ ├── gzip_file.rs │ │ ├── gzip_stream.rs │ │ ├── image_file.rs │ │ └── plain_file.rs │ ├── error.rs │ ├── logging.rs │ ├── loop_device.rs │ ├── options.rs │ ├── stage2_config.rs │ ├── stream_progress.rs │ ├── system.rs │ └── system │ │ └── fd.rs ├── init.rs ├── macros.rs ├── main.rs ├── stage1.rs ├── stage1 │ ├── backup.rs │ ├── backup │ │ ├── archiver.rs │ │ ├── config.rs │ │ ├── ext_tar_archiver.rs │ │ └── rust_tar_archiver.rs │ ├── block_device_info.rs │ ├── block_device_info │ │ ├── block_device.rs │ │ ├── device.rs │ │ ├── mount.rs │ │ └── partition.rs │ ├── checks.rs │ ├── defs.rs │ ├── device.rs │ ├── device_impl.rs │ ├── device_impl │ │ ├── beaglebone.rs │ │ ├── dummy.rs │ │ ├── intel_nuc.rs │ │ ├── jetson_xavier.rs │ │ └── raspberrypi.rs │ ├── exe_copy.rs │ ├── image_retrieval.rs │ ├── migrate_info.rs │ ├── migrate_info │ │ └── balena_cfg_json.rs │ ├── utils.rs │ ├── wifi_config.rs │ └── wifi_config │ │ ├── connmgr_parser.rs │ │ ├── nwmgr_parser.rs │ │ └── wpa_parser.rs └── stage2.rs ├── test_data └── part.img.gz └── versionist.conf.js /.dockerignore: -------------------------------------------------------------------------------- 1 | /work 2 | /test_data 3 | /target 4 | -------------------------------------------------------------------------------- /.github/workflows/flowzone.yml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | types: [opened, synchronize, closed] 4 | branches: [main, master] 5 | # allow external contributions to use secrets within trusted code 6 | 
pull_request_target: 7 | types: [opened, synchronize, closed] 8 | branches: [main, master] 9 | 10 | jobs: 11 | flowzone: 12 | name: Flowzone 13 | uses: product-os/flowzone/.github/workflows/flowzone.yml@master 14 | # prevent duplicate workflow executions for pull_request and pull_request_target 15 | if: | 16 | ( 17 | github.event.pull_request.head.repo.full_name == github.repository && 18 | github.event_name == 'pull_request' 19 | ) || ( 20 | github.event.pull_request.head.repo.full_name != github.repository && 21 | github.event_name == 'pull_request_target' 22 | ) 23 | secrets: inherit 24 | with: 25 | rust_binaries: true 26 | rust_toolchain: 1.76 27 | cargo_targets: | 28 | aarch64-unknown-linux-musl, 29 | armv7-unknown-linux-musleabihf, 30 | arm-unknown-linux-musleabihf, 31 | x86_64-unknown-linux-musl, 32 | i686-unknown-linux-musl 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /work 2 | /target 3 | /.idea 4 | **/*.rs.bk 5 | /cache 6 | -------------------------------------------------------------------------------- /.rustc_info.json: -------------------------------------------------------------------------------- 1 | 
{"rustc_fingerprint":234914328391272487,"outputs":{"4476964694761187371":["___\nlib___.rlib\nlib___.so\nlib___.so\nlib___.a\nlib___.so\n/root/.rustup/toolchains/stable-x86_64-unknown-linux-gnu\ndebug_assertions\nproc_macro\ntarget_arch=\"x86_64\"\ntarget_endian=\"little\"\ntarget_env=\"gnu\"\ntarget_family=\"unix\"\ntarget_feature=\"fxsr\"\ntarget_feature=\"sse\"\ntarget_feature=\"sse2\"\ntarget_os=\"linux\"\ntarget_pointer_width=\"64\"\ntarget_vendor=\"unknown\"\nunix\n",""],"16929990295604814582":["___\n",""],"6930481218867274249":["___\nlib___.rlib\nlib___.so\nlib___.so\nlib___.a\nlib___.so\n/root/.rustup/toolchains/stable-x86_64-unknown-linux-gnu\ndebug_assertions\nproc_macro\ntarget_arch=\"arm\"\ntarget_endian=\"little\"\ntarget_env=\"gnu\"\ntarget_family=\"unix\"\ntarget_os=\"linux\"\ntarget_pointer_width=\"32\"\ntarget_vendor=\"unknown\"\nunix\n",""],"1164083562126845933":["rustc 1.45.2 (d3fb005a3 2020-07-31)\nbinary: rustc\ncommit-hash: d3fb005a39e62501b8b0b356166e515ae24e2e54\ncommit-date: 2020-07-31\nhost: x86_64-unknown-linux-gnu\nrelease: 1.45.2\nLLVM version: 10.0\n",""]},"successes":{}} -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | All notable changes to this project will be documented in this file 4 | automatically by Versionist. DO NOT EDIT THIS FILE MANUALLY! 5 | This project adheres to [Semantic Versioning](http://semver.org/). 
6 | 7 | ## v0.10.2 - 2025-02-24 8 | 9 | * If change device type, patch API after takeover completes [Ken Bannister] 10 | 11 | ## v0.10.1 - 2024-12-13 12 | 13 | * Allow for system-proxy boot directory absent in balenaOS source [Ken Bannister] 14 | 15 | ## v0.10.0 - 2024-12-10 16 | 17 | * Move api_calls module from stage1 to common [Ken Bannister] 18 | * Send progress messages to API if run within HUP context [Ken Bannister] 19 | 20 | ## v0.9.0 - 2024-12-04 21 | 22 | * Allow logging to internal disk as a workaround [Rahul Thakoor] 23 | 24 | ## v0.8.3 - 2024-07-01 25 | 26 | * Fix use of elapsed seconds [Ken Bannister] 27 | 28 | ## v0.8.2 - 2024-05-09 29 | 30 | * Simplify API Header Authorization [Rahul Thakoor] 31 | 32 | ## v0.8.1 - 2024-05-08 33 | 34 | * Stage1/block_device_info: Improve failure logs for partitions without filesystems [Alexandru Costache] 35 | 36 | ## v0.8.0 - 2024-05-08 37 | 38 | * Download raw images for flasher type device types [Rahul Thakoor] 39 | 40 | ## v0.7.0 - 2024-05-03 41 | 42 | * Check for log device before starting the migration [Leandro Motta Barros] 43 | * Refactor early checks to a separate module [Leandro Motta Barros] 44 | 45 | ## v0.6.0 - 2024-05-02 46 | 47 | * Add option to change device type [Rahul Thakoor] 48 | 49 | ## v0.5.2 - 2024-04-29 50 | 51 | * Handle race: when reading /proc/PID/status [Leandro Motta Barros] 52 | * Handle race: reading /proc/PID/root [Leandro Motta Barros] 53 | * Handle race: read_link when iterating over /proc/PID/fd/FD [Leandro Motta Barros] 54 | * Handle race: lstat when iterating over /proc/PID/fd/FD [Leandro Motta Barros] 55 | 56 | ## v0.5.1 - 2024-04-22 57 | 58 | * Refactor comparison of system connection locations [Leandro Motta Barros] 59 | * Refactor the migration code for consistency [Leandro Motta Barros] 60 | * Use `BALENA_OS_NAME` instead of hardcoding [Leandro Motta Barros] 61 | * Fix typos: "drectory" [Leandro Motta Barros] 62 | 63 | ## v0.5.0 - 2024-04-18 64 | 65 | * Use lddtree to check 
dependencies [Rahul Thakoor] 66 | 67 | ## v0.4.6 - 2024-04-15 68 | 69 | * Stage1: Compare system-connection files and their contents [Alexandru Costache] 70 | 71 | ## v0.4.5 - 2024-04-11 72 | 73 | * Stage1/stage2: Add support for migrating Jetson Xavier AGX, NX SD and NX eMMC devices [Alexandru Costache] 74 | * Add working balenaOS to balenaOS migration [Ken Bannister] 75 | 76 | ## v0.4.4 - 2024-03-14 77 | 78 | * Document how takeover works [Rahul Thakoor] 79 | 80 | ## v0.4.3 - 2024-03-14 81 | 82 | * Update building instructions [Leandro Motta Barros] 83 | 84 | ## v0.4.2 - 2024-03-07 85 | 86 | * Enable the crt-static target feature [Leandro Motta Barros] 87 | 88 | ## v0.4.1 - 2024-02-29 89 | 90 | * Make copy of `telinit` if a symlink to `init` [Leandro Motta Barros] 91 | 92 | ## v0.4.0 - 2024-02-28 93 | 94 | * Use /ping endpoint to check connection to API [Rahul Thakoor] 95 | 96 | ## v0.3.2 - 2024-02-27 97 | 98 | * Bug: fix command in readme [Aaron Shaw] 99 | * Update README.md [Niels R] 100 | 101 | ## v0.3.1 - 2024-02-26 102 | 103 | * Use latest Rust toolchain [Leandro Motta Barros] 104 | * Migrate from structopt to clap [Leandro Motta Barros] 105 | 106 | ## v0.3.0 - 2024-02-26 107 | 108 | * Log process list before killing processes [Leandro Motta Barros] 109 | * Allow running with unsupported target device types [Leandro Motta Barros] 110 | * Bugfix: Don't use hardcoded device type in message [Leandro Motta Barros] 111 | 112 | ## v0.2.0 - 2024-02-16 113 | 114 | * Update `get_os_version` to use /v6/release [Rahul Thakoor] 115 | 116 | ## v0.1.4 - 2024-02-13 117 | 118 | * Fix multiple typos [Leandro Motta Barros] 119 | * Update dependencies to fix security vulnerabilities [Leandro Motta Barros] 120 | 121 | ## v0.1.3 - 2024-02-12 122 | 123 | * Update reqwest to v0.11.24 [Rahul Thakoor] 124 | 125 | ## v0.1.2 - 2024-02-09 126 | 127 | * Fix get_mem_info() on 32-bit systems [Ken Bannister] 128 | 129 | ## v0.1.1 - 2024-02-06 130 | 131 | * Take into account that chars are 
signed also in 32-bit x86 [Leandro Motta Barros] 132 | * Fix typing in 32-bit architectures [Leandro Motta Barros] 133 | * Enable Flowzone for CI/CD [Kyle Harding] 134 | 135 | ## v0.1.0 - 2021-06-30 136 | 137 | * Delete CODEOWNERS [Kyle Harding] 138 | * Added support for gzipped internel config.json [Thomas Runte] 139 | * Removed library migrator - including all modules directly from main now [Thomas Runte] 140 | * Updated README.md and applied changes from clippy run [Thomas Runte] 141 | * Tested backup on RPI3 & beaglebone Added option --tar-internal and defaulting to external tar [Thomas Runte] 142 | * Enabled backup feature [Thomas Runte] 143 | * Update README.md, cleanup in Options and changes to block-device-info to allow log device filesystem types other than vfat [Thomas Runte] 144 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "takeover" 3 | description = "Brownfield device migration tool to balenaOS and balenaCloud" 4 | version = "0.10.2" 5 | authors = ["balena.io"] 6 | edition = "2018" 7 | publish = false 8 | 9 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 10 | 11 | [profile.release] 12 | opt-level = "s" 13 | 14 | [dependencies] 15 | 16 | [dependencies.libc] 17 | version = "0.2.70" 18 | 19 | [dependencies.finder] 20 | version = "0.1" 21 | 22 | [dependencies.file_diff] 23 | version = "1.0.0" 24 | 25 | [dependencies.reqwest] 26 | version = "0.11.24" 27 | features = ["blocking", "json"] 28 | 29 | [dependencies.url] 30 | version = "2.5.0" 31 | 32 | [dependencies.paw] 33 | version = "1.0" 34 | 35 | [dependencies.clap] 36 | version = "4.5.0" 37 | features = ["derive"] 38 | 39 | [dependencies.log] 40 | version = "0.4.8" 41 | features = ["std"] 42 | 43 | [dependencies.regex] 44 | version = "1.10.3" 45 | 46 | [dependencies.nix] 47 | version = "0.27.1" 48 | 
features = ["fs", "mount"] 49 | 50 | [dependencies.semver] 51 | version = "0.9.0" 52 | 53 | [dependencies.serde] 54 | version = "1.0.110" 55 | features = ["derive"] 56 | 57 | [dependencies.serde_yaml] 58 | version = "0.8.12" 59 | 60 | [dependencies.serde_json] 61 | version = "1.0.53" 62 | 63 | [dependencies.flate2] 64 | version = "1.0.14" 65 | 66 | [dependencies.mod_logger] 67 | version = "0.8.4" 68 | 69 | [dependencies.lazy_static] 70 | version = "1.4.0" 71 | 72 | [dependencies.rand] 73 | version = "0.7.3" 74 | 75 | [dependencies.cfg-if] 76 | version = "0.1" 77 | 78 | [dependencies.tar] 79 | version = "0.4" 80 | 81 | [dependencies.which] 82 | version = "6.0.0" 83 | 84 | [dependencies.openssl] 85 | version = "0.10.63" 86 | features = ["vendored"] 87 | 88 | [dependencies.lddtree] 89 | version = "0.3.4" 90 | 91 | [features] 92 | raspberrypi3 = [] 93 | raspberrypi4-64 = [] 94 | intel-nuc = [] 95 | jetson-xavier = [] 96 | 97 | [dependencies.gptman] 98 | version = "1.0.2" 99 | 100 | # Required to get past "ambiguous name" errors in pin-project-internal v0.4.17. 101 | # See https://github.com/taiki-e/pin-project/issues/337 102 | # Problem was triggered by addition of gptman. 103 | # This package seems foundational and important to keep updated. 
104 | [dependencies.futures] 105 | version = "0.3.30" 106 | -------------------------------------------------------------------------------- /Cross.toml: -------------------------------------------------------------------------------- 1 | [build.env] 2 | passthrough = ["RUSTFLAGS=-C target-feature=+crt-static"] 3 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | ENV INSIDE_DOCKER_CONTAINER 1 4 | 5 | # Install git and compilers, let's toss gnupg and reprepro in there so we can 6 | # use this container to make the apt repo as well 7 | RUN apt-get update \ 8 | && apt-get -y upgrade \ 9 | && apt-get install -y --no-install-recommends \ 10 | build-essential \ 11 | curl \ 12 | git \ 13 | pkg-config \ 14 | vim \ 15 | perl \ 16 | make \ 17 | && rm -rf /var/lib/apt/lists/* 18 | 19 | 20 | RUN mkdir /toolchain 21 | WORKDIR /toolchain 22 | 23 | # Check out Raspbian cross-compiler (this will work on *ALL* Raspberry Pi versions) 24 | RUN git clone --depth 1 git://github.com/raspberrypi/tools.git rpi-tools \ 25 | && rm -rf rpi-tools/.git 26 | ENV PATH "/toolchain/rpi-tools/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/bin/:${PATH}" 27 | 28 | # Create wrapper around gcc to point to rpi sysroot 29 | # Thanks @ https://github.com/herrernst/librespot/blob/build-rpi/.travis.yml 30 | RUN echo '#!/bin/sh\narm-linux-gnueabihf-gcc --sysroot /toolchain/rpi-tools/arm-bcm2708/arm-bcm2708hardfp-linux-gnueabi/arm-bcm2708hardfp-linux-gnueabi/sysroot "$@"' \ 31 | > rpi-tools/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/bin/gcc-wrapper \ 32 | && chmod +x rpi-tools/arm-bcm2708/gcc-linaro-arm-linux-gnueabihf-raspbian-x64/bin/gcc-wrapper \ 33 | && ln -s ld-linux.so.3 rpi-tools/arm-bcm2708/arm-bcm2708hardfp-linux-gnueabi/arm-bcm2708hardfp-linux-gnueabi/sysroot/lib/ld-linux-armhf.so.3 34 | 35 | ENV PKG_CONFIG_ALLOW_CROSS 1 
36 | ENV PKG_CONFIG_PATH "/toolchain/rpi-tools/arm-bcm2708/arm-bcm2708hardfp-linux-gnueabi/arm-bcm2708hardfp-linux-gnueabi/sysroot/lib/pkgconfig" 37 | 38 | RUN curl https://sh.rustup.rs -sSf | sh -s -- -y 39 | ENV PATH="/root/.cargo/bin/:${PATH}" 40 | # ENV CARGO_TARGET_DIR "/build" 41 | RUN mkdir /cargo-home 42 | ENV CARGO_HOME "/cargo-home" 43 | 44 | RUN mkdir -p /.cargo 45 | RUN echo '[target.arm-unknown-linux-gnueabihf]\nlinker = "gcc-wrapper"' > /.cargo/config 46 | RUN rustup target add arm-unknown-linux-gnueabihf 47 | 48 | WORKDIR /build 49 | -------------------------------------------------------------------------------- /compile-rpi-zero: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | docker build -t cross-pi-zero . 3 | docker run --rm \ 4 | -it \ 5 | -v $(pwd):/build \ 6 | -v rust-cache:/cargo-home \ 7 | cross-pi-zero \ 8 | cargo build --release --target arm-unknown-linux-gnueabihf 9 | -------------------------------------------------------------------------------- /config/sample-backup.yaml: -------------------------------------------------------------------------------- 1 | # sample backup definition file 2 | # follows basic yaml syntax 3 | # define a volume to restore files to in balenaOS: 4 | # - volume: 'volume-name' 5 | # define an item inside a volume 6 | # items consist of: 7 | # a source - can be a directory or a file 8 | # a target - the target directory or file name within the volume - can be empty / left out 9 | # if ommitted the file / directory will be created in the volume root 10 | # a filter - a regular expression to filter files by - only applicable to directory type source 11 | # if ommitted no filter is applied 12 | 13 | - volume: "test volume 1" 14 | items: 15 | - item: 16 | source: /home/thomas/develop/balena.io/support 17 | target: "target dir 1.1" 18 | - item: 19 | source: "/home/thomas/develop/balena.io/customer/sonder/unitdata/UnitData files" 20 | target: "target dir 1.2" 21 | 
- volume: "test volume 2" 22 | - item: 23 | source: "/home/thomas/develop/balena.io/migrate/migratecfg/balena-migrate" 24 | target: "target file 2.1" 25 | - item: 26 | source: "/home/thomas/develop/balena.io/migrate/migratecfg/init-scripts" 27 | target: "target dir 2.2" 28 | filter: 'balena-.*' 29 | - volume: "test_volume_3" 30 | - item: 31 | source: "/home/thomas/develop/balena.io/migrate/migratecfg/init-scripts" 32 | filter: 'balena-.*' 33 | -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "takeover", 3 | "lockfileVersion": 3, 4 | "requires": true, 5 | "packages": {} 6 | } 7 | -------------------------------------------------------------------------------- /src/common.rs: -------------------------------------------------------------------------------- 1 | use finder::Finder; 2 | use mod_logger::Logger; 3 | use std::cmp::min; 4 | use std::ffi::{CStr, CString, OsString}; 5 | use std::fs::{read_to_string, OpenOptions}; 6 | use std::io::Write; 7 | use std::mem::MaybeUninit; 8 | use std::os::unix::ffi::OsStrExt; 9 | use std::path::{Path, PathBuf}; 10 | use std::process::{exit, Command, ExitStatus, Stdio}; 11 | use std::thread::sleep; 12 | use std::time::Duration; 13 | 14 | use libc::LINUX_REBOOT_CMD_RESTART; 15 | use log::{debug, error, info, trace, warn}; 16 | 17 | use regex::Regex; 18 | 19 | pub(crate) mod stage2_config; 20 | 21 | pub(crate) mod defs; 22 | 23 | pub(crate) mod logging; 24 | pub(crate) mod system; 25 | use system::{is_dir, stat}; 26 | 27 | pub(crate) mod loop_device; 28 | 29 | pub mod error; 30 | pub use error::{Error, ErrorKind, Result, ToError}; 31 | 32 | pub mod options; 33 | use crate::common::defs::{OLD_ROOT_MP, PIDOF_CMD, WHEREIS_CMD}; 34 | 35 | use nix::unistd::sync; 36 | pub use options::Options; 37 | 38 | pub(crate) mod api_calls; 39 | pub(crate) mod debug; 40 | pub(crate) mod disk_util; 41 | 
pub(crate) mod stream_progress; 42 | 43 | const OS_NAME_REGEX: &str = r#"^PRETTY_NAME="([^"]+)"$"#; 44 | const OS_RELEASE_FILE: &str = "/etc/os-release"; 45 | 46 | #[derive(Debug)] 47 | pub(crate) struct CmdRes { 48 | pub stdout: String, 49 | pub stderr: String, 50 | pub status: ExitStatus, 51 | } 52 | 53 | pub(crate) fn call(cmd: &str, args: &[&str], trim_stdout: bool) -> Result { 54 | trace!("call: '{}' called with {:?}, {}", cmd, args, trim_stdout); 55 | 56 | match Command::new(cmd) 57 | .args(args) 58 | .stdout(Stdio::piped()) 59 | .stderr(Stdio::piped()) 60 | .output() 61 | { 62 | Ok(output) => { 63 | debug!("call: output: {:?}", output); 64 | Ok(CmdRes { 65 | stdout: if trim_stdout { 66 | String::from(String::from_utf8_lossy(&output.stdout).trim()) 67 | } else { 68 | String::from(String::from_utf8_lossy(&output.stdout)) 69 | }, 70 | stderr: String::from(String::from_utf8_lossy(&output.stderr)), 71 | status: output.status, 72 | }) 73 | } 74 | Err(why) => { 75 | error!("call: output failed for command: '{}': {:?}", cmd, why); 76 | Err(Error::with_context( 77 | ErrorKind::Upstream, 78 | &format!("call: failed to execute: command {} '{:?}'", cmd, args), 79 | )) 80 | } 81 | } 82 | } 83 | 84 | // Helper function for finding a file inside directory 85 | pub(crate) fn find_file(file_name: &str, directory: &Path) -> PathBuf { 86 | let file_finder = Finder::new(directory); 87 | let mut file_path = PathBuf::new(); 88 | 89 | for i in file_finder.into_iter() { 90 | if i.path().to_string_lossy().contains(file_name) { 91 | file_path = i.path().to_path_buf(); 92 | break; 93 | } 94 | } 95 | 96 | file_path 97 | } 98 | 99 | pub(crate) fn whereis(cmd: &str) -> Result { 100 | const BIN_DIRS: &[&str] = &["./", "/bin", "/usr/bin", "/sbin", "/usr/sbin"]; 101 | // try manually first 102 | for path in BIN_DIRS { 103 | let path = format!("{}/{}", &path, cmd); 104 | if file_exists(&path) { 105 | return Ok(path); 106 | } 107 | } 108 | 109 | // else try whereis command 110 | let args: 
[&str; 2] = ["-b", cmd]; 111 | let cmd_res = match call(WHEREIS_CMD, &args, true) { 112 | Ok(cmd_res) => cmd_res, 113 | Err(why) => { 114 | // manually try the usual suspects 115 | return Err(Error::with_context( 116 | ErrorKind::NotFound, 117 | &format!( 118 | "whereis failed to execute for: {:?}, error: {:?}", 119 | args, why 120 | ), 121 | )); 122 | } 123 | }; 124 | 125 | if cmd_res.status.success() { 126 | if cmd_res.stdout.is_empty() { 127 | Err(Error::with_context( 128 | ErrorKind::InvParam, 129 | &format!("whereis: no command output for {}", cmd), 130 | )) 131 | } else { 132 | let mut words = cmd_res.stdout.split(' '); 133 | if let Some(s) = words.nth(1) { 134 | Ok(String::from(s)) 135 | } else { 136 | Err(Error::with_context( 137 | ErrorKind::NotFound, 138 | &format!("whereis: command not found: '{}'", cmd), 139 | )) 140 | } 141 | } 142 | } else { 143 | Err(Error::with_context( 144 | ErrorKind::ExecProcess, 145 | &format!( 146 | "whereis: command failed for {}: {}", 147 | cmd, 148 | cmd_res.status.code().unwrap_or(0) 149 | ), 150 | )) 151 | } 152 | } 153 | 154 | pub(crate) fn pidof(proc_name: &str) -> Result> { 155 | let cmd_res = call(PIDOF_CMD, &[proc_name], true)?; 156 | let mut res: Vec = Vec::new(); 157 | if cmd_res.status.success() { 158 | for pid in cmd_res.stdout.split_whitespace() { 159 | res.push(pid.parse::().upstream_with_context(&format!( 160 | "pidof: Failed to parse string to u32: '{}'", 161 | pid 162 | ))?); 163 | } 164 | } 165 | Ok(res) 166 | } 167 | 168 | pub(crate) fn get_mem_info() -> Result<(u64, u64)> { 169 | trace!("get_mem_info: entered"); 170 | // TODO: could add loads, uptime if needed 171 | let mut s_info: libc::sysinfo = unsafe { MaybeUninit::::zeroed().assume_init() }; 172 | let res = unsafe { libc::sysinfo(&mut s_info) }; 173 | if res == 0 { 174 | // Fields `totalram` and `freeram` are typed either as `u32` or `u64` 175 | // depending on the platform. 
We need the conversion for 32-bit 176 | // architectures, but clippy would complain about it in 64-bit ones. 177 | // Therefore, we suppress the warning. 178 | #[allow(clippy::unnecessary_cast)] 179 | Ok(( 180 | (s_info.totalram as u64) * (s_info.mem_unit as u64), 181 | (s_info.freeram as u64) * (s_info.mem_unit as u64), 182 | )) 183 | } else { 184 | Err(Error::new(ErrorKind::NotImpl)) 185 | } 186 | } 187 | 188 | /****************************************************************** 189 | * Get OS name from /etc/os-release 190 | ******************************************************************/ 191 | 192 | pub(crate) fn get_os_name() -> Result { 193 | trace!("get_os_name: entered"); 194 | 195 | // TODO: implement other source as fallback 196 | 197 | if file_exists(OS_RELEASE_FILE) { 198 | // TODO: ensure availabilty of method / file exists 199 | if let Some(os_name) = parse_file(OS_RELEASE_FILE, &Regex::new(OS_NAME_REGEX).unwrap())? { 200 | Ok(os_name[1].clone()) 201 | } else { 202 | Err(Error::with_context( 203 | ErrorKind::NotFound, 204 | &format!( 205 | "get_os_name: could not be located in file {}", 206 | OS_RELEASE_FILE 207 | ), 208 | )) 209 | } 210 | } else { 211 | Err(Error::with_context( 212 | ErrorKind::NotFound, 213 | &format!("get_os_name: could not locate file {}", OS_RELEASE_FILE), 214 | )) 215 | } 216 | } 217 | 218 | pub(crate) fn is_admin() -> Result { 219 | trace!("is_admin: entered"); 220 | let admin = unsafe { libc::getuid() } == 0; 221 | Ok(admin) 222 | } 223 | 224 | pub fn file_exists>(file: P) -> bool { 225 | file.as_ref().exists() 226 | } 227 | 228 | pub fn dir_exists>(name: P) -> Result { 229 | match stat(name) { 230 | Ok(stat_info) => Ok(is_dir(&stat_info)), 231 | Err(why) => { 232 | if why.kind() == ErrorKind::FileNotFound { 233 | Ok(false) 234 | } else { 235 | Err(Error::with_cause(ErrorKind::Upstream, Box::new(why))) 236 | } 237 | } 238 | } 239 | } 240 | 241 | pub(crate) fn parse_file>(fname: P, regex: &Regex) -> Result>> { 242 | let path = 
fname.as_ref(); 243 | let os_info = 244 | read_to_string(path).upstream_with_context(&format!("File read '{}'", path.display()))?; 245 | 246 | for line in os_info.lines() { 247 | debug!("parse_file: line: '{}'", line); 248 | 249 | if let Some(ref captures) = regex.captures(line) { 250 | let mut results: Vec = Vec::new(); 251 | for cap in captures.iter() { 252 | if let Some(cap) = cap { 253 | results.push(String::from(cap.as_str())); 254 | } else { 255 | results.push(String::from("")); 256 | } 257 | } 258 | return Ok(Some(results)); 259 | }; 260 | } 261 | 262 | Ok(None) 263 | } 264 | 265 | const GIB_SIZE: u64 = 1024 * 1024 * 1024; 266 | const MIB_SIZE: u64 = 1024 * 1024; 267 | const KIB_SIZE: u64 = 1024; 268 | 269 | pub fn format_size_with_unit(size: u64) -> String { 270 | if size > (10 * GIB_SIZE) { 271 | format!("{} GiB", size / GIB_SIZE) 272 | } else if size > (10 * MIB_SIZE) { 273 | format!("{} MiB", size / MIB_SIZE) 274 | } else if size > (10 * KIB_SIZE) { 275 | format!("{} KiB", size / KIB_SIZE) 276 | } else { 277 | format!("{} B", size) 278 | } 279 | } 280 | 281 | pub fn get_mountpoint>(device: P) -> Result> { 282 | let device_str = &*device.as_ref().to_string_lossy(); 283 | let mtab = read_to_string("/etc/mtab").upstream_with_context("Failed to read /etc/mtab")?; 284 | for line in mtab.lines() { 285 | let words: Vec<&str> = line.split_whitespace().collect(); 286 | if let Some(device) = words.first() { 287 | if device == &device_str { 288 | if let Some(mountpoint) = words.get(1) { 289 | return Ok(Some(PathBuf::from(mountpoint))); 290 | } else { 291 | return Err(Error::with_context( 292 | ErrorKind::InvState, 293 | &format!("Encountered invalid line in /etc/mtab '{}'", line), 294 | )); 295 | } 296 | } 297 | } else { 298 | warn!("Encountered empty line in /etc/mtab"); 299 | } 300 | } 301 | Ok(None) 302 | } 303 | 304 | pub(crate) fn path_append, P2: AsRef>(base: P1, append: P2) -> PathBuf { 305 | let base = base.as_ref(); 306 | let append = append.as_ref(); 307 
| 308 | if append.is_absolute() { 309 | let mut components = append.components(); 310 | let mut curr = PathBuf::from(base); 311 | components.next(); 312 | for comp in components { 313 | curr = curr.join(comp); 314 | } 315 | curr 316 | } else { 317 | base.join(append) 318 | } 319 | } 320 | 321 | pub(crate) fn path_to_cstring>(path: P) -> Result { 322 | let temp: OsString = path.as_ref().into(); 323 | CString::new(temp.as_bytes()).upstream_with_context(&format!( 324 | "Failed to convert path to CString: '{}'", 325 | path.as_ref().display() 326 | )) 327 | } 328 | 329 | #[allow(dead_code)] 330 | pub(crate) unsafe fn hex_dump_ptr_i8(buffer: *const i8, length: isize) -> String { 331 | hex_dump_ptr_u8(buffer as *const u8, length) 332 | } 333 | 334 | pub(crate) unsafe fn hex_dump_ptr_u8(buffer: *const u8, length: isize) -> String { 335 | let mut idx = 0; 336 | let mut output = String::new(); 337 | while idx < length { 338 | output.push_str(&format!("0x{:08x}: ", idx)); 339 | for _ in 0..min(length - idx, 16) { 340 | let byte: u8 = *buffer.offset(idx); 341 | let char: char = 342 | if byte.is_ascii_alphanumeric() || byte.is_ascii_punctuation() || byte == 32 { 343 | char::from(byte) 344 | } else { 345 | '.' 346 | }; 347 | output.push_str(&format!("{:02x} {} ", byte, char)); 348 | idx += 1; 349 | } 350 | output.push('\n'); 351 | } 352 | output 353 | } 354 | 355 | pub(crate) fn hex_dump(buffer: &[u8]) -> String { 356 | unsafe { hex_dump_ptr_u8(buffer as *const [u8] as *const u8, buffer.len() as isize) } 357 | } 358 | 359 | cfg_if::cfg_if! 
{ 360 | if #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] { 361 | pub(crate) fn string_from_c_string(c_string: &[i8]) -> Result { 362 | let mut len: Option = None; 363 | for (idx, char) in c_string.iter().enumerate() { 364 | if *char == 0 { 365 | len = Some(idx); 366 | break; 367 | } 368 | } 369 | if let Some(len) = len { 370 | let u8_str = &c_string[0..=len] as *const [i8] as *const [u8] as *const CStr; 371 | unsafe { Ok(String::from(&*(*u8_str).to_string_lossy())) } 372 | } else { 373 | Err(Error::with_context( 374 | ErrorKind::InvParam, 375 | "Not a nul terminated C string", 376 | )) 377 | } 378 | } 379 | } else { 380 | pub(crate) fn string_from_c_string(c_string: &[u8]) -> Result { 381 | let mut len: Option = None; 382 | for (idx, char) in c_string.iter().enumerate() { 383 | if *char == 0 { 384 | len = Some(idx); 385 | break; 386 | } 387 | } 388 | if let Some(len) = len { 389 | let u8_str = &c_string[0..=len] as *const [u8] as *const CStr; 390 | unsafe { Ok(String::from(&*(*u8_str).to_string_lossy())) } 391 | } else { 392 | Err(Error::with_context( 393 | ErrorKind::InvParam, 394 | "Not a nul terminated C string", 395 | )) 396 | } 397 | } 398 | } 399 | } 400 | 401 | #[allow(dead_code)] 402 | pub(crate) fn log(text: &str) { 403 | let log_path = if let Ok(stat) = stat(OLD_ROOT_MP) { 404 | if is_dir(&stat) { 405 | path_append(OLD_ROOT_MP, "balena-takeover.log") 406 | } else { 407 | PathBuf::from("/balena-takeover.log") 408 | } 409 | } else { 410 | PathBuf::from("/balena-takeover.log") 411 | }; 412 | if let Ok(mut log_file) = OpenOptions::new().create(true).append(true).open(log_path) { 413 | let _res = writeln!(log_file, "{}", text); 414 | let _res = log_file.flush(); 415 | sync() 416 | } 417 | } 418 | 419 | // Utility function used to trigger a reboot of the operating system 420 | pub(crate) fn reboot() -> ! 
{ 421 | trace!("reboot entered"); 422 | Logger::flush(); 423 | sync(); 424 | sleep(Duration::from_secs(3)); 425 | info!("rebooting"); 426 | let _res = unsafe { libc::reboot(LINUX_REBOOT_CMD_RESTART) }; 427 | exit(1); 428 | } 429 | 430 | #[cfg(test)] 431 | mod tests { 432 | use super::*; 433 | #[test] 434 | fn test_path_to_cstring() { 435 | const PATH: &str = "/bla/blub"; 436 | let c_path = path_to_cstring(PATH).unwrap(); 437 | assert_eq!(&*c_path.to_string_lossy(), PATH); 438 | } 439 | } 440 | -------------------------------------------------------------------------------- /src/common/api_calls.rs: -------------------------------------------------------------------------------- 1 | use std::io::Read; 2 | 3 | use log::debug; 4 | 5 | use reqwest::{blocking::Client, header}; 6 | use serde::{Deserialize, Serialize}; 7 | use serde_json::json; 8 | 9 | use crate::common::{Error, ErrorKind, Result, ToError}; 10 | 11 | const OS_VERSION_URL_ENDPOINT: &str = "/v6/release"; 12 | 13 | const OS_IMG_URL: &str = "/download"; 14 | 15 | const DEVICE__TYPE_URL_ENDPOINT: &str = "/v6/device_type"; 16 | 17 | pub(crate) type Versions = Vec; 18 | 19 | #[derive(Debug, Serialize, Deserialize)] 20 | struct ImageRequestData { 21 | #[serde(rename = "deviceType")] 22 | device_type: String, 23 | version: String, 24 | #[serde(rename = "fileType")] 25 | file_type: String, 26 | #[serde(rename = "imageType")] 27 | image_type: Option, 28 | } 29 | /// Structs corresponding to API response for endpoint /v6/releases 30 | #[derive(Serialize, Deserialize, Debug)] 31 | struct ReleasesApiResponse { 32 | d: Vec, 33 | } 34 | 35 | #[derive(Serialize, Deserialize, Debug)] 36 | struct VersionEntry { 37 | raw_version: String, 38 | } 39 | 40 | /// Structs corresponding to API response for endpoint /v6/device_type and with $select=id 41 | #[derive(Serialize, Deserialize, Debug)] 42 | struct DeviceTypeIdApiResponse { 43 | d: Vec, 44 | } 45 | 46 | #[derive(Serialize, Deserialize, Debug)] 47 | struct DeviceIdEntry { 
48 | id: u32, 49 | } 50 | 51 | /// Structs corresponding to API response for DeviceType Contract 52 | #[derive(Debug, Deserialize)] 53 | struct ContractData { 54 | media: Media, 55 | #[serde(default)] 56 | #[serde(rename = "flashProtocol")] 57 | flash_protocol: Option, 58 | } 59 | 60 | #[derive(Debug, Deserialize)] 61 | struct Media { 62 | #[serde(default)] 63 | #[serde(rename = "altBoot")] 64 | alt_boot: Option>, 65 | #[serde(rename = "defaultBoot")] 66 | default_boot: String, 67 | } 68 | 69 | #[derive(Debug, Deserialize)] 70 | struct Contract { 71 | data: ContractData, 72 | } 73 | 74 | #[derive(Debug, Deserialize)] 75 | struct DeviceTypeContractInfo { 76 | contract: Contract, 77 | } 78 | 79 | #[derive(Debug, Deserialize)] 80 | struct DeviceContractInfoApiResponse { 81 | d: Vec, 82 | } 83 | 84 | pub(crate) fn get_os_versions(api_endpoint: &str, api_key: &str, device: &str) -> Result { 85 | let headers = get_header(api_key)?; 86 | 87 | // We currently default to non-ESR releases and use a percent-encoded template 88 | // TODO: Improve in the future by percent-encoding in code here 89 | let request_url = format!("{api_endpoint}{OS_VERSION_URL_ENDPOINT}?$select=raw_version&$filter=(is_final%20eq%20true)%20and%20(is_passing_tests%20eq%20true)%20and%20(is_invalidated%20eq%20false)%20and%20(status%20eq%20%27success%27)%20and%20(belongs_to__application/any(bta:((bta/is_host%20eq%20true)%20and%20(bta/is_for__device_type/any(iodt:iodt/slug%20eq%20%27{device}%27)))%20and%20(not(bta/application_tag/any(at:at/tag_key%20eq%20%27release-policy%27))%20or%20(bta/application_tag/any(at:(at/tag_key%20eq%20%27release-policy%27)%20and%20(at/value%20eq%20%27default%27))))))&$orderby=created_at%20desc"); 90 | 91 | debug!("get_os_versions: request_url: '{}'", request_url); 92 | 93 | let res = Client::builder() 94 | .default_headers(headers) 95 | .build() 96 | .upstream_with_context("Failed to create https client")? 
97 | .get(&request_url) 98 | .send() 99 | .upstream_with_context(&format!( 100 | "Failed to send https request url: '{}'", 101 | request_url 102 | ))?; 103 | 104 | debug!("Result = {:?}", res); 105 | 106 | let status = res.status(); 107 | if status == 200 { 108 | // The API call returns a response with the following structure: 109 | // { 110 | // "d": [ 111 | // { 112 | // "raw_version": "5.1.20+rev1" 113 | // }, 114 | // { 115 | // "raw_version": "5.1.20" 116 | // } 117 | // ] 118 | // } 119 | // Deserialize the JSON string into the ApiResponse struct 120 | let parsed_data = res 121 | .json::() 122 | .upstream_with_context("Failed to parse request results")?; 123 | 124 | // Extract the `raw_version` values into a Vec 125 | let versions: Vec = parsed_data 126 | .d 127 | .into_iter() 128 | .map(|entry| entry.raw_version) 129 | .collect(); 130 | Ok(versions) 131 | } else { 132 | Err(Error::with_context( 133 | ErrorKind::InvState, 134 | &format!("Balena API request failed with status: {}", status), 135 | )) 136 | } 137 | } 138 | 139 | fn get_header(api_key: &str) -> Result { 140 | let mut headers = header::HeaderMap::new(); 141 | headers.insert( 142 | header::AUTHORIZATION, 143 | header::HeaderValue::from_str(format!("Bearer {api_key}").as_str()) 144 | .upstream_with_context("Failed to create auth header")?, 145 | ); 146 | Ok(headers) 147 | } 148 | 149 | pub(crate) fn get_os_image( 150 | api_endpoint: &str, 151 | api_key: &str, 152 | device: &str, 153 | version: &str, 154 | ) -> Result> { 155 | let headers = get_header(api_key)?; 156 | let request_url = format!("{}{}", api_endpoint, OS_IMG_URL); 157 | 158 | let post_data = if is_device_image_flasher(api_endpoint, api_key, device)? 
{ 159 | debug!("Downloading raw image for device type {device}"); 160 | ImageRequestData { 161 | device_type: String::from(device), 162 | version: String::from(version), 163 | file_type: String::from(".gz"), 164 | image_type: Some(String::from("raw")), 165 | } 166 | } else { 167 | ImageRequestData { 168 | device_type: String::from(device), 169 | version: String::from(version), 170 | file_type: String::from(".gz"), 171 | image_type: None, 172 | } 173 | }; 174 | 175 | debug!("get_os_image: request_url: '{}'", request_url); 176 | debug!("get_os_image: data: '{:?}'", post_data); 177 | 178 | let res = Client::builder() 179 | .default_headers(headers) 180 | .build() 181 | .upstream_with_context("Failed to create https client")? 182 | .post(&request_url) 183 | .json(&post_data) 184 | .send() 185 | .upstream_with_context(&format!( 186 | "Failed to send https request url: '{}'", 187 | request_url 188 | ))?; 189 | 190 | debug!("Result = {:?}", res); 191 | 192 | Ok(Box::new(res)) 193 | } 194 | 195 | pub(crate) fn patch_device_type( 196 | api_endpoint: &str, 197 | api_key: &str, 198 | dt_slug: &str, 199 | uuid: &str, 200 | ) -> Result<()> { 201 | let headers = get_header(api_key)?; 202 | 203 | // Before we can patch the deviceType, we need to get the deviceId corresponding to the slug 204 | let dt_id_request_url = get_device_type_info_url(api_endpoint, "id", dt_slug); 205 | 206 | debug!( 207 | "patch_device_type: dt_id_request_url: '{}'", 208 | dt_id_request_url 209 | ); 210 | 211 | let res = Client::builder() 212 | .default_headers(headers.clone()) 213 | .build() 214 | .upstream_with_context("Failed to create https client")? 
215 | .get(&dt_id_request_url) 216 | .send() 217 | .upstream_with_context(&format!( 218 | "Failed to send https request url: '{}'", 219 | dt_id_request_url 220 | ))?; 221 | 222 | debug!("dt_id_request Result = {:?}", res); 223 | 224 | let status = res.status(); 225 | if status.is_success() { 226 | // The API call returns a response with the following structure: 227 | // { 228 | // "d": [ 229 | // { 230 | // "id": 24 231 | // } 232 | // ] 233 | // } 234 | // Deserialize the JSON string into the ApiResponse struct 235 | let parsed_id_resp = res 236 | .json::() 237 | .upstream_with_context("Failed to parse request results")?; 238 | 239 | // Extract the device type id 240 | let id = &parsed_id_resp.d[0].id; 241 | debug!("device type {dt_slug} has id: {id}"); 242 | 243 | // PATCH deviceType 244 | let patch_url = format!("{api_endpoint}/v6/device(uuid='{uuid}')"); 245 | let patch_data = json!({ 246 | "is_of__device_type": id 247 | }); 248 | 249 | let patch_res = Client::builder() 250 | .default_headers(headers) 251 | .build() 252 | .upstream_with_context("Failed to create https client")? 
253 | .patch(&patch_url) 254 | .json(&patch_data) 255 | .send() 256 | .upstream_with_context(&format!( 257 | "Failed to send https request url: '{}'", 258 | patch_url 259 | ))?; 260 | 261 | debug!("PATCH request Result = {:?}", patch_res); 262 | 263 | if patch_res.status().is_success() { 264 | debug!("Device type successfully patched to {dt_slug}"); 265 | Ok(()) 266 | } else { 267 | Err(Error::with_context( 268 | ErrorKind::InvState, 269 | &format!( 270 | "Balena API request failed with status: {}", 271 | patch_res.status() 272 | ), 273 | )) 274 | } 275 | } else { 276 | Err(Error::with_context( 277 | ErrorKind::InvState, 278 | &format!( 279 | "Balena API GET Device Type id request failed with status: {}", 280 | status 281 | ), 282 | )) 283 | } 284 | } 285 | 286 | // PATCH device state with HUP details 287 | pub(crate) fn notify_hup_progress( 288 | api_endpoint: &str, 289 | api_key: &str, 290 | uuid: &str, 291 | progress_pct: &str, 292 | progress_msg: &str, 293 | ) -> Result<()> { 294 | let api_url = format!("{}/v6/device(uuid='{}')", api_endpoint, uuid); 295 | let headers = get_header(api_key)?; 296 | let patch_data = json!({ 297 | "provisioning_progress": progress_pct, 298 | "provisioning_state": progress_msg, 299 | "status": "configuring" 300 | }); 301 | 302 | let res = Client::builder() 303 | .default_headers(headers.clone()) 304 | .build() 305 | .upstream_with_context("Failed to create https client")? 
306 | .patch(&api_url) 307 | .json(&patch_data) 308 | .send() 309 | .upstream_with_context(&format!("Failed to send https request url: {}", &api_url))?; 310 | debug!("HUP progress result = {:?}", res); 311 | let status = res.status(); 312 | let response = res 313 | .text() 314 | .upstream_with_context("Failed to read response")?; 315 | 316 | if status.is_success() { 317 | Ok(()) 318 | } else { 319 | Err(Error::with_context( 320 | ErrorKind::InvState, 321 | &format!( 322 | "Got an unexpected reply from the API server @ {} : {}", 323 | &api_url, &response 324 | ), 325 | )) 326 | } 327 | } 328 | 329 | fn is_device_image_flasher(api_endpoint: &str, api_key: &str, device: &str) -> Result { 330 | let headers = get_header(api_key)?; 331 | 332 | let dt_contract_request_url = get_device_type_info_url(api_endpoint, "contract", device); 333 | let res = Client::builder() 334 | .default_headers(headers.clone()) 335 | .build() 336 | .upstream_with_context("Failed to create https client")? 337 | .get(&dt_contract_request_url) 338 | .send() 339 | .upstream_with_context(&format!( 340 | "Failed to send https request url: '{}'", 341 | dt_contract_request_url 342 | ))?; 343 | 344 | debug!("dt_contract_request Result = {:?}", res); 345 | 346 | let status = res.status(); 347 | if status.is_success() { 348 | let parsed_contract_resp = res 349 | .json::() 350 | .upstream_with_context("Failed to parse request results")?; 351 | 352 | // determine if device type's OS image is of flasher type 353 | // ref: https://github.com/balena-io/contracts/blob/d06ad25196f67c4d20ad309941192fdddf80e307/README.md?plain=1#L81 354 | let device_contract = &parsed_contract_resp.d[0]; 355 | debug!("Device contract for {device} is {:?}", device_contract); 356 | 357 | // If the defaultBoot is internal and there is an alternative boot method like sdcard and no flashProtocol defined -> flasher 358 | if device_contract.contract.data.media.default_boot == "internal" 359 | && device_contract 360 | .contract 361 | 
.data 362 | .media 363 | .alt_boot 364 | .as_ref() 365 | .is_some_and(|alt_boot_vec| !alt_boot_vec.is_empty()) 366 | && device_contract.contract.data.flash_protocol.is_none() 367 | { 368 | Ok(true) 369 | } else { 370 | Ok(false) 371 | } 372 | } else { 373 | Err(Error::with_context( 374 | ErrorKind::InvState, 375 | &format!( 376 | "Balena API GET Device Type contract request failed with status: {}", 377 | status 378 | ), 379 | )) 380 | } 381 | } 382 | 383 | fn get_device_type_info_url(api_endpoint: &str, select: &str, device: &str) -> String { 384 | format!("{api_endpoint}{DEVICE__TYPE_URL_ENDPOINT}?$orderby=name%20asc&$top=1&$select={select}&$filter=device_type_alias/any(dta:dta/is_referenced_by__alias%20eq%20%27{device}%27)") 385 | } 386 | -------------------------------------------------------------------------------- /src/common/debug.rs: -------------------------------------------------------------------------------- 1 | use crate::common::{file_exists, path_append}; 2 | 3 | use libc::{close, open, O_RDWR}; 4 | use log::{debug, error}; 5 | use std::ffi::CString; 6 | use std::io; 7 | use std::path::Path; 8 | 9 | pub fn check_loop_control>(text: &str, base_path: P) { 10 | debug!("{}", text); 11 | debug!( 12 | "check if /dev/loop-control exists in new /dev: {}", 13 | file_exists(path_append(base_path.as_ref(), "loop-control")) 14 | ); 15 | 16 | let path = match CString::new("/dev/loop-control") { 17 | Ok(path) => path, 18 | Err(why) => { 19 | error!( 20 | "Failed to create cstring from path: '/dev/loop-control', error: {:?}", 21 | why 22 | ); 23 | return; 24 | } 25 | }; 26 | 27 | let path_ptr = path.into_raw(); 28 | let fd = unsafe { open(path_ptr, O_RDWR) }; 29 | let _dummy = unsafe { CString::from_raw(path_ptr) }; 30 | if fd >= 0 { 31 | debug!("open /dev/loop-control succeeded"); 32 | let _res = unsafe { close(fd) }; 33 | } else { 34 | debug!( 35 | "open /dev/loop-control failed with error: {}", 36 | io::Error::last_os_error() 37 | ); 38 | } 39 | } 40 | 
-------------------------------------------------------------------------------- /src/common/defs.rs: -------------------------------------------------------------------------------- 1 | pub(crate) const SWAPOFF_CMD: &str = "swapoff"; 2 | pub(crate) const TELINIT_CMD: &str = "telinit"; 3 | 4 | pub(crate) const MOKUTIL_CMD: &str = "mokutil"; 5 | pub(crate) const WHEREIS_CMD: &str = "whereis"; 6 | pub(crate) const PIDOF_CMD: &str = "pidof"; 7 | pub(crate) const PIVOT_ROOT_CMD: &str = "pivot_root"; 8 | pub(crate) const MOUNT_CMD: &str = "mount"; 9 | pub(crate) const BLKID_CMD: &str = "blkid"; 10 | 11 | pub(crate) const EFIBOOTMGR_CMD: &str = "efibootmgr"; 12 | pub(crate) const DD_CMD: &str = "dd"; 13 | 14 | // The mdtd_debug tool is used on Xavier NX devices to clear and write the QSPI 15 | // with the boot blob included in the target OS image. 16 | // This tools is provided by the mtd-utils package at 17 | // http://git.infradead.org/?p=mtd-utils.git 18 | pub(crate) const MTD_DEBUG_CMD: &str = "mtd_debug"; 19 | 20 | pub(crate) const TAR_CMD: &str = "tar"; 21 | 22 | // below path is used as the root mountpoint during migration 23 | pub(crate) const TAKEOVER_DIR: &str = "/tmp/balena-takeover"; 24 | pub(crate) const STAGE2_CONFIG_NAME: &str = "stage2-config.yml"; 25 | 26 | pub(crate) const BALENA_IMAGE_NAME: &str = "balena.img.gz"; 27 | pub(crate) const BALENA_IMAGE_PATH: &str = "/balena.img.gz"; 28 | 29 | pub(crate) const BALENA_CONFIG_PATH: &str = "/config.json"; 30 | 31 | pub const DISK_BY_LABEL_PATH: &str = "/dev/disk/by-label"; 32 | 33 | // balena boot partition name 34 | pub const BALENA_BOOT_PART: &str = "resin-boot"; 35 | 36 | // Default balena boot partition filesystem type 37 | pub const BALENA_BOOT_FSTYPE: &str = "vfat"; 38 | 39 | // balena rootA partition name 40 | pub const BALENA_ROOTA_PART: &str = "resin-rootA"; 41 | 42 | // balena rootA partition filesystem type 43 | pub const BALENA_ROOTA_FSTYPE: &str = "ext4"; 44 | 45 | // balena data partition name 46 
| pub const BALENA_DATA_PART: &str = "resin-data"; 47 | 48 | // balena data partition filesystem type 49 | pub const BALENA_DATA_FSTYPE: &str = "ext4"; 50 | 51 | pub const OLD_ROOT_MP: &str = "/mnt/old_root"; 52 | pub const BALENA_BOOT_MP: &str = "/mnt/balena-boot"; 53 | pub const BALENA_PART_MP: &str = "/mnt/balena-part"; 54 | 55 | // balena directory which holds NetworkManager connection files. 56 | // this directory is located in the resin-boot partition, in balenaOS 57 | pub const SYSTEM_CONNECTIONS_DIR: &str = "system-connections"; 58 | 59 | // balena directory which holds redsocks proxy configuration files 60 | pub const SYSTEM_PROXY_DIR: &str = "system-proxy"; 61 | 62 | // default mountpoint for the balenaOS boot partition 63 | pub const BALENA_OS_BOOT_MP: &str = "/mnt/boot/"; 64 | 65 | // default mountpoint for the balenaOS data partition 66 | pub const BALENA_DATA_MP: &str = "/mnt/data/"; 67 | pub const BALENA_OS_NAME: &str = "balenaOS"; 68 | 69 | pub const BALENA_SYSTEM_CONNECTIONS_BOOT_PATH: &str = "/mnt/boot/system-connections/"; 70 | pub const BALENA_SYSTEM_PROXY_BOOT_PATH: &str = "/mnt/boot/system-proxy/"; 71 | 72 | pub const BALENA_NETWORK_MANAGER_BIND_MOUNT: &str = "/etc/NetworkManager/"; 73 | // Enables writing to the hardware-defined boot partition on AGX Xavier. 74 | // For details on boot partitions access in Linux, 75 | // see https://www.kernel.org/doc/Documentation/mmc/mmc-dev-parts.txt 76 | pub const JETSON_XAVIER_HW_PART_FORCE_RO_FILE: &str = "/sys/block/mmcblk0boot0/force_ro"; 77 | 78 | // Hardware-defined boot partition for Jetson AGX Xavier 79 | pub const BOOT_BLOB_PARTITION_JETSON_XAVIER: &str = "/dev/mmcblk0boot0"; 80 | 81 | // QSPI device - Jetson Xavier NX SD and NX eMMC 82 | pub const BOOT_BLOB_PARTITION_JETSON_XAVIER_NX: &str = "/dev/mtd0"; 83 | 84 | // Stage 2 boot blob file names on AGX Xavier and Xavier NX. 
These are used 85 | // for programming the QSPI, are provided by the target balenaOS image we migrate 86 | // to, and they've been obtained from a device flashed with balenaOS using 87 | // the Nvidia flashing tools. 88 | pub const BOOT_BLOB_NAME_JETSON_XAVIER: &str = "boot0_mmcblk0boot0.img"; 89 | pub const BOOT_BLOB_NAME_JETSON_XAVIER_NX: &str = "boot0_mtdblock0.img"; 90 | 91 | pub const SYS_EFI_DIR: &str = "/sys/firmware/efi"; 92 | pub const SYS_EFIVARS_DIR: &str = "/sys/firmware/efi/efivars"; 93 | 94 | pub const BACKUP_ARCH_NAME: &str = "backup.tgz"; 95 | 96 | pub const NIX_NONE: Option<&'static [u8]> = None; 97 | 98 | cfg_if::cfg_if! { 99 | if #[cfg(target_env = "musl")] { 100 | pub(crate) type IoctlReq = i32; 101 | } else if #[cfg(target_arch = "x86_64")] { 102 | pub(crate) type IoctlReq = u64; 103 | } else if #[cfg(target_arch = "arm")] { 104 | pub(crate) type IoctlReq = u32; 105 | } else if #[cfg(target_arch = "aarch64")]{ 106 | pub(crate) type IoctlReq = u64; 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/common/disk_util/gzip_file.rs: -------------------------------------------------------------------------------- 1 | use flate2::read::GzDecoder; 2 | use log::{debug, trace}; 3 | use std::fs::{File, OpenOptions}; 4 | use std::io::Read; 5 | use std::path::{Path, PathBuf}; 6 | 7 | const DEF_READ_BUFFER: usize = 1024 * 1024; 8 | 9 | use crate::common::{disk_util::image_file::ImageFile, Error, ErrorKind, Result}; 10 | 11 | pub(crate) struct GZipFile { 12 | path: PathBuf, 13 | decoder: GzDecoder, 14 | bytes_read: u64, 15 | } 16 | 17 | impl GZipFile { 18 | pub fn new(path: &Path) -> Result { 19 | trace!("new: entered with '{}'", path.display()); 20 | let file = match OpenOptions::new() 21 | .write(false) 22 | .read(true) 23 | .create(false) 24 | .open(path) 25 | { 26 | Ok(file) => file, 27 | Err(why) => { 28 | return Err(Error::with_context( 29 | ErrorKind::Upstream, 30 | &format!( 31 | "failed to open 
file for reading: '{}', error {:?}", 32 | path.display(), 33 | why 34 | ), 35 | )); 36 | } 37 | }; 38 | 39 | Ok(GZipFile { 40 | path: path.to_path_buf(), 41 | decoder: GzDecoder::new(file), 42 | bytes_read: 0, 43 | }) 44 | } 45 | 46 | fn reset(&mut self) -> Result<()> { 47 | trace!("reset: entered"); 48 | let file = match OpenOptions::new() 49 | .write(false) 50 | .read(true) 51 | .create(false) 52 | .open(&self.path) 53 | { 54 | Ok(file) => file, 55 | Err(why) => { 56 | return Err(Error::with_context( 57 | ErrorKind::Upstream, 58 | &format!( 59 | "failed to reopen file for reading: '{}', error {:?}", 60 | self.path.display(), 61 | why 62 | ), 63 | )); 64 | } 65 | }; 66 | 67 | self.decoder = GzDecoder::new(file); 68 | self.bytes_read = 0; 69 | Ok(()) 70 | } 71 | 72 | fn seek(&mut self, offset: u64) -> Result<()> { 73 | trace!( 74 | "seek: entered with offset {}, bytes_read: {}", 75 | offset, 76 | self.bytes_read 77 | ); 78 | let mut to_read = if offset < self.bytes_read { 79 | self.reset()?; 80 | offset 81 | } else { 82 | offset - self.bytes_read 83 | }; 84 | 85 | trace!("seek: to_read: {}", to_read); 86 | 87 | if to_read == 0 { 88 | Ok(()) 89 | } else { 90 | let mut buffer: [u8; DEF_READ_BUFFER] = [0; DEF_READ_BUFFER]; 91 | if to_read >= (DEF_READ_BUFFER as u64) { 92 | loop { 93 | match self.decoder.read(&mut buffer) { 94 | Ok(bytes_read) => { 95 | to_read -= bytes_read as u64; 96 | // debug!("bytes_read: {}, to_read:{}", bytes_read, to_read); 97 | if to_read < DEF_READ_BUFFER as u64 { 98 | trace!( 99 | "seek: done with DEF_BUFFER, to_read: {}, bytes_read: {}", 100 | to_read, 101 | bytes_read 102 | ); 103 | break; 104 | } 105 | } 106 | Err(why) => { 107 | return Err(Error::with_context( 108 | ErrorKind::Upstream, 109 | &format!( 110 | "seek: failed to reopen file for reading: '{}', error {:?}", 111 | self.path.display(), 112 | why 113 | ), 114 | )); 115 | } 116 | } 117 | } 118 | } 119 | 120 | if to_read > 0 { 121 | trace!("seek: last buffer, to_read: {}", 
to_read); 122 | match self.decoder.read_exact(&mut buffer[0..to_read as usize]) { 123 | Ok(_) => { 124 | trace!("seek: read, got {} bytes", to_read); 125 | self.bytes_read = offset; 126 | Ok(()) 127 | } 128 | Err(why) => Err(Error::with_context( 129 | ErrorKind::Upstream, 130 | &format!( 131 | "seek: failed to read from file file '{}', error {:?}", 132 | self.path.display(), 133 | why 134 | ), 135 | )), 136 | } 137 | } else { 138 | debug!("seek: nothing more to_read: {}", to_read); 139 | self.bytes_read = offset; 140 | Ok(()) 141 | } 142 | } 143 | } 144 | } 145 | 146 | impl ImageFile for GZipFile { 147 | fn fill(&mut self, offset: u64, buffer: &mut [u8]) -> Result<()> { 148 | trace!( 149 | "fill: entered with offset {}, size {}", 150 | offset, 151 | buffer.len() 152 | ); 153 | self.seek(offset)?; 154 | 155 | trace!("fill: bytes_read after seek {}", self.bytes_read); 156 | 157 | match self.decoder.read_exact(buffer) { 158 | Ok(_) => { 159 | self.bytes_read = offset + buffer.len() as u64; 160 | Ok(()) 161 | } 162 | Err(why) => Err(Error::with_context( 163 | ErrorKind::Upstream, 164 | &format!( 165 | "failed to read from file: '{}', error {:?}", 166 | self.path.display(), 167 | why 168 | ), 169 | )), 170 | } 171 | } 172 | fn get_path(&self) -> PathBuf { 173 | self.path.clone() 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /src/common/disk_util/gzip_stream.rs: -------------------------------------------------------------------------------- 1 | use std::io::Read; 2 | use std::path::PathBuf; 3 | 4 | use flate2::read::GzDecoder; 5 | use log::{debug, trace}; 6 | 7 | use crate::common::{disk_util::image_file::ImageFile, Error, ErrorKind, Result}; 8 | 9 | const DEF_READ_BUFFER: usize = 1024 * 1024; 10 | 11 | pub(crate) struct GZipStream { 12 | decoder: GzDecoder, 13 | bytes_read: u64, 14 | } 15 | 16 | impl GZipStream { 17 | pub fn new(stream: R) -> Result> { 18 | trace!("new: entered "); 19 | 20 | Ok(GZipStream { 21 | 
decoder: GzDecoder::new(stream), 22 | bytes_read: 0, 23 | }) 24 | } 25 | 26 | fn seek(&mut self, offset: u64) -> Result<()> { 27 | trace!( 28 | "seek: entered with offset {}, bytes_read: {}", 29 | offset, 30 | self.bytes_read 31 | ); 32 | 33 | let mut to_read = if offset < self.bytes_read { 34 | return Err(Error::with_context( 35 | ErrorKind::InvState, 36 | "cannot seek backwards on stream", 37 | )); 38 | } else { 39 | offset - self.bytes_read 40 | }; 41 | 42 | trace!("seek: to_read: {}", to_read); 43 | 44 | if to_read == 0 { 45 | Ok(()) 46 | } else { 47 | let mut buffer: [u8; DEF_READ_BUFFER] = [0; DEF_READ_BUFFER]; 48 | if to_read >= (DEF_READ_BUFFER as u64) { 49 | loop { 50 | match self.decoder.read(&mut buffer) { 51 | Ok(bytes_read) => { 52 | to_read -= bytes_read as u64; 53 | // debug!("bytes_read: {}, to_read:{}", bytes_read, to_read); 54 | if to_read < DEF_READ_BUFFER as u64 { 55 | trace!( 56 | "seek: done with DEF_BUFFER, to_read: {}, bytes_read: {}", 57 | to_read, 58 | bytes_read 59 | ); 60 | break; 61 | } 62 | } 63 | Err(why) => { 64 | return Err(Error::with_context( 65 | ErrorKind::Upstream, 66 | &format!("seek: read from stream, error {:?}", why), 67 | )); 68 | } 69 | } 70 | } 71 | } 72 | 73 | if to_read > 0 { 74 | trace!("seek: last buffer, to_read: {}", to_read); 75 | match self.decoder.read_exact(&mut buffer[0..to_read as usize]) { 76 | Ok(_) => { 77 | trace!("seek: read, got {} bytes", to_read); 78 | self.bytes_read = offset; 79 | Ok(()) 80 | } 81 | Err(why) => Err(Error::with_context( 82 | ErrorKind::Upstream, 83 | &format!("seek: failed to read from stream, error {:?}", why), 84 | )), 85 | } 86 | } else { 87 | debug!("seek: nothing more to_read: {}", to_read); 88 | self.bytes_read = offset; 89 | Ok(()) 90 | } 91 | } 92 | } 93 | } 94 | 95 | impl ImageFile for GZipStream { 96 | fn fill(&mut self, offset: u64, buffer: &mut [u8]) -> Result<()> { 97 | trace!( 98 | "fill: entered with offset {}, size {}", 99 | offset, 100 | buffer.len() 101 | ); 102 | 
self.seek(offset)?; 103 | 104 | trace!("fill: bytes_read after seek {}", self.bytes_read); 105 | 106 | match self.decoder.read_exact(buffer) { 107 | Ok(_) => { 108 | self.bytes_read = offset + buffer.len() as u64; 109 | Ok(()) 110 | } 111 | Err(why) => Err(Error::with_context( 112 | ErrorKind::Upstream, 113 | &format!("failed to read from stream, error {:?}", why), 114 | )), 115 | } 116 | } 117 | fn get_path(&self) -> PathBuf { 118 | PathBuf::from("STREAM") 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/common/disk_util/image_file.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use crate::common::Result; 4 | 5 | pub(crate) trait ImageFile { 6 | fn fill(&mut self, offset: u64, buffer: &mut [u8]) -> Result<()>; 7 | fn get_path(&self) -> PathBuf; 8 | } 9 | -------------------------------------------------------------------------------- /src/common/disk_util/plain_file.rs: -------------------------------------------------------------------------------- 1 | use std::fs::{File, OpenOptions}; 2 | use std::io::{Read, Seek, SeekFrom}; 3 | use std::path::{Path, PathBuf}; 4 | 5 | use crate::common::{disk_util::image_file::ImageFile, Error, ErrorKind, Result, ToError}; 6 | 7 | pub(crate) struct PlainFile { 8 | path: PathBuf, 9 | file: File, 10 | } 11 | 12 | impl PlainFile { 13 | pub fn new(path: &Path) -> Result { 14 | let file = match OpenOptions::new() 15 | .write(false) 16 | .read(true) 17 | .create(false) 18 | .open(path) 19 | { 20 | Ok(file) => file, 21 | Err(why) => { 22 | return Err(Error::with_context( 23 | ErrorKind::Upstream, 24 | &format!( 25 | "failed to open file for reading: '{}', error {:?}", 26 | path.display(), 27 | why 28 | ), 29 | )); 30 | } 31 | }; 32 | 33 | Ok(PlainFile { 34 | path: path.to_path_buf(), 35 | file, 36 | }) 37 | } 38 | } 39 | 40 | impl ImageFile for PlainFile { 41 | fn fill(&mut self, offset: u64, buffer: &mut 
[u8]) -> Result<()> { 42 | self.file 43 | .seek(SeekFrom::Start(offset)) 44 | .upstream_with_context(&format!("failed to seek to offset {}", offset))?; 45 | match self.file.read_exact(buffer) { 46 | Ok(_) => Ok(()), 47 | Err(why) => Err(Error::with_context( 48 | ErrorKind::Upstream, 49 | &format!( 50 | "failed to read from file: '{}', error {:?}", 51 | self.path.display(), 52 | why 53 | ), 54 | )), 55 | } 56 | } 57 | fn get_path(&self) -> PathBuf { 58 | self.path.clone() 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/common/error.rs: -------------------------------------------------------------------------------- 1 | use std::error; 2 | use std::fmt::{self, Display, Formatter}; 3 | use std::io; 4 | use std::result; 5 | 6 | #[allow(dead_code)] 7 | #[derive(Debug, Copy, Clone, PartialEq)] 8 | pub enum ErrorKind { 9 | Upstream, 10 | NotFound, 11 | FileNotFound, 12 | DeviceNotFound, 13 | InvParam, 14 | InvState, 15 | NotImpl, 16 | ImageDownloaded, 17 | ExecProcess, 18 | CmdIo, 19 | Permission, 20 | FileExists, 21 | NotPermitted, 22 | Displayed, 23 | } 24 | 25 | impl Display for ErrorKind { 26 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 27 | let output = match *self { 28 | Self::Upstream => "An error occurred in an upstream function", 29 | Self::NotFound => "An item could not be found", 30 | Self::FileNotFound => "A file could not be found", 31 | Self::DeviceNotFound => "A device could not be found", 32 | Self::InvParam => "An invalid parameter was encountered", 33 | Self::InvState => "An invalid state was encountered", 34 | Self::NotImpl => "A required function has not been implemented yet", 35 | Self::ImageDownloaded => "The image was downloaded successfully", 36 | Self::ExecProcess => "A spawned process returned an error code", 37 | Self::CmdIo => "A command IO stream operation failed", 38 | Self::Permission => "Permission was denied", 39 | Self::NotPermitted => "Operation is not permitted", 40 | 
Self::FileExists => "The file exists", 41 | Self::Displayed => "The error was displayed upstream", 42 | }; 43 | write!(f, "{}", output) 44 | } 45 | } 46 | 47 | #[derive(Debug)] 48 | pub struct Error { 49 | kind: ErrorKind, 50 | cause: Option>, 51 | context: Option, 52 | } 53 | 54 | impl Error { 55 | pub fn new(kind: ErrorKind) -> Error { 56 | Error { 57 | kind, 58 | cause: None, 59 | context: None, 60 | } 61 | } 62 | 63 | pub fn displayed() -> Error { 64 | Error::new(ErrorKind::Displayed) 65 | } 66 | 67 | pub fn with_context(kind: ErrorKind, context: &str) -> Error { 68 | Error { 69 | kind, 70 | cause: None, 71 | context: Some(context.to_owned()), 72 | } 73 | } 74 | 75 | pub fn with_cause( 76 | kind: ErrorKind, 77 | cause: Box, 78 | ) -> Error { 79 | Error { 80 | kind, 81 | cause: Some(cause), 82 | context: None, 83 | } 84 | } 85 | 86 | pub fn with_all( 87 | kind: ErrorKind, 88 | context: &str, 89 | cause: Box, 90 | ) -> Error { 91 | Error { 92 | kind, 93 | cause: Some(cause), 94 | context: Some(context.to_owned()), 95 | } 96 | } 97 | 98 | #[allow(clippy::wrong_self_convention)] 99 | pub fn from_upstream( 100 | cause: Box, 101 | context: &str, 102 | ) -> Error { 103 | Error { 104 | kind: ErrorKind::Upstream, 105 | cause: Some(cause), 106 | context: Some(context.to_owned()), 107 | } 108 | } 109 | 110 | pub fn from_upstream_error( 111 | cause: Box, 112 | context: &str, 113 | ) -> Error { 114 | Error { 115 | kind: ErrorKind::Upstream, 116 | cause: Some(cause), 117 | context: Some(context.to_owned()), 118 | } 119 | } 120 | 121 | pub fn kind(&self) -> ErrorKind { 122 | self.kind 123 | } 124 | } 125 | 126 | impl Display for Error { 127 | fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { 128 | write!(f, "{}", self.kind)?; 129 | match &self.context { 130 | Some(context) => { 131 | write!(f, ", context: {}", context)?; 132 | } 133 | None => (), 134 | } 135 | let mut curr_err: &dyn error::Error = self; 136 | 137 | while let Some(cause) = curr_err.source() { 138 | 
write!(f, "\n caused by: {}", cause)?; 139 | curr_err = cause; 140 | } 141 | Ok(()) 142 | } 143 | } 144 | 145 | impl From for Error { 146 | fn from(error: io::Error) -> Self { 147 | match error.kind() { 148 | io::ErrorKind::NotFound => Error::with_cause(ErrorKind::NotFound, Box::new(error)), 149 | _ => Error::from_upstream_error(Box::new(error), ""), 150 | } 151 | } 152 | } 153 | impl error::Error for Error { 154 | fn source(&self) -> Option<&(dyn error::Error + 'static)> { 155 | match &self.cause { 156 | Some(cause) => Some(&**cause), 157 | None => None, 158 | } 159 | } 160 | 161 | fn cause(&self) -> Option<&dyn error::Error> { 162 | match &self.cause { 163 | Some(cause) => Some(&**cause), 164 | None => None, 165 | } 166 | } 167 | } 168 | 169 | pub trait ToError { 170 | fn error(self) -> Result; 171 | fn upstream_with_context(self, context: &str) -> Result; 172 | fn error_with_all(self, kind: ErrorKind, context: &str) -> Result; 173 | fn error_with_kind(self, kind: ErrorKind) -> Result; 174 | } 175 | 176 | impl ToError for result::Result 177 | where 178 | E: error::Error + Send + Sync + 'static, 179 | { 180 | fn error(self) -> Result { 181 | match self { 182 | Ok(ok) => Ok(ok), 183 | Err(why) => Err(Error::with_cause(ErrorKind::Upstream, Box::new(why))), 184 | } 185 | } 186 | fn error_with_all(self, kind: ErrorKind, context: &str) -> Result { 187 | match self { 188 | Ok(ok) => Ok(ok), 189 | Err(why) => Err(Error::with_all(kind, context, Box::new(why))), 190 | } 191 | } 192 | 193 | fn error_with_kind(self, kind: ErrorKind) -> Result { 194 | match self { 195 | Ok(ok) => Ok(ok), 196 | Err(why) => Err(Error::with_cause(kind, Box::new(why))), 197 | } 198 | } 199 | fn upstream_with_context(self, context: &str) -> Result { 200 | match self { 201 | Ok(ok) => Ok(ok), 202 | Err(why) => Err(Error::with_all(ErrorKind::Upstream, context, Box::new(why))), 203 | } 204 | } 205 | } 206 | 207 | pub type Result = result::Result; 208 | 
-------------------------------------------------------------------------------- /src/common/logging.rs: -------------------------------------------------------------------------------- 1 | use log::{error, info}; 2 | use std::fs::{self, OpenOptions}; 3 | use std::{ 4 | fs::copy, 5 | path::{Path, PathBuf}, 6 | }; 7 | 8 | use crate::common::defs::BALENA_DATA_MP; 9 | use crate::common::{path_append, Error, ToError}; 10 | use crate::{ 11 | common::{ 12 | debug, 13 | defs::{BALENA_DATA_FSTYPE, NIX_NONE}, 14 | disk_util::DEF_BLOCK_SIZE, 15 | error::Result, 16 | loop_device::LoopDevice, 17 | ErrorKind, 18 | }, 19 | stage2::get_partition_infos, 20 | }; 21 | 22 | use nix::{ 23 | mount::{mount, umount, MsFlags}, 24 | unistd::sync, 25 | }; 26 | 27 | use self::stage2_config::Stage2Config; 28 | 29 | use super::{ 30 | defs::{BALENA_PART_MP, OLD_ROOT_MP}, 31 | stage2_config, 32 | }; 33 | 34 | /// Utility function to create a directory and all child directories 35 | fn create_dir_if_not_exist(path: &str) -> Result<()> { 36 | let dir_path = Path::new(path); 37 | if !dir_path.is_dir() { 38 | println!("Directory does not exist. Creating: {}", dir_path.display()); 39 | fs::create_dir_all(dir_path).upstream_with_context( 40 | format!("Failed to create directory {}", dir_path.display()).as_str(), 41 | )?; 42 | println!("Directory created successfully."); 43 | } else { 44 | println!("Directory already exists: {}", dir_path.display()); 45 | } 46 | 47 | Ok(()) 48 | } 49 | 50 | /// Helper function to copy a source file to a directory 51 | /// it keeps the same file name 52 | pub fn copy_file_to_destination_dir(source_file_path: &str, dest_dir_path: &str) -> Result<()> { 53 | info!( 54 | "copy_file_to_destination_dir! 
Copying {} from tmpfs to {}", 55 | source_file_path, dest_dir_path 56 | ); 57 | 58 | let source_file = Path::new(source_file_path); 59 | if source_file.exists() && source_file.is_file() { 60 | let file_name = source_file 61 | .file_name() 62 | .map(|name| name.to_string_lossy().to_string()) 63 | .expect("Failed to extract file name from path"); 64 | 65 | copy( 66 | PathBuf::from(source_file), 67 | path_append(dest_dir_path, format!("/{}", file_name)), 68 | )?; 69 | 70 | Ok(()) 71 | } else { 72 | Err(Error::with_context( 73 | ErrorKind::FileNotFound, 74 | &format!("source file {} does not exist", source_file_path), 75 | )) 76 | } 77 | } 78 | 79 | /// Helper function to create or open the tmpfs logfile 80 | /// The fallback log mechanism logs to a single file 81 | /// The filename used can be provided as an option 82 | pub fn open_fallback_log_file(fallback_log_filename: &str) -> Option { 83 | let tmpfs_log_file = path_append("/tmp/", fallback_log_filename); 84 | let log_file = match OpenOptions::new() 85 | .append(true) // Append to the file, don't overwrite 86 | .create(true) // Create the file if it does not exist 87 | .open(tmpfs_log_file) 88 | { 89 | Ok(file) => Some(file), 90 | Err(why) => { 91 | error!( 92 | "Could not open /tmp/{}, error {:?}", 93 | fallback_log_filename, why 94 | ); 95 | None 96 | } 97 | }; 98 | log_file 99 | } 100 | 101 | /// Helper function to persist fallback logs from tmpfs to disk 102 | /// This function can be called at different stages during the migration 103 | /// 104 | /// # Arguments 105 | /// * `s2_config` - The stage2 config file 106 | /// * `is_new_image_flashed` - indicates if the function is being called after the 107 | /// the target disk has been flashed with the new os image. 
108 | /// 109 | /// If called before flashing, this is usually as part of error handling 110 | /// and we want to persist the fallback logs 111 | pub fn persist_fallback_log_to_data_partition( 112 | s2_config: &Stage2Config, 113 | is_new_image_flashed: bool, 114 | ) -> Result<()> { 115 | let source_tmpfs_log_path = format!("/tmp/{}", s2_config.fallback_log_filename); 116 | 117 | // If true, we need to mount the raw data partition and write to it 118 | if is_new_image_flashed { 119 | let device = &s2_config.flash_dev; 120 | 121 | let (_boot_part, _root_a_part, data_part) = get_partition_infos(device)?; 122 | 123 | let mut loop_device = LoopDevice::get_free(true)?; 124 | info!("Create loop device: '{}'", loop_device.get_path().display()); 125 | let byte_offset = data_part.start_lba * DEF_BLOCK_SIZE as u64; 126 | let size_limit = data_part.num_sectors * DEF_BLOCK_SIZE as u64; 127 | 128 | debug!( 129 | "Setting up device '{}' with offset {}, sizelimit {} on '{}'", 130 | device.display(), 131 | byte_offset, 132 | size_limit, 133 | loop_device.get_path().display() 134 | ); 135 | 136 | loop_device 137 | .setup(device, Some(byte_offset), Some(size_limit)) 138 | .unwrap(); 139 | info!( 140 | "Setup device '{}' with offset {}, sizelimit {} on '{}'", 141 | device.display(), 142 | byte_offset, 143 | size_limit, 144 | loop_device.get_path().display() 145 | ); 146 | 147 | mount( 148 | Some(loop_device.get_path()), 149 | BALENA_PART_MP, 150 | Some(BALENA_DATA_FSTYPE.as_bytes()), 151 | MsFlags::empty(), 152 | NIX_NONE, 153 | ) 154 | .upstream_with_context(&format!( 155 | "Failed to mount '{}' to '{}'", 156 | loop_device.get_path().display(), 157 | BALENA_PART_MP, 158 | ))?; 159 | 160 | info!( 161 | "Mounted data partition as {} on {}", 162 | loop_device.get_path().display(), 163 | BALENA_PART_MP 164 | ); 165 | 166 | let dest_dir = format!("{}/{}", BALENA_PART_MP, s2_config.fallback_log_dirname); 167 | create_dir_if_not_exist(&dest_dir)?; 168 | 169 | 
copy_file_to_destination_dir(&source_tmpfs_log_path, dest_dir.as_str())?; 170 | 171 | sync(); 172 | umount(BALENA_PART_MP).upstream_with_context("Failed to unmount data partition")?; 173 | info!("Unmounted data partition from {}", BALENA_PART_MP); 174 | 175 | loop_device.unset()?; 176 | } else if Path::new(BALENA_DATA_MP).exists() { 177 | let dest_dir = format!("{}/{}", BALENA_DATA_MP, s2_config.fallback_log_dirname); 178 | create_dir_if_not_exist(&dest_dir)?; 179 | 180 | copy_file_to_destination_dir(&source_tmpfs_log_path, dest_dir.as_str())?; 181 | } else if Path::new(path_append(OLD_ROOT_MP, BALENA_DATA_MP).as_os_str()).exists() { 182 | // else if data partition is relative to OLD_ROOT_MP 183 | 184 | let dest_dir = format!( 185 | "{}/{}/{}", 186 | OLD_ROOT_MP, BALENA_DATA_MP, s2_config.fallback_log_dirname 187 | ); 188 | create_dir_if_not_exist(&dest_dir)?; 189 | 190 | copy_file_to_destination_dir(&source_tmpfs_log_path, dest_dir.as_str())?; 191 | } 192 | 193 | Ok(()) 194 | } 195 | -------------------------------------------------------------------------------- /src/common/options.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use clap::Parser; 4 | use log::Level; 5 | 6 | const DEFAULT_CHECK_TIMEOUT: u64 = 10; 7 | 8 | #[derive(Parser, Debug, Clone)] 9 | #[clap(name = env!("CARGO_PKG_NAME"), author, about)] 10 | pub struct Options { 11 | /// what to do 12 | #[clap( 13 | short, 14 | long, 15 | value_name = "DIRECTORY", 16 | value_parser, 17 | help = "Path to working directory" 18 | )] 19 | work_dir: Option, 20 | #[clap( 21 | short, 22 | long, 23 | value_name = "IMAGE", 24 | value_parser, 25 | help = "Path to balena-os image" 26 | )] 27 | image: Option, 28 | #[clap( 29 | short, 30 | long, 31 | value_name = "VERSION", 32 | help = "Version of balena-os image to download" 33 | )] 34 | version: Option, 35 | #[clap( 36 | short, 37 | long, 38 | value_name = "CONFIG_JSON", 39 | value_parser, 
40 | help = "Path to balena config.json" 41 | )] 42 | config: Option, 43 | #[clap( 44 | long, 45 | default_value = "info", 46 | help = "Set log level, one of [error,warn,info,debug,trace]" 47 | )] 48 | log_level: Level, 49 | #[clap( 50 | long, 51 | value_name = "LOG_FILE", 52 | value_parser, 53 | help = "Set stage1 log file name" 54 | )] 55 | log_file: Option, 56 | #[clap( 57 | long, 58 | help = "Logs to RAM and then dumps logs to balenaOS disk after flashing" 59 | )] 60 | fallback_log: bool, 61 | #[clap( 62 | long, 63 | value_name = "FALLBACK_LOG_FILENAME", 64 | value_parser, 65 | default_value = "fallback.log", 66 | help = "Set the name of the fallback log" 67 | )] 68 | fallback_log_filename: String, 69 | #[clap( 70 | long, 71 | value_name = "FALLBACK_LOG_DIR", 72 | value_parser, 73 | default_value = "fallback_log", 74 | help = "Set the directory name where fallback logs will be persisted on data partition" 75 | )] 76 | fallback_log_dir: String, 77 | #[clap( 78 | long, 79 | value_name = "BACKUP-CONFIG", 80 | value_parser, 81 | help = "Backup configuration file" 82 | )] 83 | backup_cfg: Option, 84 | #[clap( 85 | long, 86 | help = "Set stage2 log level, one of [error,warn,info,debug,trace]" 87 | )] 88 | s2_log_level: Option, 89 | #[clap( 90 | long, 91 | help = "Scripted mode - no interactive acknowledgement of takeover" 92 | )] 93 | no_ack: bool, 94 | #[clap(long, help = "Pretend mode, do not flash device")] 95 | pretend: bool, 96 | #[clap(long, help = "Internal - stage2 invocation")] 97 | stage2: bool, 98 | #[clap(long, help = "Use internal tar instead of external command")] 99 | tar_internal: bool, 100 | #[clap(long, help = "Debug - do not cleanup after stage1 failure")] 101 | no_cleanup: bool, 102 | #[clap(long, help = "Do not check if OS is supported")] 103 | no_os_check: bool, 104 | #[clap(long, help = "Do not check if the target device type is valid")] 105 | no_dt_check: bool, 106 | #[clap(long, help = "Do not check if balena API is available")] 107 | 
no_api_check: bool,
    #[clap(long, help = "Do not check if balena VPN is available")]
    no_vpn_check: bool,
    #[clap(long, help = "Do not setup EFI boot")]
    no_efi_setup: bool,
    #[clap(long, help = "Do not check network manager files exist")]
    no_nwmgr_check: bool,
    #[clap(long, help = "Do not migrate host-name")]
    no_keep_name: bool,
    #[clap(
        short,
        long,
        help = "Download image only, do not check device and migrate"
    )]
    download_only: bool,
    #[clap(
        long,
        value_name = "TIMEOUT",
        value_parser,
        help = "API/VPN check timeout in seconds."
    )]
    check_timeout: Option<u64>,
    #[clap(
        long,
        short,
        value_name = "LOG_DEVICE",
        value_parser,
        help = "Write stage2 log to LOG_DEVICE"
    )]
    log_to: Option<PathBuf>,
    #[clap(
        short,
        long,
        value_name = "INSTALL_DEVICE",
        value_parser,
        help = "Use INSTALL_DEVICE to flash balena to"
    )]
    flash_to: Option<PathBuf>,
    #[clap(
        long,
        help = "Do not create network manager configurations for configured wifis"
    )]
    no_wifis: bool,
    #[clap(
        long,
        value_name = "SSID",
        help = "Create a network manager configuration for configured wifi with SSID"
    )]
    wifi: Option<Vec<String>>,
    #[clap(
        long,
        value_name = "NWMGR_FILE",
        value_parser,
        help = "Supply a network manager file to inject into balena-os"
    )]
    nwmgr_cfg: Option<Vec<PathBuf>>,
    #[clap(long, value_name = "DT_SLUG", help = "Device Type slug to change to")]
    change_dt_to: Option<String>,
    #[clap(long, help = "Report to balena host update processing")]
    report_hup_progress: bool,
}

impl Options {
    /// Path to the backup configuration file, if one was supplied.
    pub fn backup_config(&self) -> Option<&Path> {
        // `as_deref` borrows the inner PathBuf as &Path without cloning.
        self.backup_cfg.as_deref()
    }

    /// True when this is the internal stage2 invocation.
    pub fn stage2(&self) -> bool {
self.stage2
    }

    /// True when the internal Rust tar implementation should be used.
    pub fn tar_internal(&self) -> bool {
        self.tar_internal
    }

    /// Working directory; defaults to the current directory when unset.
    pub fn work_dir(&self) -> PathBuf {
        self.work_dir
            .clone()
            .unwrap_or_else(|| PathBuf::from("./"))
    }

    pub fn image(&self) -> &Option<PathBuf> {
        &self.image
    }

    /// Requested balena-os version, or "default" when none was given.
    pub fn version(&self) -> &str {
        self.version.as_deref().unwrap_or("default")
    }

    pub fn no_ack(&self) -> bool {
        self.no_ack
    }

    /// Migration is implied unless download-only mode was requested.
    pub fn migrate(&self) -> bool {
        !self.download_only
    }

    pub fn config(&self) -> &Option<PathBuf> {
        &self.config
    }

    pub fn pretend(&self) -> bool {
        self.pretend
    }

    pub fn log_file(&self) -> &Option<PathBuf> {
        &self.log_file
    }

    pub fn log_level(&self) -> Level {
        self.log_level
    }

    /// Stage2 log level; falls back to the stage1 level when not set.
    pub fn s2_log_level(&self) -> Level {
        self.s2_log_level.unwrap_or(self.log_level)
    }

    pub fn os_check(&self) -> bool {
        !self.no_os_check
    }

    pub fn dt_check(&self) -> bool {
        !self.no_dt_check
    }

    pub fn no_efi_setup(&self) -> bool {
        self.no_efi_setup
    }

    pub fn api_check(&self) -> bool {
        !self.no_api_check
    }

    pub fn vpn_check(&self) -> bool {
        !self.no_vpn_check
    }

    pub fn log_to(&self) -> &Option<PathBuf> {
        &self.log_to
    }

    pub fn flash_to(&self) -> &Option<PathBuf> {
        &self.flash_to
    }

    /// API/VPN check timeout in seconds, with a built-in default.
    pub fn check_timeout(&self) -> u64 {
        self.check_timeout.unwrap_or(DEFAULT_CHECK_TIMEOUT)
    }

    pub fn no_wifis(&self) -> bool {
        self.no_wifis
    }

    /// SSIDs to create network manager configurations for (empty when none given).
    pub fn wifis(&self) -> &[String] {
        const NO_WIFIS: [String; 0] = [];
        if
let Some(wifis) = &self.wifi { 281 | wifis.as_slice() 282 | } else { 283 | &NO_WIFIS 284 | } 285 | } 286 | 287 | pub fn nwmgr_cfg(&self) -> &[PathBuf] { 288 | if let Some(nwmgr_cfgs) = &self.nwmgr_cfg { 289 | nwmgr_cfgs.as_slice() 290 | } else { 291 | const NO_NWMGR_CFGS: [PathBuf; 0] = []; 292 | &NO_NWMGR_CFGS 293 | } 294 | } 295 | 296 | pub fn no_nwmgr_check(&self) -> bool { 297 | self.no_nwmgr_check 298 | } 299 | 300 | pub fn migrate_name(&self) -> bool { 301 | !self.no_keep_name 302 | } 303 | 304 | pub fn cleanup(&self) -> bool { 305 | !self.no_cleanup 306 | } 307 | 308 | pub fn change_dt_to(&self) -> &Option { 309 | &self.change_dt_to 310 | } 311 | 312 | pub fn fallback_log(&self) -> bool { 313 | self.fallback_log 314 | } 315 | 316 | pub fn fallback_log_filename(&self) -> &str { 317 | &self.fallback_log_filename 318 | } 319 | 320 | pub fn fallback_log_dir(&self) -> &str { 321 | &self.fallback_log_dir 322 | } 323 | 324 | pub fn report_hup_progress(&self) -> bool { 325 | self.report_hup_progress 326 | } 327 | } 328 | -------------------------------------------------------------------------------- /src/common/stage2_config.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | 5 | use crate::common::error::{Result, ToError}; 6 | 7 | #[derive(Debug, Deserialize, Serialize, Clone)] 8 | pub(crate) struct UmountPart { 9 | pub dev_name: PathBuf, 10 | pub mountpoint: PathBuf, 11 | pub fs_type: String, 12 | } 13 | 14 | #[derive(Debug, Deserialize, Serialize, Clone)] 15 | pub(crate) struct LogDevice { 16 | pub dev_name: PathBuf, 17 | pub fs_type: String, 18 | } 19 | 20 | #[derive(Debug, Deserialize, Serialize, Clone)] 21 | pub(crate) struct Stage2Config { 22 | pub log_dev: Option, 23 | pub log_level: String, 24 | pub fallback_log: bool, 25 | pub fallback_log_filename: String, 26 | pub fallback_log_dirname: String, 27 | pub flash_dev: PathBuf, 28 | pub pretend: bool, 
pub umount_parts: Vec<UmountPart>,
    pub work_dir: PathBuf,
    pub image_path: PathBuf,
    pub config_path: PathBuf,
    pub backup_path: Option<PathBuf>,
    pub device_type: String,
    pub tty: PathBuf,
    pub api_endpoint: String,
    pub api_key: String,
    pub uuid: String,
    pub report_hup_progress: bool,
    pub change_dt_to: Option<String>,
}

#[allow(dead_code)]
impl Stage2Config {
    /// The stage2 log device, if one was configured.
    pub fn log_dev(&self) -> Option<&LogDevice> {
        self.log_dev.as_ref()
    }

    /// Serialize this config to a YAML string.
    pub fn serialize(&self) -> Result<String> {
        // Context previously said "deserialize"; this is serialization.
        serde_yaml::to_string(self).upstream_with_context("Failed to serialize stage2 config")
    }

    /// Parse a stage2 config from its YAML representation.
    pub fn deserialize(config_str: &str) -> Result<Stage2Config> {
        serde_yaml::from_str(config_str).upstream_with_context("Failed to parse stage2 config")
    }

    /// Misspelled alias kept for backward compatibility with existing callers.
    pub fn deserialze(config_str: &str) -> Result<Stage2Config> {
        Self::deserialize(config_str)
    }

    pub fn flash_dev(&self) -> &PathBuf {
        &self.flash_dev
    }

    /// Remove value for api_key from serialization output. Useful for logging.
    /// Expects input is a multiline string.
67 | pub fn sanitize_text(serialized: &str) -> String { 68 | let mut clean_txt = String::new(); 69 | for element in serialized.lines() { 70 | if element.starts_with("api_key") { 71 | clean_txt.push_str("api_key: "); 72 | } else { 73 | clean_txt.push_str(element); 74 | } 75 | clean_txt.push('\n'); 76 | } 77 | clean_txt 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/common/stream_progress.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, error, info, trace, warn, Level}; 2 | use std::io::Read; 3 | use std::time::Instant; 4 | 5 | use crate::common::format_size_with_unit; 6 | 7 | pub(crate) struct StreamProgress { 8 | input: T, 9 | size: Option, 10 | bytes_read: u64, 11 | last_log: u64, 12 | every: u32, 13 | level: Level, 14 | start_time: Instant, 15 | done: bool, 16 | } 17 | 18 | impl StreamProgress { 19 | pub fn new(input: T, every: u32, level: Level, size: Option) -> StreamProgress { 20 | StreamProgress { 21 | input, 22 | size, 23 | bytes_read: 0, 24 | last_log: 0, 25 | every, 26 | level, 27 | start_time: Instant::now(), 28 | done: false, 29 | } 30 | } 31 | } 32 | 33 | impl Read for StreamProgress { 34 | fn read(&mut self, buf: &mut [u8]) -> Result { 35 | let curr_bytes_read = self.input.read(buf)?; 36 | if curr_bytes_read == 0 { 37 | if !self.done { 38 | let elapsed = Instant::now().duration_since(self.start_time).as_secs(); 39 | let printout = if let Some(size) = self.size { 40 | format!( 41 | "{} of {} read in {} seconds @{}/sec ", 42 | format_size_with_unit(self.bytes_read), 43 | format_size_with_unit(size), 44 | elapsed, 45 | format_size_with_unit(self.bytes_read / elapsed.max(1)), 46 | ) 47 | } else { 48 | format!( 49 | "{} read in {} seconds @{}/sec ", 50 | format_size_with_unit(self.bytes_read), 51 | elapsed, 52 | format_size_with_unit(self.bytes_read / elapsed.max(1)), 53 | ) 54 | }; 55 | 56 | match self.level { 57 | Level::Trace => trace!("{}", 
printout), 58 | Level::Debug => debug!("{}", printout), 59 | Level::Warn => warn!("{}", printout), 60 | Level::Error => error!("{}", printout), 61 | Level::Info => info!("{}", printout), 62 | } 63 | 64 | self.done = true; 65 | } 66 | return Ok(curr_bytes_read); 67 | } 68 | 69 | self.bytes_read += curr_bytes_read as u64; 70 | let elapsed = Instant::now().duration_since(self.start_time).as_secs(); 71 | 72 | let logs = elapsed / self.every as u64; 73 | if logs > self.last_log { 74 | self.last_log = logs; 75 | let printout = if let Some(size) = self.size { 76 | format!( 77 | "{} of {} read in {} seconds @{}/sec ", 78 | format_size_with_unit(self.bytes_read), 79 | format_size_with_unit(size), 80 | elapsed, 81 | format_size_with_unit(self.bytes_read / elapsed.max(1)), 82 | ) 83 | } else { 84 | format!( 85 | "{} read in {} seconds @{}/sec ", 86 | format_size_with_unit(self.bytes_read), 87 | elapsed, 88 | format_size_with_unit(self.bytes_read / elapsed.max(1)), 89 | ) 90 | }; 91 | 92 | match self.level { 93 | Level::Trace => trace!("{}", printout), 94 | Level::Debug => debug!("{}", printout), 95 | Level::Warn => warn!("{}", printout), 96 | Level::Error => error!("{}", printout), 97 | Level::Info => info!("{}", printout), 98 | } 99 | } 100 | Ok(curr_bytes_read) 101 | } 102 | } 103 | -------------------------------------------------------------------------------- /src/common/system/fd.rs: -------------------------------------------------------------------------------- 1 | use std::ffi::CString; 2 | use std::io; 3 | use std::os::raw::c_int; 4 | 5 | use libc::{self, close, open, ENODEV, ENOENT}; 6 | use std::path::Path; 7 | 8 | use log::debug; 9 | use nix::errno::errno; 10 | 11 | use crate::common::{ 12 | error::{Error, ErrorKind, Result}, 13 | path_to_cstring, 14 | }; 15 | 16 | pub(crate) struct Fd { 17 | fd: c_int, 18 | } 19 | 20 | impl Fd { 21 | pub fn get_fd(&self) -> c_int { 22 | self.fd 23 | } 24 | 25 | pub fn open>(file: P, mode: c_int) -> Result { 26 | let file_name = 
path_to_cstring(&file)?; 27 | let fname_ptr = file_name.into_raw(); 28 | let fd = unsafe { open(fname_ptr, mode) }; 29 | let _file_name = unsafe { CString::from_raw(fname_ptr) }; 30 | if fd >= 0 { 31 | debug!( 32 | "Fd::open: opened path: '{}' as fd {}", 33 | file.as_ref().display(), 34 | fd 35 | ); 36 | Ok(Fd { fd }) 37 | } else { 38 | let err_no = errno(); 39 | debug!( 40 | "Fd:open: failed to open file '{}', error {}, ", 41 | file.as_ref().display(), 42 | err_no, 43 | ); 44 | 45 | if (err_no == ENOENT) || (err_no == ENODEV) { 46 | Err(Error::with_context( 47 | ErrorKind::FileNotFound, 48 | &format!( 49 | "Fd::open: Failed to open file '{}', error {}", 50 | file.as_ref().display(), 51 | io::Error::last_os_error() 52 | ), 53 | )) 54 | } else { 55 | Err(Error::with_context( 56 | ErrorKind::Upstream, 57 | &format!( 58 | "Fd::open: Failed to open file '{}', error {}", 59 | file.as_ref().display(), 60 | io::Error::last_os_error() 61 | ), 62 | )) 63 | } 64 | } 65 | } 66 | } 67 | 68 | impl Drop for Fd { 69 | fn drop(&mut self) { 70 | unsafe { close(self.fd) }; 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/macros.rs: -------------------------------------------------------------------------------- 1 | #[macro_export] 2 | 3 | macro_rules! 
call_command { 4 | ( $cmd:expr, $args:expr , $errmsg:expr ) => { 5 | match call($cmd, $args, true) { 6 | Ok(cmd_res) => { 7 | if cmd_res.status.success() { 8 | Ok(cmd_res.stdout) 9 | } else { 10 | Err(Error::with_context( 11 | ErrorKind::ExecProcess, 12 | &format!("{}, stderr: {}", $errmsg, cmd_res.stderr), 13 | )) 14 | } 15 | } 16 | Err(why) => Err(why), 17 | } 18 | }; 19 | ( $cmd:expr, $args:expr ) => { 20 | match call($cmd, $args, true) { 21 | Ok(cmd_res) => { 22 | if cmd_res.status.success() { 23 | Ok(cmd_res.stdout) 24 | } else { 25 | Err(Error::with_context( 26 | ErrorKind::ExecProcess, 27 | &format!("stderr: {}", cmd_res.stderr), 28 | )) 29 | } 30 | } 31 | Err(why) => Err(why), 32 | } 33 | }; 34 | } 35 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod macros; 3 | mod common; 4 | mod init; 5 | mod stage1; 6 | mod stage2; 7 | 8 | use log::error; 9 | use std::process::exit; 10 | 11 | use clap::Parser; 12 | use mod_logger::Logger; 13 | 14 | use crate::{ 15 | common::{error::ErrorKind, Options}, 16 | init::init, 17 | stage1::stage1, 18 | stage2::stage2, 19 | }; 20 | 21 | fn is_init() -> bool { 22 | let pid = unsafe { libc::getpid() }; 23 | pid == 1 24 | } 25 | 26 | fn main() { 27 | let mut exit_code = 0; 28 | 29 | if is_init() { 30 | init(); 31 | } else { 32 | let opts = Options::parse(); 33 | 34 | if opts.stage2() { 35 | stage2(&opts); 36 | } else if let Err(why) = stage1(&opts) { 37 | exit_code = 1; 38 | match why.kind() { 39 | ErrorKind::Displayed => (), 40 | _ => error!("Migrate stage 1 returned an error: {}", why), 41 | }; 42 | }; 43 | Logger::flush(); 44 | exit(exit_code); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/stage1/backup.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, info, trace}; 
2 | use regex::Regex; 3 | use std::fs::read_dir; 4 | use std::path::{Path, PathBuf}; 5 | 6 | // Recurse through directories 7 | pub mod config; 8 | 9 | mod archiver; 10 | 11 | mod rust_tar_archiver; 12 | 13 | mod ext_tar_archiver; 14 | 15 | use crate::{ 16 | common::{ 17 | error::{Error, ErrorKind, Result, ToError}, 18 | path_append, 19 | }, 20 | stage1::backup::{ 21 | archiver::Archiver, config::VolumeConfig, ext_tar_archiver::ExtTarArchiver, 22 | rust_tar_archiver::RustTarArchiver, 23 | }, 24 | }; 25 | 26 | fn archive_dir( 27 | dir_path: &Path, 28 | target_path: &Path, 29 | archiver: &mut impl Archiver, 30 | filter: &Option, 31 | ) -> Result { 32 | trace!( 33 | "archive_dir: dir_path: '{}', target_path: '{}' filter: {:?}", 34 | dir_path.display(), 35 | target_path.display(), 36 | filter 37 | ); 38 | let mut written = false; 39 | 40 | for entry in read_dir(dir_path).upstream_with_context(&format!( 41 | "Failed to list directory backup source: '{}'", 42 | dir_path.display() 43 | ))? { 44 | match entry { 45 | Ok(dir_entry) => { 46 | let source_path = dir_entry.path(); 47 | let source_file = source_path.file_name().unwrap(); 48 | debug!("processing source: '{}'", source_path.display()); 49 | let metadata = dir_entry.metadata().upstream_with_context(&format!( 50 | "Failed to retrieve metadata for file: '{}'", 51 | source_path.display() 52 | ))?; 53 | 54 | if metadata.is_dir() { 55 | if archive_dir( 56 | &source_path, 57 | &path_append(target_path, source_file), 58 | archiver, 59 | filter, 60 | )? 
{
                        written = true;
                    }
                } else if let Some(filter) = filter {
                    // Only archive regular files whose path matches the filter.
                    if filter.is_match(&source_path.to_string_lossy()) {
                        let target = path_append(target_path, source_file);
                        archiver
                            .add_file(target.as_path(), source_path.as_path())
                            .upstream_with_context(&format!(
                                "Failed to append file: '{}' to archive path: '{}'",
                                source_path.display(),
                                target.display()
                            ))?;
                        written = true;
                        debug!(
                            "appended source: '{}' to archive as '{}'",
                            source_path.display(),
                            target.display()
                        );
                    } else {
                        debug!("No match on file: '{}'", &source_path.display());
                    }
                } else {
                    // No filter configured - archive every regular file.
                    let target = path_append(target_path, source_file);
                    archiver
                        .add_file(target.as_path(), source_path.as_path())
                        .upstream_with_context(&format!(
                            "Failed to append file: '{}' to archive path: '{}'",
                            source_path.display(),
                            target.display()
                        ))?;
                    written = true;
                    debug!(
                        "appended source: '{}' to archive as '{}'",
                        source_path.display(),
                        target.display()
                    );
                }
            }
            Err(why) => {
                return Err(Error::with_all(
                    ErrorKind::Upstream,
                    "Failed to read entry from ",
                    Box::new(why),
                ));
            }
        }
    }

    Ok(written)
}

/// Create a backup archive at `file` using the external tar command.
/// Returns Ok(true) if anything was written to the archive.
#[allow(dead_code)]
pub(crate) fn create_ext(file: &Path, config: Vec<VolumeConfig>) -> Result<bool> {
    if !config.is_empty() {
        // Log message fix: closing quote was missing after '{}'.
        info!("creating new backup in '{}'", file.display());
        let mut archiver = ExtTarArchiver::new(file)?;
        if create_int(&mut archiver, config)?
{
            info!("The backup was created successfully");
            Ok(true)
        } else {
            info!("No backup was created");
            Ok(false)
        }
    } else {
        info!("The backup configuration was empty - nothing backed up");
        Ok(false)
    }
}

/// Create a backup archive at `file` using the built-in Rust tar/gzip
/// implementation. Returns Ok(true) if anything was written to the archive.
pub(crate) fn create<P: AsRef<Path>>(file: P, config: Vec<VolumeConfig>) -> Result<bool> {
    if !config.is_empty() {
        // Log message fix: closing quote was missing after '{}'.
        info!("creating new backup in '{}'", file.as_ref().display());
        let mut archiver = RustTarArchiver::new(file)?;
        if create_int(&mut archiver, config)? {
            info!("The backup was created successfully");
            Ok(true)
        } else {
            info!("No backup was created");
            Ok(false)
        }
    } else {
        info!("The backup configuration was empty - nothing backed up");
        Ok(false)
    }
}

/// Walk the backup volume configuration and append each configured file or
/// directory tree to `archiver`. Returns Ok(true) if anything was written.
fn create_int(archiver: &mut impl Archiver, config: Vec<VolumeConfig>) -> Result<bool> {
    // TODO: stop selected services, containers, add this to backup config

    trace!("create_int entered with: {:?}", config);

    let mut written = false;
    for volume in config {
        info!("backup to volume: '{}'", volume.volume);

        for item in &volume.items {
            let item_src = PathBuf::from(&item.source)
                .canonicalize()
                .upstream_with_context(&format!("Failed to process source '{}'", item.source))?;

            debug!("processing item: source. '{}'", item_src.display());

            if let Ok(metadata) = item_src.metadata() {
                if metadata.is_dir() {
                    let target_path = if let Some(ref target) = item.target {
                        path_append(PathBuf::from(&volume.volume), target)
                    } else {
                        PathBuf::from(&volume.volume)
                    };

                    debug!("source: '{}' is a directory", item_src.display());
                    let filter = if let Some(ref filter) = item.filter {
                        Some(Regex::new(filter).upstream_with_context(&format!(
                            "Failed to create regular expression from filter '{}'",
                            filter
                        ))?)
177 | } else { 178 | None 179 | }; 180 | 181 | if archive_dir(&item_src, &target_path, archiver, &filter)? { 182 | written = true; 183 | } 184 | } else { 185 | debug!("source: '{}' is a file", item_src.display()); 186 | let target = if let Some(ref target) = item.target { 187 | path_append(PathBuf::from(&volume.volume), target) 188 | } else { 189 | path_append(PathBuf::from(&volume.volume), item_src.file_name().unwrap()) 190 | }; 191 | 192 | debug!("target: '{}'", target.display()); 193 | archiver 194 | .add_file(target.as_path(), item_src.as_path()) 195 | .upstream_with_context(&format!( 196 | "Failed to append '{}' to archive path '{}'", 197 | item_src.display(), 198 | target.display() 199 | ))?; 200 | written = true; 201 | debug!( 202 | "appended source: '{}' to archive as '{}'", 203 | item_src.display(), 204 | target.display() 205 | ); 206 | } 207 | } else { 208 | return Err(Error::with_context( 209 | ErrorKind::NotFound, 210 | &format!("Missing source for backup: '{}'", item.source), 211 | )); 212 | } 213 | } 214 | } 215 | 216 | archiver 217 | .finish() 218 | .upstream_with_context("Failed to create backup archive")?; 219 | 220 | debug!("create_int: returning {}", written); 221 | Ok(written) 222 | } 223 | -------------------------------------------------------------------------------- /src/stage1/backup/archiver.rs: -------------------------------------------------------------------------------- 1 | use crate::common::error::Result; 2 | use std::path::Path; 3 | 4 | pub trait Archiver { 5 | fn add_file(&mut self, target: &Path, source: &Path) -> Result<()>; 6 | fn finish(&mut self) -> Result<()>; 7 | } 8 | -------------------------------------------------------------------------------- /src/stage1/backup/config.rs: -------------------------------------------------------------------------------- 1 | use crate::common::error::{Result, ToError}; 2 | 3 | use serde::Deserialize; 4 | use std::fs::read_to_string; 5 | use std::path::Path; 6 | 7 | #[derive(Debug, 
Deserialize)] 8 | pub(crate) struct ItemConfig { 9 | pub source: String, 10 | pub target: Option, 11 | // TODO: filter.allow, filter.deny 12 | pub filter: Option, 13 | } 14 | 15 | #[derive(Debug, Deserialize)] 16 | pub(crate) struct VolumeConfig { 17 | pub volume: String, 18 | pub items: Vec, 19 | } 20 | 21 | pub(crate) fn backup_cfg_from_file>(file: P) -> Result> { 22 | serde_yaml::from_str( 23 | &read_to_string(file.as_ref()).upstream_with_context(&format!( 24 | "Failed to read backup configuration from file: '{}'", 25 | file.as_ref().display() 26 | ))?, 27 | ) 28 | .upstream_with_context(&format!( 29 | "Failed to parse backup configuration from file: '{}'", 30 | file.as_ref().display() 31 | )) 32 | } 33 | -------------------------------------------------------------------------------- /src/stage1/backup/ext_tar_archiver.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, warn}; 2 | use std::fs::{create_dir_all, remove_dir_all}; 3 | use std::path::{Path, PathBuf}; 4 | 5 | use crate::common::system::symlink; 6 | use crate::stage1::utils::mktemp; 7 | use crate::{ 8 | common::{ 9 | call, 10 | defs::{BACKUP_ARCH_NAME, TAR_CMD}, 11 | dir_exists, 12 | error::{Error, ErrorKind, Result, ToError}, 13 | path_append, 14 | }, 15 | stage1::backup::archiver::Archiver, 16 | }; 17 | 18 | // use external tar / gzip for archiving 19 | // strategy is to link (ln -s ) all files / directories to a temporary directory 20 | // and tar/gizip that directory on finish 21 | #[cfg(target_os = "linux")] 22 | pub(crate) struct ExtTarArchiver { 23 | tmp_dir: PathBuf, 24 | archive: PathBuf, 25 | } 26 | 27 | #[cfg(target_os = "linux")] 28 | impl ExtTarArchiver { 29 | pub fn new>(file: P) -> Result { 30 | const NO_PATH: Option<&Path> = None; 31 | Ok(ExtTarArchiver { 32 | tmp_dir: mktemp(true, None, None, NO_PATH)?, 33 | archive: PathBuf::from(file.as_ref()), 34 | }) 35 | } 36 | } 37 | 38 | #[cfg(target_os = "linux")] 39 | impl Archiver for 
ExtTarArchiver { 40 | fn add_file(&mut self, target: &Path, source: &Path) -> Result<()> { 41 | debug!( 42 | "ExtTarArchiver::add_file: '{}' , '{}'", 43 | target.display(), 44 | source.display() 45 | ); 46 | if let Some(parent_dir) = target.parent() { 47 | let parent_dir = path_append(&self.tmp_dir, parent_dir); 48 | if !dir_exists(&parent_dir).upstream_with_context(&format!( 49 | "Failed to access directory '{}'", 50 | parent_dir.display() 51 | ))? { 52 | debug!( 53 | "ExtTarArchiver::add_file: create directory '{}'", 54 | parent_dir.display() 55 | ); 56 | create_dir_all(&parent_dir).upstream_with_context(&format!( 57 | "Failed to create directory '{}'", 58 | parent_dir.display() 59 | ))?; 60 | } 61 | } 62 | 63 | let lnk_target = path_append(&self.tmp_dir, target); 64 | 65 | debug!( 66 | "ExtTarArchiver::add_file: link '{}' to '{}'", 67 | source.display(), 68 | lnk_target.display() 69 | ); 70 | 71 | symlink(source, &lnk_target).upstream_with_context(&format!( 72 | "Failed to link '{}' to '{}'", 73 | source.display(), 74 | lnk_target.display() 75 | ))?; 76 | Ok(()) 77 | } 78 | 79 | fn finish(&mut self) -> Result<()> { 80 | let _res = call_command!( 81 | TAR_CMD, 82 | &[ 83 | "-h", 84 | "-czf", 85 | BACKUP_ARCH_NAME, 86 | "-C", 87 | &*self.tmp_dir.to_string_lossy(), 88 | ".", 89 | ], 90 | &format!("Failed to create archive in '{}'", self.archive.display(),) 91 | )?; 92 | 93 | if let Err(why) = remove_dir_all(&self.tmp_dir) { 94 | warn!( 95 | "Failed to delete temporary directory '{}' error: {:?}", 96 | self.tmp_dir.display(), 97 | why 98 | ); 99 | } 100 | 101 | Ok(()) 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/stage1/backup/rust_tar_archiver.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | common::error::{Result, ToError}, 3 | stage1::backup::archiver::Archiver, 4 | }; 5 | 6 | use flate2::{write::GzEncoder, Compression}; 7 | use std::fs::File; 8 
| use std::path::Path; 9 | use tar::Builder; 10 | 11 | pub(crate) struct RustTarArchiver { 12 | archive: Builder>, 13 | } 14 | 15 | // use rust internal tar / gzip for archiving 16 | 17 | impl RustTarArchiver { 18 | pub fn new>(file: P) -> Result { 19 | Ok(RustTarArchiver { 20 | archive: Builder::new(GzEncoder::new( 21 | File::create(file.as_ref()).upstream_with_context(&format!( 22 | "Failed to create backup in file '{}'", 23 | file.as_ref().display() 24 | ))?, 25 | Compression::default(), 26 | )), 27 | }) 28 | } 29 | } 30 | 31 | impl Archiver for RustTarArchiver { 32 | fn add_file(&mut self, target: &Path, source: &Path) -> Result<()> { 33 | self.archive 34 | .append_path_with_name(source, target) 35 | .upstream_with_context(&format!( 36 | "Failed to append file: '{}' to archive path: '{}'", 37 | source.display(), 38 | target.display() 39 | )) 40 | } 41 | 42 | fn finish(&mut self) -> Result<()> { 43 | self.archive 44 | .finish() 45 | .upstream_with_context("Failed to create backup archive") 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/stage1/block_device_info.rs: -------------------------------------------------------------------------------- 1 | use crate::common::{path_append, Error, Result, ToError}; 2 | 3 | use lazy_static::lazy_static; 4 | use log::{debug, trace}; 5 | use nix::sys::stat::{major, minor, stat}; 6 | use regex::Regex; 7 | use std::collections::HashMap; 8 | use std::fmt; 9 | use std::fs::{read_dir, read_to_string}; 10 | use std::path::{Path, PathBuf}; 11 | use std::rc::Rc; 12 | use std::result; 13 | 14 | mod mount; 15 | use mount::{Mount, MountTab}; 16 | 17 | pub(crate) mod block_device; 18 | pub(crate) use block_device::BlockDevice; 19 | 20 | mod device; 21 | use device::Device; 22 | 23 | mod partition; 24 | use crate::ErrorKind; 25 | use partition::Partition; 26 | use std::str::FromStr; 27 | 28 | // TODO: add mountpoints for partitions 29 | 30 | const BLOC_DEV_SUPP_MAJ_NUMBERS: [u64; 
45] = [
    3, 8, 9, 21, 33, 34, 44, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 64, 65, 66, 67, 68, 69,
    70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 179, 180, 259,
];

/// Map from device path (e.g. `/dev/sda1`) to the corresponding block device.
type DeviceMap = HashMap<PathBuf, Rc<dyn BlockDevice>>;

/// A block device's major:minor number pair.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct DeviceNum {
    major: u64,
    minor: u64,
}

impl DeviceNum {
    /// Split a raw `st_dev`-style device number into its major / minor parts.
    pub fn new(raw_num: u64) -> DeviceNum {
        DeviceNum {
            major: major(raw_num),
            minor: minor(raw_num),
        }
    }

    pub fn major(&self) -> u64 {
        self.major
    }

    pub fn minor(&self) -> u64 {
        self.minor
    }
}

impl fmt::Display for DeviceNum {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}:{}", self.major, self.minor)
    }
}

impl FromStr for DeviceNum {
    type Err = Error;

    /// Parse a `major:minor` string as found in `/sys/block/<dev>/dev`.
    fn from_str(s: &str) -> result::Result<Self, Self::Err> {
        lazy_static! {
            static ref DEVNUM_RE: Regex = Regex::new(r#"^(\d+):(\d+)$"#).unwrap();
        }

        if let Some(captures) = DEVNUM_RE.captures(s.trim()) {
            Ok(Self {
                major: captures
                    .get(1)
                    .unwrap()
                    .as_str()
                    .parse::<u64>()
                    .upstream_with_context(&format!(
                        "Failed to parse device major number from '{}'",
                        s
                    ))?,
                minor: captures
                    .get(2)
                    .unwrap()
                    .as_str()
                    .parse::<u64>()
                    // FIX: message previously read "Failed to parse major device
                    // major number" — copy-paste error on the minor branch.
                    .upstream_with_context(&format!(
                        "Failed to parse device minor number from '{}'",
                        s
                    ))?,
            })
        } else {
            Err(Error::with_context(
                ErrorKind::InvState,
                &format!(
                    "Failed to parse block device major:minor numbers from '{}'",
                    s
                ),
            ))
        }
    }
}

/// Inventory of the system's block devices, anchored on the device that
/// holds the root filesystem (or the filesystem of a chosen directory).
#[derive(Clone)]
pub(crate) struct BlockDeviceInfo {
    root_device: Rc<dyn BlockDevice>,
    root_partition: Option<Rc<dyn BlockDevice>>,
    devices: DeviceMap,
}

impl BlockDeviceInfo {
    /// Create a BlockDeviceInfo based on the storage device used for the root directory.
    pub fn new() -> Result<BlockDeviceInfo> {
        BlockDeviceInfo::new_for_dir("/")
    }

    /// Create a BlockDeviceInfo based on the storage device used for the provided
    /// directory.
    pub fn new_for_dir(dir: &str) -> Result<BlockDeviceInfo> {
        let stat_res = stat(dir).upstream_with_context(&format!("Failed to stat for {}", dir))?;
        let root_number = DeviceNum::new(stat_res.st_dev);
        let mounts = Mount::from_mtab()?;

        debug!(
            "new: Root device number is: {}:{}",
            root_number.major(),
            root_number.minor()
        );

        let sys_path = PathBuf::from("/sys/block/");
        let read_dir = read_dir(&sys_path).upstream_with_context(&format!(
            "Failed to read directory '{}'",
            sys_path.display()
        ))?;

        let mut device_map: DeviceMap = DeviceMap::new();
        for entry in read_dir {
            match entry {
                Ok(entry) => {
                    let curr_path = entry.path();
                    let curr_dev = BlockDeviceInfo::path_filename_as_string(&curr_path)?;
                    let curr_number = BlockDeviceInfo::get_maj_minor(&curr_path)?;
                    trace!(
                        "new: Looking at path '{}', device '{}' number: {}",
                        curr_path.display(),
                        curr_dev,
                        curr_number,
                    );

                    // only consider device types we know how to migrate
                    if !BLOC_DEV_SUPP_MAJ_NUMBERS.contains(&curr_number.major()) {
                        trace!(
                            "Skipping device '{}' with block device major {}",
                            curr_dev,
                            curr_number.major()
                        );
                        continue;
                    }

                    let dev_path = path_append("/dev", &curr_dev);
                    if !dev_path.exists() {
                        return Err(Error::with_context(
                            ErrorKind::DeviceNotFound,
                            &format!("device path does not exist: '{}'", dev_path.display()),
                        ));
                    }

                    // TODO: fill mounted

                    let mounted: Option<Mount> = if let Some(mount) = mounts.get(&dev_path) {
                        Some(mount.clone())
                    } else if root_number == curr_number {
                        // the root fs may be listed as /dev/root in mtab
                        mounts.get(PathBuf::from("/dev/root").as_path()).cloned()
                    } else {
                        None
                    };

                    let device = Rc::new(Device {
                        name: curr_dev,
                        device_num: curr_number,
                        mounted,
                    }) as Rc<dyn BlockDevice>;

                    BlockDeviceInfo::read_partitions(
                        &device,
                        &mounts,
                        &curr_path,
                        &root_number,
                        &mut device_map,
                    )?;
                    device_map.insert(dev_path, device.clone());

                    debug!("new: got device: {:?}", device);
                }
                Err(why) => {
                    return Err(Error::with_all(
                        ErrorKind::Upstream,
                        &format!(
                            "Failed to read directory entry from '{}'",
                            sys_path.display(),
                        ),
                        Box::new(why),
                    ));
                }
            }
        }

        let mut root_device: Option<Rc<dyn BlockDevice>> = None;
        let mut root_partition: Option<Rc<dyn BlockDevice>> = None;

        for device_rc in device_map.values_mut() {
            let device = device_rc.as_ref();
            if device.get_device_num() == &root_number {
                if let Some(parent) = device.get_parent() {
                    root_device = Some(parent.clone());
                    root_partition = Some(device_rc.clone())
                } else {
                    root_device = Some(device_rc.clone());
                    root_partition = None;
                }
                break;
            }
        }

        // NOTE(review): when the root fs lives directly on a whole device
        // (root_partition == None above) this still falls through to the
        // "Failed to find root device" error — confirm whether requiring a
        // partition here is intended.
        if let Some(root_device) = root_device {
            if let Some(root_partition) = root_partition {
                return Ok(BlockDeviceInfo {
                    root_device,
                    root_partition: Some(root_partition),
                    devices: device_map,
                });
            }
        }

        Err(Error::with_context(
            ErrorKind::InvState,
            "Failed to find root device",
        ))
    }

    /// Scan the `/sys/block/<dev>` directory for partition subdirectories of
    /// `device` and insert each one into `device_map`.
    fn read_partitions<P: AsRef<Path>>(
        device: &Rc<dyn BlockDevice>,
        mounts: &MountTab,
        dev_path: P,
        root_number: &DeviceNum,
        device_map: &mut DeviceMap,
    ) -> Result<()> {
        trace!(
            "read_partitions: device: {} dev_path: {}",
            device.get_name(),
            dev_path.as_ref().display()
        );

        let dev_path = dev_path.as_ref();
        let dir_entries = read_dir(dev_path).upstream_with_context(&format!(
            "Failed to read directory '{}'",
            dev_path.display()
        ))?;

        // partitions are named "<dev><n>" or "<dev>p<n>"
        let regex_str = format!(r"^{}p?\d+$", device.get_name());
        let part_regex = Regex::new(regex_str.as_str())
            .upstream_with_context(&format!("Failed to create regex from '{}'", regex_str))?;

        for entry in dir_entries {
            match entry {
                Ok(entry) => {
                    let currdir = entry.path();
                    if entry
                        .metadata()
                        .upstream_with_context(&format!(
                            "Failed to retrieve metadata for '{}'",
                            currdir.display()
                        ))?
                        .is_dir()
                    {
                        let part_name = BlockDeviceInfo::path_filename_as_string(&currdir)?;

                        if !part_regex.is_match(part_name.as_str()) {
                            trace!("Skipping folder '{}", currdir.display());
                            continue;
                        }

                        let curr_number = BlockDeviceInfo::get_maj_minor(&currdir)?;
                        let dev_path = path_append("/dev", &part_name);

                        let mounted = if let Some(mount) = mounts.get(dev_path.as_path()) {
                            Some(mount.clone())
                        } else if curr_number == *root_number {
                            mounts.get(PathBuf::from("/dev/root").as_path()).cloned()
                        } else {
                            None
                        };

                        let partition = Rc::new(Partition::new(
                            part_name.as_str(),
                            curr_number,
                            mounted,
                            device.clone(),
                        )?) as Rc<dyn BlockDevice>;

                        debug!(
                            "found partition '{:?}' in '{}'",
                            partition.get_name(),
                            currdir.display(),
                        );
                        device_map.insert(dev_path, partition);
                    }
                }
                Err(why) => {
                    return Err(Error::with_all(
                        ErrorKind::Upstream,
                        &format!(
                            "Failed to read directory entry from '{}'",
                            dev_path.display(),
                        ),
                        Box::new(why),
                    ));
                }
            }
        }

        Ok(())
    }

    pub fn get_root_device(&self) -> &Rc<dyn BlockDevice> {
        &self.root_device
    }

    #[allow(dead_code)]
    pub fn get_root_partition(&self) -> &Option<Rc<dyn BlockDevice>> {
        &self.root_partition
    }

    pub fn get_devices(&self) -> &DeviceMap {
        &self.devices
    }

    /// Read `/sys/.../dev` and parse the contained `major:minor` pair.
    fn get_maj_minor<P: AsRef<Path>>(dev_path: P) -> Result<DeviceNum> {
        let dev_info_path = path_append(dev_path.as_ref(), "dev");
        let dev_info = read_to_string(&dev_info_path).upstream_with_context(&format!(
            "Failed to read file '{}'",
            dev_info_path.display()
        ))?;

        DeviceNum::from_str(dev_info.as_str())
    }

    /// extract last element of path as string
    fn path_filename_as_string<P: AsRef<Path>>(path: P) -> Result<String> {
        let path = path.as_ref();
        if let Some(dev_name) = path.file_name() {
            if let Some(dev_name) = dev_name.to_str() {
                Ok(String::from(dev_name))
            } else {
                Err(Error::with_context(
                    ErrorKind::InvParam,
                    &format!(
                        "Invalid characters found in device name '{}'",
                        path.display()
                    ),
                ))
            }
        } else {
            Err(Error::with_context(
                ErrorKind::InvParam,
                &format!("Failed to retrieve filename from path '{}'", path.display()),
            ))
        }
    }
}
-------------------------------------------------------------------------------- /src/stage1/block_device_info/block_device.rs: --------------------------------------------------------------------------------
use std::fmt::{self, Debug};
use
std::path::PathBuf;
use std::rc::Rc;

use crate::stage1::block_device_info::mount::Mount;
use crate::stage1::block_device_info::partition::PartitionInfo;
use crate::stage1::block_device_info::DeviceNum;

/// Common interface shared by whole disks and partitions.
pub(crate) trait BlockDevice {
    fn get_device_num(&self) -> &DeviceNum;
    fn get_mountpoint(&self) -> &Option<Mount>;
    fn get_name(&self) -> &str;
    fn get_dev_path(&self) -> PathBuf;
    /// Parent device; `None` for whole disks, `Some(disk)` for partitions.
    fn get_parent(&self) -> Option<&Rc<dyn BlockDevice>>;
    fn is_partition(&self) -> bool;
    fn set_mountpoint(&mut self, mountpoint: Mount);
    fn get_partition_info(&self) -> Option<&PartitionInfo>;
}

impl Debug for dyn BlockDevice {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let parent_val = if let Some(parent) = self.get_parent() {
            parent.as_ref().get_dev_path().display().to_string()
        } else {
            "None".to_string()
        };

        f.debug_struct("BlockDevice")
            .field("name", &self.get_name())
            .field("device_num", &self.get_device_num())
            .field("mounted", &self.get_mountpoint())
            .field("parent", &parent_val)
            .finish()
    }
}
-------------------------------------------------------------------------------- /src/stage1/block_device_info/device.rs: --------------------------------------------------------------------------------
use std::path::PathBuf;
use std::rc::Rc;

use crate::stage1::block_device_info::partition::PartitionInfo;
use crate::stage1::block_device_info::DeviceNum;
use crate::{
    common::path_append,
    stage1::block_device_info::{block_device::BlockDevice, mount::Mount},
};

/// A whole (non-partition) block device such as `/dev/sda`.
#[derive(Clone, Debug)]
pub(crate) struct Device {
    pub name: String,
    pub device_num: DeviceNum,
    pub mounted: Option<Mount>,
}

impl BlockDevice for Device {
    fn get_device_num(&self) -> &DeviceNum {
        &self.device_num
    }

    fn get_mountpoint(&self) -> &Option<Mount> {
        &self.mounted
    }

    fn get_name(&self) -> &str {
        self.name.as_str()
    }

    fn get_dev_path(&self) -> PathBuf {
        path_append("/dev", &self.name)
    }

    fn get_parent(&self) -> Option<&Rc<dyn BlockDevice>> {
        // a whole disk has no parent device
        None
    }

    fn is_partition(&self) -> bool {
        false
    }

    fn set_mountpoint(&mut self, mountpoint: Mount) {
        self.mounted = Some(mountpoint);
    }

    fn get_partition_info(&self) -> Option<&PartitionInfo> {
        None
    }
}
-------------------------------------------------------------------------------- /src/stage1/block_device_info/mount.rs: --------------------------------------------------------------------------------
use std::collections::HashMap;
use std::fs::read_to_string;
use std::path::{Path, PathBuf};

use log::{debug, trace};

use crate::common::{Error, Result, ToError};
use crate::ErrorKind;

/// One mount-table entry: where a device is mounted and its filesystem type.
#[derive(Clone, Debug)]
pub(crate) struct Mount {
    mountpoint: PathBuf,
    fs_type: String,
}

impl Mount {
    pub fn get_mountpoint(&self) -> &Path {
        self.mountpoint.as_path()
    }

    #[allow(dead_code)]
    pub fn get_fs_type(&self) -> &str {
        self.fs_type.as_str()
    }
}

/// Map from device path (e.g. `/dev/sda1`) to its mount entry.
pub(crate) type MountTab = HashMap<PathBuf, Mount>;

impl Mount {
    /// Parse `/etc/mtab` into a MountTab, keeping only `/dev/*` devices.
    pub fn from_mtab() -> Result<MountTab> {
        let mtab_str =
            read_to_string("/etc/mtab").upstream_with_context("Failed to read from '/etc/mtab'")?;

        let mut mounts: MountTab = MountTab::new();

        for (line_no, line) in mtab_str.lines().enumerate() {
            let columns: Vec<&str> = line.split_whitespace().collect();
            if columns.len() < 3 {
                return Err(Error::with_context(
                    ErrorKind::InvParam,
                    &format!("Failed to parse /etc/mtab line {} : '{}'", line_no, line),
                ));
            }

            let device_name = columns[0];
            if device_name.starts_with("/dev/") {
                let mount = Mount {
                    mountpoint: PathBuf::from(columns[1]),
                    fs_type: columns[2].to_string(),
50 | }; 51 | 52 | debug!("from_mtab: processing mount {:?}", mount); 53 | mounts.insert(PathBuf::from(device_name), mount); 54 | } else { 55 | trace!("from_mtab: not processing line {}", line); 56 | } 57 | } 58 | 59 | Ok(mounts) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/stage1/block_device_info/partition.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | use std::rc::Rc; 3 | 4 | use crate::stage1::block_device_info::DeviceNum; 5 | use crate::{ 6 | common::{ 7 | call, 8 | defs::BLKID_CMD, 9 | error::{Error, ErrorKind, Result, ToError}, 10 | path_append, 11 | }, 12 | stage1::block_device_info::{block_device::BlockDevice, mount::Mount}, 13 | }; 14 | use lazy_static::lazy_static; 15 | use log::{debug, warn}; 16 | use regex::Regex; 17 | 18 | #[allow(dead_code)] 19 | #[derive(Clone, Debug)] 20 | pub(crate) struct PartitionInfo { 21 | uuid: Option, 22 | block_size: Option, 23 | fs_type: Option, 24 | label: Option, 25 | part_uuid: Option, 26 | } 27 | 28 | impl PartitionInfo { 29 | pub fn new>(device: P) -> Result { 30 | lazy_static! 
{ 31 | static ref START_REGEX: Regex = Regex::new(r"^([^:]+):\s+(.+)$").unwrap(); 32 | static ref NEXT_REGEX: Regex = 33 | Regex::new(r##"^([^=]+)="([^"]*)"(\s+(.+))?$"##).unwrap(); 34 | } 35 | 36 | let cmd_res = call_command!( 37 | BLKID_CMD, 38 | &[&*device.as_ref().to_string_lossy()], 39 | "Failed to call blkid" 40 | )?; 41 | 42 | if let Some(captures) = START_REGEX.captures(cmd_res.as_str()) { 43 | let mut next_params = captures.get(2); 44 | 45 | let mut uuid: Option = None; 46 | let mut block_size: Option = None; 47 | let mut fs_type: Option = None; 48 | let mut label: Option = None; 49 | let mut part_uuid: Option = None; 50 | 51 | while let Some(params) = next_params { 52 | if let Some(captures) = NEXT_REGEX.captures(params.as_str()) { 53 | let param_name = captures.get(1).unwrap().as_str(); 54 | let param_value = captures.get(2).unwrap().as_str(); 55 | debug!( 56 | "PartitionInfo::new: {} got param name: {}, value: {}", 57 | device.as_ref().display(), 58 | param_name, 59 | param_value 60 | ); 61 | match param_name { 62 | "UUID" => { 63 | uuid = Some(param_value.to_owned()); 64 | } 65 | "BLOCK_SIZE" => { 66 | block_size = Some(param_value.parse::().upstream_with_context( 67 | &format!("Failed to parse block size from {}", param_value), 68 | )?); 69 | } 70 | "TYPE" => { 71 | fs_type = Some(param_value.to_owned()); 72 | } 73 | "PARTLABEL" => { 74 | label = Some(param_value.to_owned()); 75 | } 76 | "PARTUUID" => { 77 | part_uuid = Some(param_value.to_owned()); 78 | } 79 | _ => { 80 | warn!("unexpected parameter name found: '{}'", param_name); 81 | } 82 | } 83 | next_params = captures.get(4); 84 | } else { 85 | break; 86 | } 87 | } 88 | 89 | let part_info = PartitionInfo { 90 | uuid, 91 | block_size, 92 | part_uuid, 93 | fs_type, 94 | label, 95 | }; 96 | debug!( 97 | "PartitionInfo::new: for {} got {:?}", 98 | device.as_ref().display(), 99 | part_info, 100 | ); 101 | Ok(part_info) 102 | } else { 103 | Err(Error::with_context( 104 | ErrorKind::InvParam, 105 | 
&format!( 106 | "Empty or unexpected blkid output '{}' for path '{}'", 107 | cmd_res, 108 | &*device.as_ref().to_string_lossy() 109 | ), 110 | )) 111 | } 112 | } 113 | pub fn fs_type(&self) -> Option<&str> { 114 | if let Some(fs_type) = &self.fs_type { 115 | Some(fs_type) 116 | } else { 117 | None 118 | } 119 | } 120 | } 121 | 122 | #[derive(Clone)] 123 | pub(crate) struct Partition { 124 | name: String, 125 | device_num: DeviceNum, 126 | mounted: Option, 127 | parent: Rc, 128 | partition_info: PartitionInfo, 129 | } 130 | 131 | impl Partition { 132 | pub fn new( 133 | name: &str, 134 | device_num: DeviceNum, 135 | mounted: Option, 136 | parent: Rc, 137 | ) -> Result { 138 | Ok(Partition { 139 | name: name.to_owned(), 140 | device_num, 141 | mounted, 142 | parent, 143 | partition_info: PartitionInfo::new(format!("/dev/{}", name))?, 144 | }) 145 | } 146 | } 147 | 148 | impl BlockDevice for Partition { 149 | fn get_device_num(&self) -> &DeviceNum { 150 | &self.device_num 151 | } 152 | 153 | fn get_mountpoint(&self) -> &Option { 154 | &self.mounted 155 | } 156 | 157 | fn get_name(&self) -> &str { 158 | self.name.as_str() 159 | } 160 | 161 | fn get_dev_path(&self) -> PathBuf { 162 | path_append("/dev", &self.name) 163 | } 164 | 165 | fn get_parent(&self) -> Option<&Rc> { 166 | Some(&self.parent) 167 | } 168 | 169 | fn is_partition(&self) -> bool { 170 | true 171 | } 172 | 173 | fn set_mountpoint(&mut self, mountpoint: Mount) { 174 | self.mounted = Some(mountpoint); 175 | } 176 | 177 | fn get_partition_info(&self) -> Option<&PartitionInfo> { 178 | Some(&self.partition_info) 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /src/stage1/checks.rs: -------------------------------------------------------------------------------- 1 | use log::error; 2 | 3 | use super::{block_device_info::BlockDeviceInfo, get_block_dev_info, get_log_device}; 4 | use crate::common::{is_admin, Error, Options, Result}; 5 | 6 | /// Performs 
checks to ensure that the program can run properly with the 7 | /// provided command-line options. Returns an error if the program cannot run 8 | /// for some reason. 9 | pub(crate) fn do_early_checks(opts: &Options) -> Result<()> { 10 | if !is_admin()? { 11 | error!("please run this program as root"); 12 | return Err(Error::displayed()); 13 | } 14 | 15 | let block_dev_info = get_block_dev_info()?; 16 | 17 | if !check_log_device(opts, &block_dev_info) { 18 | error!("the requested log device is not suitable for writing stage2 logs"); 19 | return Err(Error::displayed()); 20 | } 21 | 22 | Ok(()) 23 | } 24 | 25 | /// Checks if the log device requested with `--log-device` is suitable for 26 | /// writing stage2 logs. 27 | fn check_log_device(opts: &Options, block_dev_info: &BlockDeviceInfo) -> bool { 28 | if opts.log_to().is_none() { 29 | // No log device requested: that's fine! 30 | return true; 31 | }; 32 | 33 | // But if the user requested a log device, we must be able to get it. If we 34 | // can't, *that* is a problem! 
35 | get_log_device(opts, block_dev_info).is_some() 36 | } 37 | -------------------------------------------------------------------------------- /src/stage1/defs.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::{self, Display}; 2 | 3 | pub const DEV_TYPE_INTEL_NUC: &str = "intel-nuc"; 4 | pub const DEV_TYPE_GEN_X86_64: &str = "genericx86-64-ext"; // MBR 5 | pub const DEV_TYPE_GEN_AMD64: &str = "generic-amd64"; // GPT 6 | pub const DEV_TYPE_RPI3: &str = "raspberrypi3"; 7 | pub const DEV_TYPE_RPI2: &str = "raspberry-pi2"; 8 | pub const DEV_TYPE_RPI1: &str = "raspberry-pi"; 9 | pub const DEV_TYPE_RPI4_64: &str = "raspberrypi4-64"; 10 | pub const DEV_TYPE_BBG: &str = "beaglebone-green"; 11 | pub const DEV_TYPE_BBB: &str = "beaglebone-black"; 12 | pub const DEV_TYPE_BBXM: &str = "beagleboard-xm"; 13 | pub const DEV_TYPE_JETSON_XAVIER: &str = "jetson-xavier"; 14 | pub const DEV_TYPE_JETSON_XAVIER_NX: &str = "jetson-xavier-nx-devkit"; 15 | pub const DEV_TYPE_JETSON_XAVIER_NX_EMMC: &str = "jetson-xavier-nx-devkit-emmc"; 16 | 17 | pub const MAX_CONFIG_JSON: usize = 2048; 18 | pub const GZIP_MAGIC_COOKIE: u16 = 0x1f8b; 19 | 20 | #[derive(Debug, Clone, Copy, PartialEq)] 21 | pub(crate) enum DeviceType { 22 | BeagleboneGreen, 23 | BeagleboneBlack, 24 | BeagleboardXM, 25 | IntelNuc, 26 | RaspberryPi1, 27 | RaspberryPi2, 28 | RaspberryPi3, 29 | RaspberryPi4, 30 | Dummy, 31 | JetsonXavier, 32 | JetsonXavierNX, 33 | } 34 | 35 | impl Display for DeviceType { 36 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 37 | write!( 38 | f, 39 | "{},", 40 | match self { 41 | Self::IntelNuc => "X68_64/Intel Nuc", 42 | Self::BeagleboneGreen => "Beaglebone Green", 43 | Self::BeagleboneBlack => "Beaglebone Black", 44 | Self::BeagleboardXM => "Beagleboard XM", 45 | Self::RaspberryPi1 => "Raspberry Pi 1/Zero", 46 | Self::RaspberryPi2 => "Raspberry Pi 2", 47 | Self::RaspberryPi3 => "Raspberry Pi 3", 48 | Self::RaspberryPi4 => 
"Raspberry Pi 4",
                Self::Dummy => "Dummy",
                Self::JetsonXavier => "Jetson Xavier AGX",
                Self::JetsonXavierNX => "Jetson Xavier NX",
            }
        )
    }
}

#[allow(clippy::upper_case_acronyms)]
#[derive(Debug, Clone)]
pub(crate) enum OSArch {
    AMD64,
    ARMHF,
    I386,
    ARM64,
    /*
    ARMEL,
    MIPS,
    MIPSEL,
    Powerpc,
    PPC64EL,
    S390EX,
    */
}
-------------------------------------------------------------------------------- /src/stage1/device.rs: --------------------------------------------------------------------------------
use std::fmt::{self, Debug, Display};

use crate::stage1::defs::DeviceType;

/// Interface implemented by all supported migration device types.
pub(crate) trait Device {
    fn supports_device_type(&self, dev_type: &str) -> bool;
    fn get_device_type(&self) -> DeviceType;
}

impl Display for dyn Device {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{},", self.get_device_type())
    }
}

impl Debug for dyn Device {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Device")
            .field("type", &self.get_device_type())
            .finish()
    }
}
-------------------------------------------------------------------------------- /src/stage1/device_impl.rs: --------------------------------------------------------------------------------
use log::{error, info, warn};
use std::fs::read_to_string;

use crate::common::ToError;
use crate::{
    common::{get_os_name, Error, ErrorKind, Options, Result},
    stage1::{defs::OSArch, device::Device, utils::get_os_arch},
};

mod beaglebone;
mod dummy;
mod intel_nuc;
mod jetson_xavier;
mod raspberrypi;

const DEVICE_TREE_MODEL: &str = "/proc/device-tree/model";

/// Check whether the detected OS name is in `supported` for `dev_type`.
/// Returns Ok(false) when unsupported and OS checking is enabled; with the
/// no-os-check option a warning is logged and Ok(true) is returned.
pub(crate) fn check_os(supported: &[&str], opts: &Options, dev_type: &str) -> Result<bool> {
    let os_name = get_os_name()?;
    info!("Detected OS name is {}", os_name);

    let os_supported = supported.iter().any(|&r| r == os_name);

    if !os_supported {
        if opts.os_check() {
            error!(
                "The OS '{}' has not been tested with {} for device type {}, to override this check use the no-os-check option on the command line",
                os_name,
                dev_type,
                env!("CARGO_PKG_NAME")
            );
            Ok(false)
        } else {
            warn!(
                "The OS '{}' has not been tested with {} for device type {}, proceeding due to no-os-check option",
                os_name,
                dev_type,
                env!("CARGO_PKG_NAME"));
            Ok(true)
        }
    } else {
        Ok(true)
    }
}

/// Detect the device type from the OS architecture and (on ARM) the
/// device-tree model string, returning a matching `Device` implementation.
pub(crate) fn get_device(opts: &Options) -> Result<Box<dyn Device>> {
    let os_arch = get_os_arch()?;
    info!("Detected OS Architecture is {:?}", os_arch);

    if !opts.dt_check() {
        info!("Disabling all device type-related checks due to no-dt-check option");
        return Ok(Box::new(dummy::Dummy::new()));
    }

    match os_arch {
        OSArch::ARMHF | OSArch::ARM64 => {
            let dev_tree_model = String::from(
                read_to_string(DEVICE_TREE_MODEL)
                    .upstream_with_context(&format!(
                        "get_device: unable to determine model due to inaccessible file '{}'",
                        DEVICE_TREE_MODEL
                    ))?
                    .trim_end_matches('\0')
                    .trim_end(),
            );

            if let Some(device) = raspberrypi::is_rpi(opts, &dev_tree_model)? {
                return Ok(device);
            }

            if let Some(device) = beaglebone::is_bb(opts, &dev_tree_model)? {
                return Ok(device);
            }

            if let Some(device) = jetson_xavier::is_jetson_xavier(opts, &dev_tree_model)? {
                return Ok(device);
            }

            let message = format!(
                "Your device type: '{}' is not supported by balena-migrate.",
                dev_tree_model
            );
            error!("{}", message);
            Err(Error::with_context(ErrorKind::InvState, &message))
        }
        OSArch::AMD64 => Ok(Box::new(intel_nuc::IntelNuc::from_config(opts)?)),
        /* OSArch::I386 => {
            migrator.init_i386()?;
        },
        */
        _ => Err(Error::with_context(
            ErrorKind::InvParam,
            &format!("get_device: unexpected OsArch encountered: {:?}", os_arch),
        )),
    }
}
-------------------------------------------------------------------------------- /src/stage1/device_impl/beaglebone.rs: --------------------------------------------------------------------------------
use log::{debug, error, trace};
use regex::Regex;

use crate::{
    common::{Error, ErrorKind, Options, Result},
    stage1::{
        defs::{DeviceType, DEV_TYPE_BBB, DEV_TYPE_BBG, DEV_TYPE_BBXM},
        device::Device,
        device_impl::check_os,
    },
};

const SUPPORTED_OSSES: [&str; 4] = [
    "Ubuntu 18.04.2 LTS",
    "Ubuntu 14.04.1 LTS",
    "Debian GNU/Linux 9 (stretch)",
    "Debian GNU/Linux 7 (wheezy)",
];

// Supported models
// TI OMAP3 BeagleBoard xM
const BB_MODEL_REGEX: &str = r#"^((\S+\s+)*(\S+))\s+Beagle(Bone|Board)\s+(\S+)$"#;

const BBG_SLUGS: [&str; 1] = [DEV_TYPE_BBG];
const BBB_SLUGS: [&str; 1] = [DEV_TYPE_BBB];
const BBXM_SLUGS: [&str; 1] = [DEV_TYPE_BBXM];

// TODO: check location of uEnv.txt or other files files to improve reliability

/// Match the device-tree model string against the known Beaglebone /
/// Beagleboard variants; returns Ok(None) when it is not a beaglebone.
pub(crate) fn is_bb(opts: &Options, model_string: &str) -> Result<Option<Box<dyn Device>>> {
    trace!(
        "Beaglebone::is_bb: entered with model string: '{}'",
        model_string
    );

    if model_string.eq("TI AM335x BeagleBone") {
        // TODO: found this device model string on a beaglebone-green running debian wheezy
        debug!("match found for BeagleboneGreen");
Ok(Some(Box::new(BeagleboneGreen::from_config(opts)?)))
    } else if let Some(captures) = Regex::new(BB_MODEL_REGEX).unwrap().captures(model_string) {
        // capture 5 is the model suffix ("xM", "Green", "Black", ...)
        let model = captures
            .get(5)
            .unwrap()
            .as_str()
            .trim_matches(char::from(0));

        match model {
            "xM" => {
                debug!("match found for BeagleboardXM");
                // TODO: dtb-name is a guess replace with real one
                Ok(Some(Box::new(BeagleboardXM::from_config(opts)?)))
            }
            "Green" => {
                debug!("match found for BeagleboneGreen");
                Ok(Some(Box::new(BeagleboneGreen::from_config(opts)?)))
            }
            "Black" => {
                debug!("match found for BeagleboneBlack");
                Ok(Some(Box::new(BeagleboneBlack::from_config(opts)?)))
            }
            _ => {
                let message = format!("The beaglebone model reported by your device ('{}') is not supported by balena-migrate", model);
                error!("{}", message);
                Err(Error::with_context(ErrorKind::InvParam, &message))
            }
        }
    } else {
        debug!("no match for beaglebone on: <{}>", model_string);
        Ok(None)
    }
}

pub(crate) struct BeagleboneGreen {}

impl BeagleboneGreen {
    // this is used in stage1
    fn from_config(opts: &Options) -> Result<BeagleboneGreen> {
        if !check_os(&SUPPORTED_OSSES, opts, "Beaglebone Green")? {
            return Err(Error::displayed());
        }

        Ok(BeagleboneGreen {})
    }
}

impl Device for BeagleboneGreen {
    fn supports_device_type(&self, dev_type: &str) -> bool {
        BBG_SLUGS.contains(&dev_type)
    }

    fn get_device_type(&self) -> DeviceType {
        DeviceType::BeagleboneGreen
    }
}

pub(crate) struct BeagleboneBlack {}

impl BeagleboneBlack {
    // this is used in stage1
    fn from_config(opts: &Options) -> Result<BeagleboneBlack> {
        if !check_os(&SUPPORTED_OSSES, opts, "Beaglebone Black")? {
            return Err(Error::displayed());
        }

        Ok(BeagleboneBlack {})
    }
}

impl Device for BeagleboneBlack {
    fn supports_device_type(&self, dev_type: &str) -> bool {
        BBB_SLUGS.contains(&dev_type)
    }

    fn get_device_type(&self) -> DeviceType {
        DeviceType::BeagleboneBlack
    }
}

pub(crate) struct BeagleboardXM {}

impl BeagleboardXM {
    // this is used in stage1
    fn from_config(opts: &Options) -> Result<BeagleboardXM> {
        if opts.migrate() && !check_os(&SUPPORTED_OSSES, opts, "Beagleboard XM")? {
            return Err(Error::displayed());
        }

        Ok(BeagleboardXM {})
    }
}

impl Device for BeagleboardXM {
    fn supports_device_type(&self, dev_type: &str) -> bool {
        BBXM_SLUGS.contains(&dev_type)
    }

    fn get_device_type(&self) -> DeviceType {
        DeviceType::BeagleboardXM
    }
}
-------------------------------------------------------------------------------- /src/stage1/device_impl/dummy.rs: --------------------------------------------------------------------------------
use crate::{stage1::defs::DeviceType, stage1::device::Device};

/// The Dummy device skips all compatibility checks. This is useful when the
/// user's actual device type is not supported by takeover, but it is
/// technically capable of running the migration. This device type is used when
/// the user passes the `--no-dt-check` CLI option.
pub(crate) struct Dummy;

impl Dummy {
    pub fn new() -> Dummy {
        Dummy
    }
}

impl Device for Dummy {
    fn supports_device_type(&self, _dev_type: &str) -> bool {
        // When using the Dummy device type, we want to skip all DT-specific
        // code. We achieve that by saying that the Dummy device type does not
        // support any device type.
20 | false 21 | } 22 | fn get_device_type(&self) -> DeviceType { 23 | DeviceType::Dummy 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/stage1/device_impl/intel_nuc.rs: -------------------------------------------------------------------------------- 1 | use log::{error, info}; 2 | 3 | use crate::stage1::device_impl::check_os; 4 | use crate::{ 5 | common::{Error, Options, Result}, 6 | // linux_common::is_secure_boot, 7 | stage1::{ 8 | defs::{DeviceType, DEV_TYPE_GEN_AMD64, DEV_TYPE_GEN_X86_64, DEV_TYPE_INTEL_NUC}, 9 | device::Device, 10 | utils::is_secure_boot, 11 | }, 12 | }; 13 | 14 | const X86_SLUGS: [&str; 3] = [DEV_TYPE_INTEL_NUC, DEV_TYPE_GEN_X86_64, DEV_TYPE_GEN_AMD64]; 15 | 16 | pub(crate) struct IntelNuc; 17 | 18 | impl IntelNuc { 19 | pub fn from_config(opts: &Options) -> Result { 20 | const SUPPORTED_OSSES: &[&str] = &[ 21 | "Ubuntu 20.04 LTS", 22 | "Ubuntu 18.04.4 LTS", 23 | "Ubuntu 18.04.3 LTS", 24 | "Ubuntu 18.04.2 LTS", 25 | "Ubuntu 16.04.2 LTS", 26 | "Ubuntu 16.04.6 LTS", 27 | "Ubuntu 14.04.2 LTS", 28 | "Ubuntu 14.04.5 LTS", 29 | "Ubuntu 14.04.6 LTS", 30 | "Manjaro Linux", 31 | "balenaOS 4.0.23", 32 | ]; 33 | 34 | if opts.migrate() { 35 | if !check_os(SUPPORTED_OSSES, opts, "Generic x86_64/Intel Nuc")? 
{ 36 | return Err(Error::displayed()); 37 | } 38 | 39 | // ********************************************************************** 40 | // ** AMD64 specific initialisation/checks 41 | // ********************************************************************** 42 | 43 | let secure_boot = is_secure_boot()?; 44 | info!( 45 | "Secure boot is {}enabled", 46 | if secure_boot { "" } else { "not " } 47 | ); 48 | 49 | if secure_boot { 50 | error!( 51 | "{} does not currently support systems with secure boot enabled.", 52 | env!("CARGO_PKG_NAME") 53 | ); 54 | return Err(Error::displayed()); 55 | } 56 | } 57 | Ok(IntelNuc) 58 | } 59 | } 60 | 61 | impl Device for IntelNuc { 62 | fn supports_device_type(&self, dev_type: &str) -> bool { 63 | X86_SLUGS.contains(&dev_type) 64 | } 65 | fn get_device_type(&self) -> DeviceType { 66 | DeviceType::IntelNuc 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/stage1/device_impl/jetson_xavier.rs: -------------------------------------------------------------------------------- 1 | use crate::stage1::device_impl::check_os; 2 | use crate::{ 3 | common::{Error, Options, Result}, 4 | // linux_common::is_secure_boot, 5 | stage1::{ 6 | defs::{ 7 | DeviceType, DEV_TYPE_JETSON_XAVIER, DEV_TYPE_JETSON_XAVIER_NX, 8 | DEV_TYPE_JETSON_XAVIER_NX_EMMC, 9 | }, 10 | device::Device, 11 | }, 12 | }; 13 | use log::{debug, trace}; 14 | 15 | pub(crate) fn is_jetson_xavier( 16 | opts: &Options, 17 | model_string: &str, 18 | ) -> Result>> { 19 | trace!( 20 | "JetsonXavier::is_jetson_xavier: entered with model string: '{}'", 21 | model_string 22 | ); 23 | 24 | // Below strings have been taken from the AGX Xavier 8GB Devkit and the Xavier NX SD/eMMC Devkits 25 | if model_string.eq("Jetson-AGX") { 26 | debug!("match found for Xavier AGX"); 27 | Ok(Some(Box::new(JetsonXavier::from_config(opts)?))) 28 | } else if model_string.eq("NVIDIA Jetson Xavier NX Developer Kit") { 29 | debug!("match found for Xavier NX 
Devkit"); 30 | Ok(Some(Box::new(JetsonXavierNX::from_config(opts)?))) 31 | } else { 32 | debug!( 33 | "no match for Jetson-AGX or NVIDIA Jetson Xavier NX Developer Kit on: <{}>", 34 | model_string 35 | ); 36 | Ok(None) 37 | } 38 | } 39 | 40 | const XAVIER_AGX_SLUGS: [&str; 1] = [DEV_TYPE_JETSON_XAVIER]; 41 | const XAVIER_NX_SLUGS: [&str; 2] = [DEV_TYPE_JETSON_XAVIER_NX, DEV_TYPE_JETSON_XAVIER_NX_EMMC]; 42 | 43 | pub(crate) struct JetsonXavier; 44 | 45 | impl JetsonXavier { 46 | pub fn from_config(opts: &Options) -> Result { 47 | const SUPPORTED_OSSES: &[&str] = &["balenaOS 5.1.20", "balenaOS 3.1.3+rev1"]; 48 | 49 | if opts.migrate() && !check_os(SUPPORTED_OSSES, opts, "balenaOS 5.1.20")? { 50 | return Err(Error::displayed()); 51 | } 52 | // ********************************************************************** 53 | // ** Xavier AGX specific initialisation/checks 54 | // ********************************************************************** 55 | Ok(JetsonXavier) 56 | } 57 | } 58 | 59 | impl Device for JetsonXavier { 60 | fn supports_device_type(&self, dev_type: &str) -> bool { 61 | XAVIER_AGX_SLUGS.contains(&dev_type) 62 | } 63 | fn get_device_type(&self) -> DeviceType { 64 | DeviceType::JetsonXavier 65 | } 66 | } 67 | 68 | pub(crate) struct JetsonXavierNX; 69 | 70 | impl JetsonXavierNX { 71 | pub fn from_config(opts: &Options) -> Result { 72 | const SUPPORTED_OSSES: &[&str] = &["balenaOS 5.1.20", "balenaOS 3.1.3+rev1"]; 73 | 74 | if opts.migrate() && !check_os(SUPPORTED_OSSES, opts, "balenaOS 5.1.20")? 
{ 75 | return Err(Error::displayed()); 76 | } 77 | // ********************************************************************** 78 | // ** Xavier NX (SD and eMMC) specific initialisation/checks 79 | // ********************************************************************** 80 | Ok(JetsonXavierNX) 81 | } 82 | } 83 | 84 | impl Device for JetsonXavierNX { 85 | fn supports_device_type(&self, dev_type: &str) -> bool { 86 | XAVIER_NX_SLUGS.contains(&dev_type) 87 | } 88 | fn get_device_type(&self) -> DeviceType { 89 | DeviceType::JetsonXavierNX 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /src/stage1/device_impl/raspberrypi.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, error, info}; 2 | use regex::Regex; 3 | 4 | use crate::stage1::device_impl::check_os; 5 | use crate::{ 6 | common::{options::Options, Error, ErrorKind, Result}, 7 | stage1::{ 8 | defs::{DeviceType, DEV_TYPE_RPI1, DEV_TYPE_RPI2, DEV_TYPE_RPI3, DEV_TYPE_RPI4_64}, 9 | device::Device, 10 | }, 11 | }; 12 | 13 | // Pi Zero W: "Raspberry Pi Zero W Rev 1.1" 14 | // Balena Fin: "Raspberry Pi Compute Module 3 Plus Rev 1.0" 15 | // RPI 4: "Raspberry Pi 4 Model B Rev 1.1" 16 | // RPI 2: "Raspberry Pi 2 Model B Rev 1.1" 17 | // RPI Zero W: "Raspberry Pi Zero W Rev 1.1" 18 | const RPI_MODEL_REGEX: &str = r#"^Raspberry\s+Pi\s+(1|2|3|4|Compute Module 3|Zero)\s+(Model\s+(\S+)|W|Plus)\s+(Rev\s+(\S+))$"#; 19 | const RPI1_SLUGS: [&str; 1] = [DEV_TYPE_RPI1]; 20 | const RPI2_SLUGS: [&str; 1] = [DEV_TYPE_RPI2]; 21 | const RPI3_SLUGS: [&str; 1] = [DEV_TYPE_RPI3]; 22 | const RPI4_64_SLUGS: [&str; 1] = [DEV_TYPE_RPI4_64]; 23 | 24 | const SUPPORTED_OSSES: [&str; 4] = [ 25 | "Raspbian GNU/Linux 8 (jessie)", 26 | "Raspbian GNU/Linux 9 (stretch)", 27 | "Raspbian GNU/Linux 10 (buster)", 28 | "Ubuntu 20.04 LTS", 29 | ]; 30 | 31 | pub(crate) fn is_rpi(opts: &Options, model_string: &str) -> Result>> { 32 | debug!( 33 | 
"raspberrypi::is_rpi: entered with model string: '{}'", 34 | model_string 35 | ); 36 | 37 | if let Some(captures) = Regex::new(RPI_MODEL_REGEX).unwrap().captures(model_string) { 38 | let pitype = captures.get(1).unwrap().as_str(); 39 | let model = if let Some(model) = captures.get(3) { 40 | model.as_str().trim_matches(char::from(0)) 41 | } else { 42 | captures 43 | .get(2) 44 | .unwrap() 45 | .as_str() 46 | .trim_matches(char::from(0)) 47 | }; 48 | 49 | let revision = captures 50 | .get(5) 51 | .unwrap() 52 | .as_str() 53 | .trim_matches(char::from(0)); 54 | 55 | debug!( 56 | "raspberrypi::is_rpi: selection entered with string: '{}'", 57 | pitype 58 | ); 59 | 60 | match pitype { 61 | "1" | "Zero" => { 62 | info!("Identified RaspberryPi 1/Zero",); 63 | Ok(Some(Box::new(RaspberryPi1::from_config(opts)?))) 64 | } 65 | "2" => { 66 | info!("Identified RaspberryPi 2",); 67 | Ok(Some(Box::new(RaspberryPi2::from_config(opts)?))) 68 | } 69 | "3" | "Compute Module 3" => { 70 | info!("Identified RaspberryPi 3"); 71 | Ok(Some(Box::new(RaspberryPi3::from_config(opts)?))) 72 | } 73 | "4" => { 74 | info!("Identified RaspberryPi 4"); 75 | Ok(Some(Box::new(RaspberryPi4_64::from_config(opts)?))) 76 | } 77 | _ => { 78 | debug!("unknown PI type: '{}'", pitype); 79 | let message = format!("The raspberry pi type reported by your device ('{} {} rev {}') is not supported by balena-migrate", pitype, model, revision); 80 | error!("{}", message); 81 | Err(Error::with_context(ErrorKind::InvParam, &message)) 82 | } 83 | } 84 | } else { 85 | debug!("no match for Raspberry PI on: {}", model_string); 86 | Ok(None) 87 | } 88 | } 89 | 90 | pub(crate) struct RaspberryPi1; 91 | impl RaspberryPi1 { 92 | pub fn from_config(opts: &Options) -> Result { 93 | if opts.migrate() && !check_os(&SUPPORTED_OSSES, opts, "Raspberry PI 1")? 
{ 94 | return Err(Error::displayed()); 95 | } 96 | 97 | Ok(RaspberryPi1 {}) 98 | } 99 | } 100 | 101 | impl Device for RaspberryPi1 { 102 | fn supports_device_type(&self, dev_type: &str) -> bool { 103 | RPI1_SLUGS.contains(&dev_type) 104 | } 105 | 106 | fn get_device_type(&self) -> DeviceType { 107 | DeviceType::RaspberryPi1 108 | } 109 | } 110 | 111 | pub(crate) struct RaspberryPi2; 112 | impl RaspberryPi2 { 113 | pub fn from_config(opts: &Options) -> Result { 114 | if opts.migrate() && !check_os(&SUPPORTED_OSSES, opts, "Raspberry PI 2")? { 115 | return Err(Error::displayed()); 116 | } 117 | 118 | Ok(RaspberryPi2 {}) 119 | } 120 | } 121 | 122 | impl Device for RaspberryPi2 { 123 | fn supports_device_type(&self, dev_type: &str) -> bool { 124 | RPI2_SLUGS.contains(&dev_type) 125 | } 126 | 127 | fn get_device_type(&self) -> DeviceType { 128 | DeviceType::RaspberryPi2 129 | } 130 | } 131 | 132 | pub(crate) struct RaspberryPi3; 133 | 134 | impl RaspberryPi3 { 135 | pub fn from_config(opts: &Options) -> Result { 136 | if opts.migrate() && !check_os(&SUPPORTED_OSSES, opts, "Raspberry PI 3")? { 137 | return Err(Error::displayed()); 138 | } 139 | 140 | Ok(RaspberryPi3) 141 | } 142 | } 143 | 144 | impl Device for RaspberryPi3 { 145 | fn supports_device_type(&self, dev_type: &str) -> bool { 146 | RPI3_SLUGS.contains(&dev_type) 147 | } 148 | 149 | fn get_device_type(&self) -> DeviceType { 150 | DeviceType::RaspberryPi3 151 | } 152 | } 153 | 154 | pub(crate) struct RaspberryPi4_64; 155 | 156 | impl RaspberryPi4_64 { 157 | pub fn from_config(opts: &Options) -> Result { 158 | if opts.migrate() && !check_os(&SUPPORTED_OSSES, opts, "Raspberry PI 4")? 
{ 159 | return Err(Error::displayed()); 160 | } 161 | 162 | Ok(RaspberryPi4_64) 163 | } 164 | } 165 | 166 | impl Device for RaspberryPi4_64 { 167 | fn supports_device_type(&self, dev_type: &str) -> bool { 168 | RPI4_64_SLUGS.contains(&dev_type) 169 | } 170 | 171 | fn get_device_type(&self) -> DeviceType { 172 | DeviceType::RaspberryPi4 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /src/stage1/exe_copy.rs: -------------------------------------------------------------------------------- 1 | use crate::common::system::stat; 2 | use crate::common::{dir_exists, path_append, whereis, Error, ErrorKind, Result, ToError}; 3 | 4 | use lddtree::DependencyAnalyzer; 5 | use log::{debug, error, info, trace}; 6 | use std::collections::HashSet; 7 | use std::fs::{copy, create_dir, create_dir_all, read_link}; 8 | use std::path::{Path, PathBuf}; 9 | 10 | pub(crate) struct ExeCopy { 11 | req_space: u64, 12 | libraries: HashSet, 13 | executables: HashSet, 14 | } 15 | 16 | impl ExeCopy { 17 | pub fn new(cmd_list: Vec<&str>) -> Result { 18 | trace!("new: entered with {:?}", cmd_list); 19 | 20 | let mut executables: HashSet = HashSet::new(); 21 | 22 | executables.insert( 23 | read_link("/proc/self/exe") 24 | .upstream_with_context("Failed to read link to this executable")? 
25 | .to_string_lossy() 26 | .to_string(), 27 | ); 28 | 29 | for command in cmd_list { 30 | executables.insert(whereis(command).error_with_all( 31 | ErrorKind::FileNotFound, 32 | &format!("Command '{}' could not be located", command), 33 | )?); 34 | } 35 | 36 | let mut efi_files = ExeCopy { 37 | req_space: 0, 38 | libraries: HashSet::new(), 39 | executables, 40 | }; 41 | 42 | efi_files.get_libs_for()?; 43 | 44 | Ok(efi_files) 45 | } 46 | pub fn get_req_space(&self) -> u64 { 47 | self.req_space 48 | } 49 | 50 | fn get_libs_for(&mut self) -> Result<()> { 51 | trace!("get_libs_for: entered"); 52 | let mut check_libs: HashSet = HashSet::new(); 53 | 54 | for curr_path in &self.executables { 55 | let stat = stat(curr_path) 56 | .upstream_with_context(&format!("Failed to stat '{}'", curr_path))?; 57 | self.req_space += stat.st_size as u64; 58 | self.get_libs(curr_path, &mut check_libs)?; 59 | } 60 | 61 | // This goes down the dependency tree and check sub-dependencies 62 | while !check_libs.is_empty() { 63 | let mut unchecked_libs: HashSet = HashSet::new(); 64 | for curr in &check_libs { 65 | self.add_lib(curr)?; 66 | } 67 | for curr in &check_libs { 68 | self.get_libs(curr, &mut unchecked_libs)?; 69 | } 70 | check_libs = unchecked_libs; 71 | } 72 | Ok(()) 73 | } 74 | 75 | fn add_lib(&mut self, lib_path: &str) -> Result { 76 | trace!("add_lib: entered with '{}'", lib_path); 77 | match stat(lib_path) { 78 | Ok(stat) => { 79 | if self.libraries.insert(lib_path.to_owned()) { 80 | self.req_space += stat.st_size as u64; 81 | Ok(true) 82 | } else { 83 | Ok(false) 84 | } 85 | } 86 | Err(why) => Err(Error::with_all( 87 | ErrorKind::FileNotFound, 88 | &format!("The file could not be found: '{}'", lib_path), 89 | Box::new(why), 90 | )), 91 | } 92 | } 93 | 94 | fn get_libs(&self, file: &str, found: &mut HashSet) -> Result<()> { 95 | trace!("get_libs: entered with '{}'", file); 96 | let analyzer = DependencyAnalyzer::new(PathBuf::from("/")); 97 | 98 | match analyzer.analyze(file) { 
99 | Ok(dependencies) => { 100 | trace!("Dependency Tree for {file}:\n {:#?}", dependencies); 101 | for (_libname, library) in dependencies.libraries.iter() { 102 | let path = library.path.to_str().unwrap(); 103 | 104 | if !self.libraries.contains(path) { 105 | found.insert(path.to_owned()); 106 | } 107 | } 108 | Ok(()) 109 | } 110 | Err(why) => { 111 | error!("Error analysing dependency for: '{}': {:?}", file, why); 112 | Err(Error::with_context( 113 | ErrorKind::Upstream, 114 | &format!("Error analysing dependency for: '{}': {:?}", file, why), 115 | )) 116 | } 117 | } 118 | } 119 | 120 | fn copy_file, P2: AsRef>(src_path: P1, takeover_dir: P2) -> Result<()> { 121 | trace!( 122 | "copy_file: entered with '{}'", 123 | takeover_dir.as_ref().display() 124 | ); 125 | let src_path = src_path.as_ref(); 126 | 127 | let dest_dir = if let Some(parent) = src_path.parent() { 128 | path_append(takeover_dir, parent) 129 | } else { 130 | takeover_dir.as_ref().to_path_buf() 131 | }; 132 | 133 | if !dir_exists(&dest_dir)? 
{ 134 | create_dir_all(&dest_dir).upstream_with_context(&format!( 135 | "Failed to create target directory '{}'", 136 | dest_dir.display() 137 | ))?; 138 | } 139 | 140 | let dest_path = if let Some(name) = src_path.file_name() { 141 | path_append(dest_dir, name) 142 | } else { 143 | return Err(Error::with_context( 144 | ErrorKind::InvState, 145 | &format!( 146 | "Failed to extract file name from path: {}", 147 | src_path.display() 148 | ), 149 | )); 150 | }; 151 | 152 | debug!( 153 | "copy_file: copying '{}' to '{}'", 154 | src_path.display(), 155 | dest_path.display() 156 | ); 157 | copy(src_path, &dest_path).upstream_with_context(&format!( 158 | "Failed toop copy '{}' to '{}'", 159 | src_path.display(), 160 | dest_path.display() 161 | ))?; 162 | 163 | Ok(()) 164 | } 165 | 166 | pub fn copy_files>(&self, takeover_dir: P) -> Result<()> { 167 | trace!( 168 | "copy_files: entered with '{}'", 169 | takeover_dir.as_ref().display() 170 | ); 171 | let takeover_dir = takeover_dir.as_ref(); 172 | 173 | for src_path in &self.libraries { 174 | ExeCopy::copy_file(src_path, takeover_dir)?; 175 | } 176 | 177 | let dest_path = path_append(takeover_dir, "/bin"); 178 | if !dest_path.exists() { 179 | create_dir(&dest_path).upstream_with_context(&format!( 180 | "Failed to create directory '{}'", 181 | dest_path.display() 182 | ))?; 183 | } 184 | 185 | for file in &self.executables { 186 | if let Some(file_name) = PathBuf::from(file).file_name() { 187 | let dest_path = path_append(&dest_path, file_name); 188 | trace!( 189 | "copy_files: copying '{}' to '{}'", 190 | &file, 191 | dest_path.display() 192 | ); 193 | copy(file, &dest_path).upstream_with_context(&format!( 194 | "Failed to copy '{}' to '{}'", 195 | file, 196 | dest_path.display() 197 | ))?; 198 | info!("Copied '{}' to '{}'", &file, dest_path.display()); 199 | } else { 200 | return Err(Error::with_context( 201 | ErrorKind::InvState, 202 | &format!("Failed to retrieve filename from '{}'", file), 203 | )); 204 | } 205 | } 206 
| 207 | Ok(()) 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/stage1/image_retrieval.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io::copy; 3 | use std::path::{Path, PathBuf}; 4 | 5 | use log::{debug, error, info, warn, Level}; 6 | 7 | use semver::{Version, VersionReq}; 8 | 9 | use crate::{ 10 | common::{ 11 | api_calls::{get_os_image, get_os_versions, Versions}, 12 | path_append, 13 | stream_progress::StreamProgress, 14 | Error, Options, Result, ToError, 15 | }, 16 | stage1::{ 17 | defs::{ 18 | DEV_TYPE_BBB, DEV_TYPE_BBG, DEV_TYPE_GEN_X86_64, DEV_TYPE_INTEL_NUC, 19 | DEV_TYPE_JETSON_XAVIER, DEV_TYPE_RPI1, DEV_TYPE_RPI2, DEV_TYPE_RPI3, DEV_TYPE_RPI4_64, 20 | }, 21 | migrate_info::balena_cfg_json::BalenaCfgJson, 22 | }, 23 | ErrorKind, 24 | }; 25 | 26 | const SUPPORTED_DEVICES: [&str; 9] = [ 27 | DEV_TYPE_RPI3, 28 | DEV_TYPE_RPI2, 29 | DEV_TYPE_RPI4_64, 30 | DEV_TYPE_RPI1, 31 | DEV_TYPE_INTEL_NUC, 32 | DEV_TYPE_GEN_X86_64, 33 | DEV_TYPE_BBG, 34 | DEV_TYPE_BBB, 35 | DEV_TYPE_JETSON_XAVIER, 36 | ]; 37 | 38 | fn parse_versions(versions: &Versions) -> Vec { 39 | let mut sem_vers: Vec = versions 40 | .iter() 41 | .map(|ver_str| Version::parse(ver_str)) 42 | .filter_map(|ver_res| match ver_res { 43 | Ok(version) => Some(version), 44 | Err(why) => { 45 | error!("Failed to parse version, error: {:?}", why); 46 | None 47 | } 48 | }) 49 | .collect(); 50 | sem_vers.sort(); 51 | sem_vers.reverse(); 52 | sem_vers 53 | } 54 | 55 | fn determine_version(ver_str: &str, versions: &Versions) -> Result { 56 | match ver_str { 57 | "default" => { 58 | let mut found: Option = None; 59 | for cmp_ver in parse_versions(versions) { 60 | debug!("Looking at version {}", cmp_ver); 61 | if cmp_ver.is_prerelease() { 62 | continue; 63 | } else { 64 | found = Some(cmp_ver); 65 | break; 66 | } 67 | } 68 | 69 | if let Some(found) = found { 70 | info!("Selected 
default version ({}) for download", found); 71 | Ok(found) 72 | } else { 73 | Err(Error::with_context( 74 | ErrorKind::InvParam, 75 | &format!("No version found for '{}'", ver_str), 76 | )) 77 | } 78 | } 79 | _ => { 80 | if ver_str.starts_with('^') || ver_str.starts_with('~') { 81 | let ver_req = VersionReq::parse(ver_str).upstream_with_context(&format!( 82 | "Failed to parse version from '{}'", 83 | ver_str 84 | ))?; 85 | let mut found: Option = None; 86 | for cmp_ver in parse_versions(versions) { 87 | if ver_req.matches(&cmp_ver) && !cmp_ver.is_prerelease() { 88 | found = Some(cmp_ver); 89 | break; 90 | } 91 | } 92 | if let Some(found) = found { 93 | info!("Selected version {} for download", found); 94 | Ok(found) 95 | } else { 96 | Err(Error::with_context( 97 | ErrorKind::InvParam, 98 | &format!("No version found for '{}'", ver_str), 99 | )) 100 | } 101 | } else { 102 | let ver_req = Version::parse(ver_str).upstream_with_context(&format!( 103 | "Failed to parse version from '{}'", 104 | ver_str 105 | ))?; 106 | 107 | let mut found: Option = None; 108 | for cmp_ver in parse_versions(versions) { 109 | if ver_req == cmp_ver 110 | && !cmp_ver.is_prerelease() 111 | && (cmp_ver.build == ver_req.build) 112 | { 113 | found = Some(cmp_ver); 114 | break; 115 | } 116 | } 117 | if let Some(found) = found { 118 | info!("Selected version {} for download", found); 119 | Ok(found) 120 | } else { 121 | Err(Error::with_context( 122 | ErrorKind::InvParam, 123 | &format!("No version found for '{}'", ver_str), 124 | )) 125 | } 126 | } 127 | } 128 | } 129 | } 130 | 131 | pub(crate) fn download_image( 132 | opts: &Options, 133 | balena_cfg: &BalenaCfgJson, 134 | work_dir: &Path, 135 | device_type: &str, 136 | version: &str, 137 | ) -> Result { 138 | if !SUPPORTED_DEVICES.contains(&device_type) { 139 | if opts.dt_check() { 140 | return Err(Error::with_context( 141 | ErrorKind::InvParam, 142 | &format!( 143 | "OS download is not supported for device type '{}', to override this check use 
the no-dt-check option on the command line", 144 | device_type 145 | ), 146 | )); 147 | } else { 148 | warn!( 149 | "OS download is not supported for device type '{}', proceeding due to no-dt-check option", 150 | device_type 151 | ); 152 | } 153 | } 154 | 155 | let api_key = balena_cfg.get_api_key().upstream_with_context( 156 | "Failed to retrieve api-key from config.json - unable to retrieve os-image", 157 | )?; 158 | 159 | let api_endpoint = balena_cfg.get_api_endpoint().upstream_with_context( 160 | "Failed to retrieve api-endpoint from config.json - unable to retrieve os-image", 161 | )?; 162 | 163 | let versions = get_os_versions(&api_endpoint, &api_key, device_type)?; 164 | 165 | let version = determine_version(version, &versions)?; 166 | 167 | info!( 168 | "Downloading Balena OS image, selected version is: '{}'", 169 | version.to_string() 170 | ); 171 | 172 | // TODO: extract OS image for flasher 173 | 174 | let stream = get_os_image(&api_endpoint, &api_key, device_type, &version.to_string())?; 175 | 176 | let img_file_name = path_append( 177 | work_dir, 178 | format!("balena-cloud-{}-{}.img.gz", device_type, version), 179 | ); 180 | 181 | debug!("Downloading file '{}'", img_file_name.display()); 182 | let mut file = File::create(&img_file_name).upstream_with_context(&format!( 183 | "Failed to create file: '{}'", 184 | img_file_name.display() 185 | ))?; 186 | 187 | // TODO: show progress 188 | let mut progress = StreamProgress::new(stream, 10, Level::Info, None); 189 | copy(&mut progress, &mut file).upstream_with_context(&format!( 190 | "Failed to write downloaded data to '{}'", 191 | img_file_name.display() 192 | ))?; 193 | info!( 194 | "The balena OS image was successfully written to '{}'", 195 | img_file_name.display() 196 | ); 197 | 198 | Ok(img_file_name) 199 | } 200 | 201 | #[cfg(test)] 202 | mod tests { 203 | use super::*; 204 | const VERSIONS: [&str; 6] = [ 205 | "5.1.20+rev1", 206 | "3.2.25", 207 | "3.3.0", 208 | "4.0.26+rev", 209 | "5.0.1+rev1", 210 
| "0.0.0+rev60", 211 | ]; 212 | use mod_logger::Logger; 213 | 214 | #[test] 215 | fn returns_latest_version_by_default() { 216 | Logger::set_default_level(Level::Trace); 217 | 218 | let selection = "default"; 219 | debug!("Selection is {}", selection); 220 | 221 | let versions: Versions = VERSIONS.iter().map(|&s| s.to_string()).collect(); 222 | 223 | let result = determine_version(selection, &versions); 224 | assert_eq!( 225 | result.unwrap(), 226 | Version::parse("5.1.20+rev1").expect("Could not parse version") 227 | ); 228 | } 229 | 230 | #[test] 231 | fn returns_specific_version() { 232 | Logger::set_default_level(Level::Trace); 233 | let selection = "4.0.26+rev"; 234 | debug!("Selection is {}", selection); 235 | 236 | let versions: Versions = VERSIONS.iter().map(|&s| s.to_string()).collect(); 237 | 238 | let result = determine_version(selection, &versions); 239 | assert_eq!( 240 | result.unwrap(), 241 | Version::parse("4.0.26+rev").expect("Could not parse version") 242 | ); 243 | } 244 | 245 | #[test] 246 | fn returns_compatible_version() { 247 | Logger::set_default_level(Level::Trace); 248 | let selection = "^3.2"; 249 | debug!("Selection is {}", selection); 250 | 251 | let versions: Versions = VERSIONS.iter().map(|&s| s.to_string()).collect(); 252 | 253 | let result = determine_version(selection, &versions); 254 | assert_eq!( 255 | result.unwrap(), 256 | Version::parse("3.3.0").expect("Could not parse version") 257 | ); 258 | } 259 | 260 | #[test] 261 | fn returns_closest_version() { 262 | Logger::set_default_level(Level::Trace); 263 | let selection = "~3.2.8"; 264 | debug!("Selection is {}", selection); 265 | 266 | let versions: Versions = VERSIONS.iter().map(|&s| s.to_string()).collect(); 267 | 268 | let result = determine_version(selection, &versions); 269 | assert_eq!( 270 | result.unwrap(), 271 | Version::parse("3.2.25").expect("Could not parse version") 272 | ); 273 | } 274 | } 275 | 
-------------------------------------------------------------------------------- /src/stage1/migrate_info/balena_cfg_json.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | common::{Error, ErrorKind, Options, Result, ToError}, 3 | stage1::{device::Device, utils::check_tcp_connect}, 4 | }; 5 | 6 | use log::{debug, error, info}; 7 | use reqwest::blocking::Client; 8 | use serde_json::Value; 9 | use std::collections::HashMap; 10 | use std::fs::{File, OpenOptions}; 11 | use std::io::BufReader; 12 | use std::path::{Path, PathBuf}; 13 | 14 | #[derive(Debug, Clone)] 15 | pub(crate) struct BalenaCfgJson { 16 | config: HashMap, 17 | file: PathBuf, 18 | modified: bool, 19 | } 20 | 21 | impl BalenaCfgJson { 22 | pub fn new>(cfg_file: P) -> Result { 23 | let cfg_file = cfg_file 24 | .as_ref() 25 | .canonicalize() 26 | .upstream_with_context(&format!( 27 | "Failed to canonicalize path: '{}'", 28 | cfg_file.as_ref().display() 29 | ))?; 30 | 31 | Ok(BalenaCfgJson { 32 | config: serde_json::from_reader(BufReader::new( 33 | File::open(&cfg_file).upstream_with_context(&format!( 34 | "new: cannot open file '{}'", 35 | cfg_file.display() 36 | ))?, 37 | )) 38 | .upstream_with_context(&format!( 39 | "Failed to parse json from file '{}'", 40 | cfg_file.display() 41 | ))?, 42 | file: cfg_file, 43 | modified: false, 44 | }) 45 | } 46 | 47 | pub fn write>(&mut self, target_path: P) -> Result<()> { 48 | let target_path = target_path.as_ref(); 49 | let out_file = OpenOptions::new() 50 | .create(true) 51 | .write(true) 52 | .open(target_path) 53 | .upstream_with_context(&format!( 54 | "Failed to open file for writing: '{}'", 55 | target_path.display() 56 | ))?; 57 | 58 | serde_json::to_writer(out_file, &self.config).upstream_with_context(&format!( 59 | "Failed save modified config.json to '{}'", 60 | target_path.display() 61 | ))?; 62 | 63 | self.modified = false; 64 | self.file = target_path.canonicalize().upstream_with_context(&format!( 
65 | "Failed to canonicalize path: '{}'", 66 | target_path.display() 67 | ))?; 68 | 69 | Ok(()) 70 | } 71 | 72 | pub fn check(&self, opts: &Options, device: &dyn Device) -> Result<()> { 73 | info!("Configured for fleet id: {}", self.get_app_id()?); 74 | 75 | let device_type = self.get_device_type()?; 76 | if opts.dt_check() { 77 | if !device.supports_device_type(device_type.as_str()) { 78 | error!("The device type configured in config.json ({}) is not supported by the detected device type {:?}", 79 | device_type, device.get_device_type()); 80 | return Err(Error::displayed()); 81 | } 82 | } else { 83 | info!("Device type configured in config.json is {}; skipping compatibility check due to --no-dt-check option", 84 | device_type) 85 | } 86 | 87 | if opts.api_check() { 88 | let api_endpoint = &self.get_api_endpoint()?; 89 | check_api(api_endpoint)?; 90 | } 91 | 92 | if opts.vpn_check() { 93 | let vpn_endpoint = self.get_vpn_endpoint()?; 94 | let vpn_port = self.get_vpn_port()? as u16; 95 | if let Ok(_v) = check_tcp_connect(&vpn_endpoint, vpn_port, opts.check_timeout()) { 96 | // TODO: call a command on API instead of just connecting 97 | info!("connection to vpn: {}:{} is ok", vpn_endpoint, vpn_port); 98 | } else { 99 | return Err(Error::with_context( 100 | ErrorKind::InvState, 101 | &format!( 102 | "failed to connect to vpn server @ {}:{} your device might not come online", 103 | vpn_endpoint, vpn_port 104 | ), 105 | )); 106 | } 107 | } 108 | 109 | Ok(()) 110 | } 111 | 112 | pub fn is_modified(&self) -> bool { 113 | self.modified 114 | } 115 | 116 | fn get_str_val(&self, name: &str) -> Result { 117 | if let Some(value) = self.config.get(name) { 118 | if let Some(value) = value.as_str() { 119 | Ok(value.to_string()) 120 | } else { 121 | Err(Error::with_context( 122 | ErrorKind::InvParam, 123 | &format!( 124 | "Invalid type encountered for '{}', expected String, found {:?} in config.json", 125 | name, value 126 | ), 127 | )) 128 | } 129 | } else { 130 | 
Err(Error::with_context( 131 | ErrorKind::NotFound, 132 | &format!("Key could not be found in config.json: '{}'", name), 133 | )) 134 | } 135 | } 136 | 137 | fn get_uint_val(&self, name: &str) -> Result { 138 | if let Some(value) = self.config.get(name) { 139 | if let Some(value) = value.as_u64() { 140 | Ok(value) 141 | } else if let Some(str_val) = value.as_str() { 142 | Ok(str_val.parse::().upstream_with_context(&format!( 143 | "Failed to parse uint value for '{}' from config.json", 144 | name 145 | ))?) 146 | } else { 147 | Err(Error::with_context( 148 | ErrorKind::InvParam, 149 | &format!( 150 | "Invalid type encountered for '{}', expected uint, found {:?}", 151 | name, value 152 | ), 153 | )) 154 | } 155 | } else { 156 | Err(Error::with_context( 157 | ErrorKind::NotFound, 158 | &format!("Key could not be found in config.json: '{}'", name), 159 | )) 160 | } 161 | } 162 | 163 | /*pub fn get_hostname(&self) -> Result { 164 | self.get_str_val("hostname") 165 | }*/ 166 | 167 | pub fn set_host_name(&mut self, hostname: &str) -> Option { 168 | self.modified = true; 169 | 170 | self.config 171 | .insert("hostname".to_string(), Value::String(hostname.to_string())) 172 | .map(|value| value.to_string()) 173 | } 174 | 175 | pub fn get_app_id(&self) -> Result { 176 | self.get_uint_val("applicationId") 177 | } 178 | 179 | pub fn get_api_key(&self) -> Result { 180 | // The API Key required can exist with key `apiKey` or in case of an already provisioned device, `deviceApikey` 181 | match self.get_str_val("apiKey") { 182 | Ok(value) => Ok(value), 183 | Err(e) => { 184 | if let ErrorKind::NotFound = e.kind() { 185 | // If the error kind is NotFound, try "deviceApiKey" 186 | match self.get_str_val("deviceApiKey") { 187 | Ok(value) => Ok(value), 188 | Err(e) => Err(e), // Propagate any errors from the second attempt 189 | } 190 | } else { 191 | Err(e) // Propagate any other kinds of errors 192 | } 193 | } 194 | } 195 | } 196 | 197 | pub fn get_api_endpoint(&self) -> Result { 198 
| self.get_str_val("apiEndpoint") 199 | } 200 | 201 | fn get_vpn_endpoint(&self) -> Result { 202 | self.get_str_val("vpnEndpoint") 203 | } 204 | 205 | fn get_vpn_port(&self) -> Result { 206 | self.get_uint_val("vpnPort") 207 | } 208 | 209 | pub fn get_device_type(&self) -> Result { 210 | self.get_str_val("deviceType") 211 | } 212 | 213 | pub fn get_path(&self) -> &Path { 214 | &self.file 215 | } 216 | 217 | pub fn override_device_type(&mut self, change_to: &str) -> Option { 218 | self.modified = true; 219 | 220 | self.config 221 | .insert( 222 | "deviceType".to_string(), 223 | Value::String(change_to.to_string()), 224 | ) 225 | .map(|value| value.to_string()) 226 | } 227 | 228 | pub fn get_uuid(&self) -> Result { 229 | self.get_str_val("uuid") 230 | } 231 | } 232 | 233 | fn check_api(api_endpoint: &str) -> Result<()> { 234 | let ping_endpoint = format!("{}/ping", api_endpoint); 235 | let res = Client::builder() 236 | .build() 237 | .upstream_with_context("Failed to create https client")? 
238 | .get(&ping_endpoint) 239 | .send() 240 | .upstream_with_context(&format!( 241 | "Failed to send https request url: {}", 242 | &api_endpoint 243 | ))?; 244 | debug!("Result = {:?}", res); 245 | let status = res.status(); 246 | let response = res 247 | .text() 248 | .upstream_with_context("Failed to read response")?; 249 | 250 | if status.is_success() && response.trim() == "OK" { 251 | info!("connection to api: {} is ok", &api_endpoint); 252 | Ok(()) 253 | } else { 254 | Err(Error::with_context( 255 | ErrorKind::InvState, 256 | &format!( 257 | "Got an unexpected reply from the API server @ {} : {}", 258 | &ping_endpoint, &response 259 | ), 260 | )) 261 | } 262 | } 263 | 264 | #[cfg(test)] 265 | mod tests { 266 | use super::*; 267 | use std::collections::HashMap; 268 | use std::path::PathBuf; 269 | 270 | #[test] 271 | fn test_get_api_key_found_as_api_key() { 272 | let mut config: HashMap = HashMap::new(); 273 | config.insert("apiKey".to_string(), "abcd".into()); 274 | let balena_cfg = BalenaCfgJson { 275 | config, 276 | file: PathBuf::new(), 277 | modified: false, 278 | }; 279 | assert_eq!(balena_cfg.get_api_key().unwrap(), "abcd"); 280 | } 281 | 282 | #[test] 283 | fn test_get_api_key_found_as_device_api_key() { 284 | let mut config: HashMap = HashMap::new(); 285 | config.insert("deviceApiKey".to_string(), "abcd".into()); 286 | let balena_cfg = BalenaCfgJson { 287 | config, 288 | file: PathBuf::new(), 289 | modified: false, 290 | }; 291 | assert_eq!(balena_cfg.get_api_key().unwrap(), "abcd"); 292 | } 293 | 294 | #[test] 295 | fn test_get_api_key_not_found() { 296 | let config = HashMap::new(); // No API keys present 297 | let balena_cfg = BalenaCfgJson { 298 | config, 299 | file: PathBuf::new(), 300 | modified: false, 301 | }; 302 | assert!(balena_cfg.get_api_key().is_err()); 303 | } 304 | } 305 | -------------------------------------------------------------------------------- /src/stage1/utils.rs: 
-------------------------------------------------------------------------------- 1 | use libc::S_IFREG; 2 | use log::info; 3 | use nix::mount::{mount, MsFlags}; 4 | use rand::distributions::Alphanumeric; 5 | use rand::{thread_rng, Rng}; 6 | use std::cmp::min; 7 | use std::io; 8 | use std::path::{Path, PathBuf}; 9 | 10 | use crate::{ 11 | common::{ 12 | call, 13 | defs::{MOKUTIL_CMD, NIX_NONE, SYS_EFI_DIR}, 14 | dir_exists, 15 | system::{mkdir, mknod, uname}, 16 | whereis, Error, ErrorKind, Result, ToError, 17 | }, 18 | stage1::defs::OSArch, 19 | }; 20 | 21 | use log::{error, trace, warn}; 22 | use regex::Regex; 23 | 24 | use crate::common::path_append; 25 | use crate::stage1::migrate_info::MigrateInfo; 26 | 27 | use std::fs::create_dir_all; 28 | use std::io::Read; 29 | 30 | pub(crate) fn get_os_arch() -> Result { 31 | trace!("get_os_arch: entered"); 32 | 33 | let uname_res = uname()?; 34 | let machine = uname_res.get_machine(); 35 | match machine { 36 | "x86_64" => Ok(OSArch::AMD64), 37 | "i386" => Ok(OSArch::I386), 38 | "armv7l" => Ok(OSArch::ARMHF), 39 | "armv6l" => Ok(OSArch::ARMHF), 40 | "aarch64" => Ok(OSArch::ARM64), 41 | _ => Err(Error::with_context( 42 | ErrorKind::InvParam, 43 | &format!("get_os_arch: unsupported architecture '{}'", machine), 44 | )), 45 | } 46 | } 47 | 48 | /****************************************************************** 49 | * Try to find out if secure boot is enabled using mokutil 50 | * assuming secure boot is not enabled if mokutil is absent 51 | ******************************************************************/ 52 | 53 | pub(crate) fn is_secure_boot() -> Result { 54 | trace!("is_secure_boot: entered"); 55 | 56 | // TODO: check for efi vars 57 | 58 | if dir_exists(SYS_EFI_DIR)? 
{ 59 | let mokutil_path = match whereis(MOKUTIL_CMD) { 60 | Ok(path) => path, 61 | Err(_why) => { 62 | warn!("The mokutil command '{}' could not be found", MOKUTIL_CMD); 63 | return Ok(false); 64 | } 65 | }; 66 | 67 | let cmd_res = call(&mokutil_path, &["--sb-state"], true)?; 68 | if cmd_res.stderr.is_empty() { 69 | let regex = Regex::new(r"^SecureBoot\s+(disabled|enabled)$").unwrap(); 70 | let lines = cmd_res.stdout.lines(); 71 | for line in lines { 72 | if let Some(cap) = regex.captures(line) { 73 | if cap.get(1).unwrap().as_str() == "enabled" { 74 | return Ok(true); 75 | } else { 76 | return Ok(false); 77 | } 78 | } 79 | } 80 | 81 | error!( 82 | "is_secure_boot: failed to parse command output: '{}'", 83 | cmd_res.stdout 84 | ); 85 | Err(Error::with_context( 86 | ErrorKind::InvParam, 87 | "is_secure_boot: failed to parse command output", 88 | )) 89 | } else if cmd_res 90 | .stderr 91 | .starts_with("This system doesn't support Secure Boot") 92 | { 93 | Ok(false) 94 | } else { 95 | Err(Error::with_context( 96 | ErrorKind::ExecProcess, 97 | &format!("mokutil returned an error message: '{}'", cmd_res.stderr), 98 | )) 99 | } 100 | } else { 101 | Ok(false) 102 | } 103 | } 104 | 105 | pub(crate) fn mktemp>( 106 | dir: bool, 107 | prefix: Option<&str>, 108 | suffix: Option<&str>, 109 | path: Option

, 110 | ) -> Result { 111 | loop { 112 | let mut file_name = String::new(); 113 | if let Some(prefix) = prefix { 114 | file_name.push_str(prefix); 115 | } 116 | file_name.push_str( 117 | thread_rng() 118 | .sample_iter(&Alphanumeric) 119 | .take(10) 120 | .collect::() 121 | .as_str(), 122 | ); 123 | if let Some(suffix) = suffix { 124 | file_name.push_str(suffix); 125 | } 126 | 127 | let new_path = if let Some(path) = &path { 128 | path_append(path.as_ref(), file_name.as_str()) 129 | } else { 130 | path_append("/tmp", file_name.as_str()) 131 | }; 132 | 133 | match if dir { 134 | mkdir(new_path.as_path(), 0o755) 135 | } else { 136 | mknod(new_path.as_path(), S_IFREG | 0o755, 0) 137 | } { 138 | Ok(_) => return Ok(new_path), 139 | Err(why) => { 140 | if why.kind() != ErrorKind::FileExists { 141 | return Err(Error::with_cause(ErrorKind::Upstream, Box::new(why))); 142 | } 143 | } 144 | } 145 | } 146 | } 147 | 148 | pub(crate) fn check_tcp_connect(host: &str, port: u16, timeout: u64) -> Result<()> { 149 | use std::net::{Shutdown, TcpStream, ToSocketAddrs}; 150 | use std::time::Duration; 151 | let url = format!("{}:{}", host, port); 152 | let mut addrs_iter = url.to_socket_addrs().upstream_with_context(&format!( 153 | "check_tcp_connect: failed to resolve host address: '{}'", 154 | url 155 | ))?; 156 | 157 | if let Some(ref sock_addr) = addrs_iter.next() { 158 | let tcp_stream = TcpStream::connect_timeout(sock_addr, Duration::from_secs(timeout)) 159 | .upstream_with_context(&format!( 160 | "check_tcp_connect: failed to connect to: '{}' with timeout: {}", 161 | url, timeout 162 | ))?; 163 | 164 | let _res = tcp_stream.shutdown(Shutdown::Both); 165 | Ok(()) 166 | } else { 167 | Err(Error::with_context( 168 | ErrorKind::InvState, 169 | &format!( 170 | "check_tcp_connect: no results from name resolution for: '{}", 171 | url 172 | ), 173 | )) 174 | } 175 | } 176 | 177 | pub(crate) fn mount_fs>( 178 | mount_dir: P, 179 | fs: &str, 180 | fs_type: &str, 181 | mig_info: Option<&mut 
MigrateInfo>, 182 | ) -> Result<()> { 183 | let mount_dir = mount_dir.as_ref(); 184 | if !dir_exists(mount_dir)? { 185 | create_dir_all(mount_dir).upstream_with_context(&format!( 186 | "Failed to create mount directory '{}'", 187 | mount_dir.display() 188 | ))?; 189 | } 190 | 191 | mount( 192 | Some(fs.as_bytes()), 193 | mount_dir, 194 | Some(fs_type.as_bytes()), 195 | MsFlags::empty(), 196 | NIX_NONE, 197 | ) 198 | .upstream_with_context(&format!( 199 | "Failed to mount {} on {} with fstype {}", 200 | fs, 201 | mount_dir.display(), 202 | fs_type 203 | ))?; 204 | 205 | if let Some(mig_info) = mig_info { 206 | mig_info.add_mount(mount_dir); 207 | } 208 | 209 | info!("Mounted {} file system on '{}'", fs, mount_dir.display()); 210 | 211 | Ok(()) 212 | } 213 | 214 | pub(crate) struct ReadBuffer<'a> { 215 | buffer: &'a [u8], 216 | read_pos: usize, 217 | } 218 | 219 | impl<'a> ReadBuffer<'a> { 220 | pub fn new(buffer: &'a [u8]) -> ReadBuffer { 221 | ReadBuffer { 222 | buffer, 223 | read_pos: 0, 224 | } 225 | } 226 | } 227 | 228 | impl<'a> Read for ReadBuffer<'a> { 229 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 230 | if self.read_pos >= self.buffer.len() { 231 | Ok(0) 232 | } else { 233 | let bytes = min(buf.len(), self.buffer.len() - self.read_pos); 234 | buf[0..bytes].copy_from_slice(&self.buffer[self.read_pos..self.read_pos + bytes]); 235 | self.read_pos += bytes; 236 | Ok(bytes) 237 | } 238 | } 239 | } 240 | 241 | #[cfg(test)] 242 | mod tests { 243 | use super::*; 244 | use std::io::copy; 245 | 246 | #[test] 247 | fn test_read_buffer() { 248 | const BUFFER: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; 249 | let mut read_buffer = ReadBuffer::new(&BUFFER[..]); 250 | let mut buffer: Vec = Vec::with_capacity(16); 251 | copy(&mut read_buffer, &mut buffer).unwrap(); 252 | assert_eq!(&BUFFER[..], buffer.as_slice()); 253 | } 254 | } 255 | -------------------------------------------------------------------------------- 
/src/stage1/wifi_config.rs: -------------------------------------------------------------------------------- 1 | use log::{info, trace, warn}; 2 | use std::fs::{read_to_string, File}; 3 | use std::io::Write; 4 | use std::path::{Path, PathBuf}; 5 | 6 | #[cfg(target_os = "windows")] 7 | use crate::common::call; 8 | 9 | mod connmgr_parser; 10 | mod nwmgr_parser; 11 | mod wpa_parser; 12 | 13 | use crate::{ 14 | common::{dir_exists, file_exists, path_append, pidof, Result, ToError}, 15 | stage1::wifi_config::{ 16 | connmgr_parser::{parse_connmgr_config, CONNMGR_CONFIG_DIR}, 17 | nwmgr_parser::NWMGR_CONFIG_DIR, 18 | nwmgr_parser::{parse_nwmgr_config, replace_nwmgr_id}, 19 | wpa_parser::{WpaParser, WPA_CONFIG_FILE}, 20 | }, 21 | }; 22 | 23 | pub const BALENA_FILE_TAG: &str = "## created by balena-migrate"; 24 | //const NWM_CONFIG_DIR: &str = "/etc/NetworkManager/system-connections/"; 25 | 26 | const NWMGR_CONTENT: &str = r##"## created by balena-migrate 27 | [connection] 28 | id=__FILE_NAME__ 29 | type=wifi 30 | 31 | [wifi] 32 | hidden=true 33 | mode=infrastructure 34 | ssid=__SSID__ 35 | 36 | [ipv4] 37 | method=auto 38 | 39 | [ipv6] 40 | addr-gen-mode=stable-privacy 41 | method=auto 42 | "##; 43 | 44 | const NWMGR_CONTENT_PSK: &str = r##"[wifi-security] 45 | auth-alg=open 46 | key-mgmt=wpa-psk 47 | psk=__PSK__ 48 | "##; 49 | 50 | #[derive(Debug)] 51 | pub(crate) struct Params { 52 | ssid: String, 53 | psk: Option, 54 | // TODO: prepare for static config 55 | } 56 | 57 | #[derive(Debug)] 58 | pub(crate) struct NwmgrFile { 59 | ssid: String, 60 | file: PathBuf, 61 | // TODO: prepare for static config 62 | } 63 | 64 | #[derive(Debug)] 65 | pub(crate) enum WifiConfig { 66 | Params(Params), 67 | NwMgrFile(NwmgrFile), 68 | } 69 | 70 | impl<'a> WifiConfig { 71 | pub fn scan(ssid_filter: &[String]) -> Result> { 72 | trace!("WifiConfig::scan: entered with {:?}", ssid_filter); 73 | if !pidof("NetworkManager")?.is_empty() && dir_exists(NWMGR_CONFIG_DIR)? 
{ 74 | Ok(parse_nwmgr_config(ssid_filter)?) 75 | } else if !pidof("wpa_supplicant")?.is_empty() && file_exists(WPA_CONFIG_FILE) { 76 | Ok(WpaParser::parse_config(ssid_filter)?) 77 | } else if !pidof("wpa_supplicant")?.is_empty() && dir_exists(CONNMGR_CONFIG_DIR)? { 78 | Ok(parse_connmgr_config(ssid_filter)?) 79 | } else { 80 | warn!("No supported network managers found, no wifis will be migrated"); 81 | Ok(Vec::new()) 82 | } 83 | } 84 | 85 | pub fn get_ssid(&'a self) -> &'a str { 86 | match self { 87 | WifiConfig::NwMgrFile(file) => &file.ssid, 88 | WifiConfig::Params(params) => ¶ms.ssid, 89 | } 90 | } 91 | 92 | pub(crate) fn create_nwmgr_file>( 93 | &self, 94 | base_path: P, 95 | index: u64, 96 | ) -> Result { 97 | let base_path = base_path.as_ref(); 98 | let path = path_append(base_path, format!("resin-wifi-{}", index)); 99 | 100 | info!("Creating NetworkManager file in '{}'", path.display()); 101 | let mut nwmgr_file = File::create(&path) 102 | .upstream_with_context(&format!("Failed to create file in '{}'", path.display()))?; 103 | 104 | let name = path.file_name().unwrap().to_string_lossy(); 105 | 106 | let content = match self { 107 | WifiConfig::Params(config) => { 108 | let mut content = NWMGR_CONTENT.replace("__SSID__", &config.ssid); 109 | content = content.replace("__FILE_NAME__", &name); 110 | 111 | if let Some(ref psk) = config.psk { 112 | content.push_str(&NWMGR_CONTENT_PSK.replace("__PSK__", psk)); 113 | } 114 | content 115 | } 116 | WifiConfig::NwMgrFile(nwmgr_file) => { 117 | let mut content = format!("{}\n", BALENA_FILE_TAG); 118 | 119 | content.push_str( 120 | replace_nwmgr_id( 121 | read_to_string(&nwmgr_file.file) 122 | .upstream_with_context(&format!( 123 | "Failed to read file '{}'", 124 | nwmgr_file.file.display() 125 | ))? 126 | .as_str(), 127 | &name, 128 | )? 
129 | .as_str(), 130 | ); 131 | content 132 | } 133 | }; 134 | 135 | trace!("writing nwmgr file as: \n{}", content); 136 | 137 | nwmgr_file 138 | .write_all(content.as_bytes()) 139 | .upstream_with_context(&format!("failed to write new '{:?}'", path.display()))?; 140 | Ok(index) 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /src/stage1/wifi_config/connmgr_parser.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | common::{dir_exists, path_append, Error, ErrorKind, Result, ToError}, 3 | stage1::wifi_config::{Params, WifiConfig}, 4 | }; 5 | 6 | use std::fs::{read_dir, File}; 7 | use std::io::{BufRead, BufReader}; 8 | use std::path::Path; 9 | 10 | use log::{debug, info, warn}; 11 | use regex::Regex; 12 | 13 | pub(crate) const CONNMGR_CONFIG_DIR: &str = "/var/lib/connman"; 14 | 15 | struct ConnMgrParser { 16 | skip_re: Regex, 17 | param_re: Regex, 18 | } 19 | 20 | impl ConnMgrParser { 21 | fn new() -> ConnMgrParser { 22 | ConnMgrParser { 23 | skip_re: Regex::new(r##"^(\s*#.*|\s*)$"##).unwrap(), 24 | param_re: Regex::new(r#"^\s*(\S+)\s*=\s*(\S+)\s*$"#).unwrap(), 25 | } 26 | } 27 | 28 | fn parse_conmgr_file(&self, file_path: &Path) -> Result> { 29 | let mut ssid = String::from(""); 30 | let mut psk: Option = None; 31 | 32 | let file = File::open(file_path) 33 | .upstream_with_context(&format!("failed to open file {}", file_path.display()))?; 34 | 35 | for line in BufReader::new(file).lines() { 36 | match line { 37 | Ok(line) => { 38 | if self.skip_re.is_match(&line) { 39 | debug!("parse_conmgr_file: skipping line: '{}'", line); 40 | continue; 41 | } 42 | 43 | debug!("parse_conmgr_file: processing line '{}'", line); 44 | 45 | if let Some(captures) = self.param_re.captures(&line) { 46 | let param = captures.get(1).unwrap().as_str(); 47 | let value = captures.get(2).unwrap().as_str(); 48 | 49 | if param == "Name" { 50 | ssid = String::from(value); 51 | continue; 52 
| } 53 | 54 | if param == "Passphrase" { 55 | psk = Some(String::from(value)); 56 | continue; 57 | } 58 | } 59 | 60 | debug!("ignoring line '{}' from '{}'", line, file_path.display()); 61 | continue; 62 | } 63 | Err(why) => { 64 | return Err(Error::from_upstream( 65 | Box::new(why), 66 | &format!("unexpected read error from {}", file_path.display()), 67 | )); 68 | } 69 | } 70 | } 71 | 72 | if !ssid.is_empty() { 73 | Ok(Some(WifiConfig::Params(Params { ssid, psk }))) 74 | } else { 75 | Ok(None) 76 | } 77 | } 78 | } 79 | 80 | pub(crate) fn parse_connmgr_config(ssid_filter: &[String]) -> Result> { 81 | let mut wifis: Vec = Vec::new(); 82 | 83 | if dir_exists(CONNMGR_CONFIG_DIR)? { 84 | let paths = read_dir(CONNMGR_CONFIG_DIR).upstream_with_context(&format!( 85 | "Failed to list directory '{}'", 86 | CONNMGR_CONFIG_DIR 87 | ))?; 88 | 89 | let parser = ConnMgrParser::new(); 90 | 91 | for path in paths { 92 | if let Ok(path) = path { 93 | let dir_path = path.path(); 94 | debug!("got path '{}'", dir_path.display()); 95 | if let Some(dir_name) = dir_path.file_name() { 96 | if dir_name.to_string_lossy().starts_with("wifi_") 97 | && dir_path.metadata().unwrap().is_dir() 98 | { 99 | debug!("examining connmgr path '{}'", dir_path.display()); 100 | let settings_path = path_append(dir_path, "settings"); 101 | if settings_path.exists() { 102 | debug!("examining connmgr path '{}'", settings_path.display()); 103 | if let Some(wifi) = parser.parse_conmgr_file(&settings_path)? 
{ 104 | if ssid_filter.is_empty() 105 | || ssid_filter 106 | .iter() 107 | .any(|curr| curr.as_str() == wifi.get_ssid()) 108 | { 109 | wifis.push(wifi); 110 | } else { 111 | info!("ignoring wifi config for ssid: '{}'", wifi.get_ssid()); 112 | } 113 | } 114 | } 115 | } else { 116 | debug!( 117 | "no match on '{}' starts_with(wifi_): {} is_dir: {}", 118 | dir_path.display(), 119 | dir_name.to_string_lossy().starts_with("wifi_"), 120 | dir_path.metadata().unwrap().is_dir() 121 | ); 122 | } 123 | } else { 124 | warn!("Not processing invalid path '{}'", path.path().display()); 125 | } 126 | } else { 127 | return Err(Error::with_context( 128 | ErrorKind::Upstream, 129 | &format!( 130 | "Error reading entry from directory '{}'", 131 | CONNMGR_CONFIG_DIR 132 | ), 133 | )); 134 | } 135 | } 136 | } else { 137 | debug!( 138 | "WifiConfig::from_connman: directory not found: '{}'", 139 | CONNMGR_CONFIG_DIR 140 | ); 141 | } 142 | 143 | Ok(wifis) 144 | } 145 | -------------------------------------------------------------------------------- /src/stage1/wifi_config/nwmgr_parser.rs: -------------------------------------------------------------------------------- 1 | use log::{debug, warn}; 2 | use regex::Regex; 3 | use std::fs::{read_dir, read_to_string}; 4 | use std::path::Path; 5 | 6 | use crate::stage1::wifi_config::NwmgrFile; 7 | use crate::{ 8 | common::{dir_exists, Error, ErrorKind, Result, ToError}, 9 | stage1::wifi_config::WifiConfig, 10 | }; 11 | 12 | pub(crate) const NWMGR_CONFIG_DIR: &str = "/etc/NetworkManager/system-connections"; 13 | 14 | #[derive(Debug, PartialEq, Clone)] 15 | enum NwMgrSection { 16 | Connection, 17 | Wifi, 18 | Other, 19 | } 20 | 21 | enum ParseResult { 22 | TermFound, 23 | TermNoWifi, 24 | Continue, 25 | } 26 | 27 | struct ParserState { 28 | skip_re: Regex, 29 | section_re: Regex, 30 | param_re: Regex, 31 | // id_re: Regex, 32 | section: NwMgrSection, 33 | is_wifi: bool, 34 | ssid: Option, 35 | line: usize, 36 | } 37 | 38 | impl ParserState { 39 | 
fn new() -> ParserState { 40 | ParserState { 41 | skip_re: Regex::new(r##"^(\s*#.*|\s*)$"##).unwrap(), 42 | section_re: Regex::new(r##"^\s*\[([^]]+)]"##).unwrap(), 43 | param_re: Regex::new(r##"^\s*([^= #]+)\s*=\s*(\S.*)$"##).unwrap(), 44 | // id_re: Regex::new(r##"^\s*id\s*=.*"##).unwrap(), 45 | section: NwMgrSection::Other, 46 | line: 0, 47 | is_wifi: false, 48 | ssid: None, 49 | } 50 | } 51 | 52 | fn reset(&mut self) { 53 | self.ssid = None; 54 | self.line = 0; 55 | self.is_wifi = false; 56 | self.section = NwMgrSection::Other; 57 | } 58 | 59 | fn is_valid_ssid(ssid: &str, ssid_filter: &[String]) -> bool { 60 | ssid_filter.is_empty() || ssid_filter.iter().any(|curr| curr.as_str() == ssid) 61 | } 62 | 63 | fn parse_file>( 64 | &mut self, 65 | cfg_file: P, 66 | ssid_filter: &[String], 67 | wifis: &mut Vec, 68 | ) -> Result<()> { 69 | let cfg_file = cfg_file.as_ref(); 70 | if cfg_file.is_file() { 71 | for line in read_to_string(cfg_file) 72 | .upstream_with_context(&format!("failed to read file: '{}'", cfg_file.display()))? 
73 | .lines() 74 | { 75 | match self.parse_line(line) { 76 | ParseResult::Continue => { 77 | continue; 78 | } 79 | ParseResult::TermFound => { 80 | if let Some(ssid) = self.ssid.take() { 81 | if ParserState::is_valid_ssid(ssid.as_str(), ssid_filter) { 82 | wifis.push(WifiConfig::NwMgrFile(NwmgrFile { 83 | ssid, 84 | file: cfg_file.to_path_buf(), 85 | })); 86 | } 87 | return Ok(()); 88 | } 89 | } 90 | ParseResult::TermNoWifi => { 91 | return Ok(()); 92 | } 93 | } 94 | } 95 | 96 | if self.is_wifi && self.ssid.is_some() {} 97 | } 98 | Ok(()) 99 | } 100 | 101 | fn is_id_line(&mut self, line: &str) -> bool { 102 | if let Some(captures) = self.section_re.captures(line) { 103 | self.section = match captures.get(1).unwrap().as_str() { 104 | "connection" => NwMgrSection::Connection, 105 | _ => NwMgrSection::Other, 106 | }; 107 | false 108 | } else if self.section == NwMgrSection::Connection { 109 | if let Some(captures) = self.param_re.captures(line) { 110 | captures.get(1).unwrap().as_str() == "id" 111 | } else { 112 | false 113 | } 114 | } else { 115 | false 116 | } 117 | } 118 | 119 | fn parse_line(&mut self, line: &str) -> ParseResult { 120 | self.line += 1; 121 | 122 | if self.skip_re.is_match(line) { 123 | ParseResult::Continue 124 | } else if let Some(captures) = self.section_re.captures(line) { 125 | self.section = match captures.get(1).unwrap().as_str() { 126 | "connection" => NwMgrSection::Connection, 127 | "wifi" => NwMgrSection::Wifi, 128 | _ => NwMgrSection::Other, 129 | }; 130 | ParseResult::Continue 131 | } else if let Some(captures) = self.param_re.captures(line) { 132 | let param = captures.get(1).unwrap().as_str(); 133 | let value = captures.get(2).unwrap().as_str(); 134 | 135 | match self.section { 136 | NwMgrSection::Connection => { 137 | if param == "type" { 138 | if value == "wifi" { 139 | self.is_wifi = true; 140 | if self.ssid.is_some() { 141 | ParseResult::TermFound 142 | } else { 143 | ParseResult::Continue 144 | } 145 | } else { 146 | 
ParseResult::TermNoWifi 147 | } 148 | } else { 149 | ParseResult::Continue 150 | } 151 | } 152 | NwMgrSection::Wifi => { 153 | if param == "ssid" { 154 | debug!("Found ssid: '{}'", value); 155 | self.ssid = Some(String::from(value)); 156 | if self.is_wifi { 157 | ParseResult::TermFound 158 | } else { 159 | ParseResult::Continue 160 | } 161 | } else { 162 | ParseResult::Continue 163 | } 164 | } 165 | NwMgrSection::Other => ParseResult::Continue, 166 | } 167 | } else { 168 | warn!("Ignoring line: '{}'", line); 169 | ParseResult::Continue 170 | } 171 | } 172 | } 173 | 174 | pub(crate) fn replace_nwmgr_id(content: &str, id: &str) -> Result { 175 | let mut res = String::new(); 176 | let mut parser = ParserState::new(); 177 | let mut found = false; 178 | for line in content.lines() { 179 | if !found && parser.is_id_line(line) { 180 | res.push_str(&format!("id={}\n", id)); 181 | found = true; 182 | continue; 183 | } 184 | res.push_str(&format!("{}\n", line)); 185 | } 186 | if found { 187 | Ok(res) 188 | } else { 189 | Err(Error::with_context( 190 | ErrorKind::InvState, 191 | "No NetworkManager connection Id found", 192 | )) 193 | } 194 | } 195 | 196 | pub(crate) fn parse_nwmgr_config(ssid_filter: &[String]) -> Result> { 197 | if dir_exists(NWMGR_CONFIG_DIR)? 
{ 198 | let mut wifis: Vec = Vec::new(); 199 | let paths = read_dir(NWMGR_CONFIG_DIR) 200 | .upstream_with_context(&format!("Failed to list directory '{}'", NWMGR_CONFIG_DIR))?; 201 | 202 | let mut parser = ParserState::new(); 203 | for dir_entry in paths { 204 | match dir_entry { 205 | Ok(dir_entry) => { 206 | parser.parse_file(dir_entry.path(), ssid_filter, &mut wifis)?; 207 | parser.reset(); 208 | } 209 | Err(why) => { 210 | return Err(Error::with_all( 211 | ErrorKind::InvParam, 212 | &format!("Failed to read directory entry of '{}',", NWMGR_CONFIG_DIR,), 213 | Box::new(why), 214 | )); 215 | } 216 | } 217 | } 218 | Ok(wifis) 219 | } else { 220 | Err(Error::with_context( 221 | ErrorKind::FileNotFound, 222 | &format!( 223 | "Network manager configuration directory could not be found: '{}'", 224 | NWMGR_CONFIG_DIR 225 | ), 226 | )) 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /src/stage1/wifi_config/wpa_parser.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | common::{Error, Result, ToError}, 3 | stage1::wifi_config::{Params, WifiConfig}, 4 | ErrorKind, 5 | }; 6 | 7 | use regex::Regex; 8 | 9 | use log::{debug, info, trace, warn}; 10 | use std::fs::File; 11 | use std::io::{BufRead, BufReader}; 12 | use std::path::Path; 13 | 14 | use crate::common::file_exists; 15 | 16 | pub(crate) const WPA_CONFIG_FILE: &str = "/etc/wpa_supplicant/wpa_supplicant.conf"; 17 | 18 | #[derive(Debug, PartialEq, Clone)] 19 | enum WpaState { 20 | Init, 21 | Network, 22 | } 23 | 24 | pub(crate) struct WpaParser<'a> { 25 | ssid_filter: &'a [String], 26 | skip_re: Regex, 27 | net_start_re: Regex, 28 | net_param1_re: Regex, 29 | net_param2_re: Regex, 30 | net_end_re: Regex, 31 | state: WpaState, 32 | last_state: WpaState, 33 | ssid: Option, 34 | psk: Option, 35 | } 36 | 37 | impl<'a> WpaParser<'a> { 38 | pub fn parse_config(ssid_filter: &[String]) -> Result> { 39 | trace!("from_wpa: 
entered with {:?}", ssid_filter); 40 | 41 | if file_exists(WPA_CONFIG_FILE) { 42 | debug!("parse_config: scanning '{}'", WPA_CONFIG_FILE); 43 | 44 | let mut parser = WpaParser::new(ssid_filter); 45 | parser.parse_file(WPA_CONFIG_FILE) 46 | } else { 47 | Err(Error::with_context( 48 | ErrorKind::FileNotFound, 49 | &format!("parse_config: file not found: '{}'", WPA_CONFIG_FILE), 50 | )) 51 | } 52 | } 53 | 54 | pub fn new(ssid_filter: &'a [String]) -> WpaParser { 55 | WpaParser { 56 | ssid_filter, 57 | skip_re: Regex::new(r##"^(\s*#.*|\s*)$"##).unwrap(), 58 | net_start_re: Regex::new(r#"^\s*network\s*=\s*\{\s*$"#).unwrap(), 59 | net_param1_re: Regex::new(r#"^\s*(\S+)\s*=\s*"([^"]+)"\s*$"#).unwrap(), 60 | net_param2_re: Regex::new(r#"^\s*(\S+)\s*=\s*(\S+)\s*$"#).unwrap(), 61 | net_end_re: Regex::new(r#"^\s*}\s*$"#).unwrap(), 62 | state: WpaState::Init, 63 | last_state: WpaState::Init, 64 | ssid: None, 65 | psk: None, 66 | } 67 | } 68 | 69 | pub fn parse_file>(&mut self, wpa_path: P) -> Result> { 70 | let mut wifis: Vec = Vec::new(); 71 | let wpa_path = wpa_path.as_ref(); 72 | let file = File::open(wpa_path) 73 | .upstream_with_context(&format!("failed to open file {}", wpa_path.display()))?; 74 | 75 | for line in BufReader::new(file).lines() { 76 | if self.last_state != self.state { 77 | debug!("parse_file: {:?} -> {:?}", self.last_state, self.state); 78 | self.last_state = self.state.clone() 79 | } 80 | 81 | match line { 82 | Ok(line) => { 83 | if self.skip_re.is_match(&line) { 84 | debug!("skipping line: '{}'", line); 85 | continue; 86 | } 87 | 88 | debug!("parse_file: processing line '{}'", line); 89 | match self.state { 90 | WpaState::Init => { 91 | self.in_init_state(&line); 92 | } 93 | WpaState::Network => self.in_network_state(&line, &mut wifis), 94 | } 95 | } 96 | Err(why) => { 97 | return Err(Error::from_upstream( 98 | Box::new(why), 99 | &format!("unexpected read error from {}", wpa_path.display()), 100 | )); 101 | } 102 | } 103 | } 104 | Ok(wifis) 105 | } 106 
| 107 | fn init_state(&mut self) { 108 | self.state = WpaState::Init; 109 | self.last_state = WpaState::Init; 110 | self.ssid = None; 111 | self.psk = None; 112 | } 113 | 114 | fn in_init_state(&mut self, line: &str) { 115 | if self.skip_re.is_match(line) { 116 | debug!("skipping line: '{}'", line); 117 | return; 118 | } 119 | 120 | if self.net_start_re.is_match(line) { 121 | self.state = WpaState::Network; 122 | } else { 123 | warn!("skipping line '{}' in state {:?} ", &line, self.state); 124 | } 125 | } 126 | 127 | fn in_network_state(&mut self, line: &str, wifis: &mut Vec) { 128 | if self.skip_re.is_match(line) { 129 | debug!("skipping line: '{}'", line); 130 | return; 131 | } 132 | 133 | if self.net_end_re.is_match(line) { 134 | self.end_network(wifis); 135 | return; 136 | } 137 | 138 | let mut captures = self.net_param1_re.captures(line); 139 | if captures.is_none() { 140 | captures = self.net_param2_re.captures(line) 141 | } 142 | 143 | if let Some(captures) = captures { 144 | if !self.set_wpa_param( 145 | captures.get(1).unwrap().as_str(), 146 | captures.get(2).unwrap().as_str(), 147 | ) { 148 | debug!("in state {:?} ignoring line '{}'", self.state, line); 149 | } 150 | } else { 151 | warn!("in state {:?} ignoring line '{}'", self.state, line); 152 | } 153 | } 154 | 155 | fn end_network(&mut self, wifis: &mut Vec) { 156 | debug!("in state {:?} found end of network", self.state); 157 | 158 | if let Some(ssid) = self.ssid.take() { 159 | // TODO: check if ssid is in filter list 160 | 161 | let mut valid = self.ssid_filter.is_empty(); 162 | if !valid { 163 | if let Some(_pos) = self.ssid_filter.iter().position(|r| r.as_str() == ssid) { 164 | valid = true; 165 | } 166 | } 167 | 168 | if valid { 169 | if let Some(_pos) = wifis.iter().position(|r| r.get_ssid() == ssid) { 170 | debug!("Network '{}' is already contained in wifi list, skipping duplicate definition", ssid); 171 | } else { 172 | wifis.push(WifiConfig::Params(Params { 173 | ssid, 174 | psk: 
self.psk.take(), 175 | })); 176 | } 177 | } else { 178 | info!("ignoring wifi config for ssid: '{}'", ssid); 179 | } 180 | } else { 181 | warn!("empty network config encountered"); 182 | } 183 | 184 | self.init_state(); 185 | } 186 | 187 | fn set_wpa_param(&mut self, param: &str, value: &str) -> bool { 188 | match param { 189 | "ssid" => { 190 | debug!("in state {:?} set ssid to '{}'", self.state, value); 191 | self.ssid = Some(String::from(value)); 192 | true 193 | } 194 | "psk" => { 195 | debug!("in state {:?} set psk to '{}'", self.state, value); 196 | self.psk = Some(String::from(value)); 197 | true 198 | } 199 | _ => false, 200 | } 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /test_data/part.img.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/balena-os/takeover/27d761781144004720a5547c37a147fd03a99174/test_data/part.img.gz -------------------------------------------------------------------------------- /versionist.conf.js: -------------------------------------------------------------------------------- 1 | var execSync = require('child_process').execSync; 2 | 3 | var getAuthor = (commitHash) => { 4 | return execSync(`git show --quiet --format="%an" ${commitHash}`, { encoding: 'utf8' }).replace('\n', ''); 5 | } 6 | 7 | module.exports = { 8 | // This setup allows the editing and parsing of footer tags to get version and type information, 9 | // as well as ensuring tags of the type 'v..' are used. 10 | // It increments in a semver compatible fashion and allows the updating of NPM package info. 11 | editChangelog: true, 12 | parseFooterTags: true, 13 | getGitReferenceFromVersion: 'v-prefix', 14 | incrementVersion: 'semver', 15 | updateVersion: 'cargo', 16 | 17 | // Always add the entry to the top of the Changelog, below the header. 
18 | addEntryToChangelog: { 19 | preset: 'prepend', 20 | fromLine: 6 21 | }, 22 | 23 | // Only include a commit when there is a footer tag of 'change-type'. 24 | // Ensures commits which do not up versions are not included. 25 | includeCommitWhen: (commit) => { 26 | return !!commit.footer['change-type']; 27 | }, 28 | 29 | // Determine the type from 'change-type:' tag. 30 | // Should no explicit change type be made, then no changes are assumed. 31 | getIncrementLevelFromCommit: (commit) => { 32 | if (commit.footer['change-type']) { 33 | return commit.footer['change-type'].trim(); 34 | } 35 | }, 36 | 37 | // If a 'changelog-entry' tag is found, use this as the subject rather than the 38 | // first line of the commit. 39 | transformTemplateData: (data) => { 40 | data.commits.forEach((commit) => { 41 | commit.subject = commit.footer['changelog-entry'] || commit.subject; 42 | commit.author = getAuthor(commit.hash); 43 | }); 44 | 45 | return data; 46 | }, 47 | 48 | template: [ 49 | '## v{{version}} - {{moment date "Y-MM-DD"}}', 50 | '', 51 | '{{#each commits}}', 52 | '{{#if this.author}}', 53 | '* {{capitalize this.subject}} [{{this.author}}]', 54 | '{{else}}', 55 | '* {{capitalize this.subject}}', 56 | '{{/if}}', 57 | '{{/each}}' 58 | ].join('\n') 59 | }; 60 | --------------------------------------------------------------------------------