├── .gitattributes ├── .github ├── dependabot.yaml └── workflows │ └── ci.yml ├── .gitignore ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── bmap-parser ├── Cargo.toml ├── src │ ├── bmap.rs │ ├── bmap │ │ └── xml.rs │ ├── discarder.rs │ └── lib.rs └── tests │ ├── copy.rs │ ├── data │ ├── simple.bmap │ ├── test.img.bmap │ └── test.img.gz │ └── parse.rs └── bmap-rs ├── Cargo.toml └── src └── main.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | *.img filter=lfs diff=lfs merge=lfs -text 2 | *.gz filter=lfs diff=lfs merge=lfs -text 3 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | # Please see the documentation for all configuration options: 2 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 3 | --- 4 | version: 2 5 | updates: 6 | - package-ecosystem: "github-actions" 7 | directory: "/" 8 | schedule: 9 | interval: "daily" 10 | labels: 11 | - "dependencies" 12 | 13 | - package-ecosystem: "cargo" 14 | directory: "/" 15 | schedule: 16 | interval: "daily" 17 | labels: 18 | - "dependencies" 19 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches-ignore: 4 | - '*.tmp' 5 | pull_request: 6 | workflow_dispatch: 7 | name: CI 8 | 9 | env: 10 | RUST_BACKTRACE: 1 11 | 12 | jobs: 13 | check: 14 | name: cargo check 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | - uses: dtolnay/rust-toolchain@master # avoid the tag here to prevent dependabot from updating it 19 | with: 20 | toolchain: "1.70" 21 | - run: cargo check --all-targets --all-features 22 | fmt: 23 | name: cargo fmt 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v4 27 | - uses: dtolnay/rust-toolchain@master # avoid the tag here to prevent dependabot from updating it 28 | with: 29 | toolchain: "1.70" 30 | components: rustfmt 31 | - run: cargo fmt --all --check 32 | test: 33 | name: cargo test 34 | runs-on: ubuntu-latest 35 | steps: 36 | - uses: actions/checkout@v4 37 | with: 38 | lfs: 'true' 39 | - uses: dtolnay/rust-toolchain@master # avoid the tag here to prevent dependabot from updating it 40 | with: 41 | toolchain: "1.70" 42 | - run: cargo test --all-targets --all-features 43 | 44 | clippy: 45 | name: cargo clippy 46 | runs-on: ubuntu-latest 47 | steps: 48 | - uses: actions/checkout@v4 49 | - uses: dtolnay/rust-toolchain@master # avoid the tag here to prevent dependabot from updating it 50 | with: 51 | toolchain: "1.70" 52 | components: clippy 53 | - run: cargo clippy --all-targets --all-features -- -D warnings 54 | 55 | # Job to key success status against 56 | allgreen: 57 | name: allgreen 58 | if: always() 59 | needs: 60 | - check 61 | - fmt 62 | - test 63 | - clippy 64 | runs-on: ubuntu-latest 65 | steps: 66 | - name: Decide whether the needed jobs succeeded or failed 67 | uses: re-actors/alls-green@release/v1 68 | with: 69 | jobs: ${{ toJSON(needs) }} 70 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | 
-------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to bmap-rs 2 | 3 | :+1: Thank you for contributing! :+1: 4 | These are mostly guidelines, not rules. Use your best judgment and follow 5 | these guidelines when contributing to the project. 6 | 7 | ## How to Contribute 8 | 9 | - Bugs: Tracked as [GitHub issues](https://github.com/collabora/bmap-rs/issues). 10 | - Enhancements: RFE suggestions are tracked as 11 | [GitHub issues](https://github.com/collabora/bmap-rs/issues). 12 | - Code: Managed on [GitHub](https://github.com/collabora/bmap-rs) through 13 | [Pull Requests](https://github.com/collabora/bmap-rs/pulls). 14 | - All PRs should pass CI before being merged. 15 | - Maintainers should not press merge on a pull request; instead, they should comment `bors r+` 16 | on the PR. We use [bors](https://github.com/bors-ng/bors-ng) to ensure the main branch 17 | never breaks. 18 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "bmap-parser", 4 | "bmap-rs", 5 | ] 6 | 7 | # Most of the actual work is CPU-heavy, in particular the Rust sha2 8 | # implementation, so use full optimisations; otherwise everything will run quite 9 | # slowly. 10 | [profile.dev] 11 | opt-level = 3 12 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # bmap-rs 2 | 3 | The bmap-rs project implements tools for working with bmap files. The project is written in 4 | Rust. It is inspired by an existing Python project called 5 | [bmap-tools](https://salsa.debian.org/debian/bmap-tools). 6 | 7 | Right now the implemented functionality is copying system image files using bmap, which is 8 | safer and faster than a regular cp or dd. That can be used to flash images onto block 9 | devices. 10 | 11 | ## Usage 12 | bmap-rs supports one subcommand: 13 | - "copy" - copy an image to another file or block device using a bmap file. 14 | ```bash 15 | bmap-rs copy <image> <destination> 16 | ``` 17 | 18 | The bmap file is looked up automatically next to the source image. The recommendation is 19 | to name it like the source file but with a `.bmap` extension. 20 | 21 | ## License 22 | bmap-rs is dual-licensed under the Apache-2.0 and MIT licenses. 
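
## Using bmap-parser as a library

The `copy` subcommand is a thin wrapper around the `bmap-parser` crate in this workspace. The sketch below shows roughly how that library can be driven directly; the file names are only illustrative, while the calls mirror the ones used in `bmap-rs/src/main.rs` and `bmap-parser/tests/copy.rs`.

```rust
use bmap_parser::{Bmap, Discarder};
use flate2::read::GzDecoder;
use std::fs::File;
use std::io::Read;

fn main() -> anyhow::Result<()> {
    // Parse the block map from its XML description.
    let mut xml = String::new();
    File::open("disk.img.bmap")?.read_to_string(&mut xml)?;
    let bmap = Bmap::from_xml(&xml)?;

    // A gzipped image only implements Read, so wrap it in Discarder,
    // which implements SeekForward by reading and throwing data away.
    let gz = GzDecoder::new(File::open("disk.img.gz")?);
    let mut input = Discarder::new(gz);

    // The destination needs Write + SeekForward; File gets SeekForward
    // through the blanket impl for Seek types.
    let mut output = File::create("disk.img")?;

    // Copy only the mapped ranges, verifying each range's checksum.
    bmap_parser::copy(&mut input, &mut output, &bmap)?;
    Ok(())
}
```

Note that bmap-rs additionally truncates a destination file to the image size before copying; the library itself only writes the mapped ranges.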
23 | -------------------------------------------------------------------------------- /bmap-parser/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bmap-parser" 3 | version = "0.2.0" 4 | authors = ["Sjoerd Simons "] 5 | edition = "2018" 6 | license = "MIT AND Apache-2.0" 7 | description = "bmap-parser is a library for Rust that allows you to copy files or flash block devices safely" 8 | repository = "https://github.com/collabora/bmap-rs" 9 | readme = "../README.md" 10 | 11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 12 | 13 | [dependencies] 14 | thiserror = "1.0.24" 15 | quick-xml = { version = "0.31.0", features = [ "serialize" ] } 16 | serde = { version = "1.0.147", features = [ "derive" ] } 17 | anyhow = { version = "1.0.40", optional = true } 18 | sha2 = { version = "0.10.6", features = [ "asm" ] } 19 | strum = { version = "0.26.1", features = [ "derive"] } 20 | digest = "0.10.5" 21 | flate2 = "1.0.20" 22 | async-trait = "0.1.58" 23 | futures = "0.3.25" 24 | -------------------------------------------------------------------------------- /bmap-parser/src/bmap.rs: -------------------------------------------------------------------------------- 1 | use strum::{Display, EnumDiscriminants, EnumString}; 2 | use thiserror::Error; 3 | mod xml; 4 | 5 | #[derive(Copy, Clone, Debug, PartialEq, Eq, EnumString, Display)] 6 | #[strum(serialize_all = "lowercase")] 7 | #[non_exhaustive] 8 | pub enum HashType { 9 | Sha256, 10 | } 11 | 12 | #[derive(Copy, Clone, Debug, PartialEq, Eq, EnumDiscriminants)] 13 | #[non_exhaustive] 14 | pub enum HashValue { 15 | Sha256([u8; 32]), 16 | } 17 | 18 | impl HashValue { 19 | pub fn to_type(&self) -> HashType { 20 | match self { 21 | HashValue::Sha256(_) => HashType::Sha256, 22 | } 23 | } 24 | 25 | pub fn as_slice(&self) -> &[u8] { 26 | match self { 27 | HashValue::Sha256(v) => v, 28 | } 29 | } 30 | } 31 | 32 | #[derive(Clone, Debug, PartialEq, Eq)] 33 | pub struct BlockRange { 34 | offset: u64, 35 | length: u64, 36 | checksum: HashValue, 37 | } 38 | 39 | impl BlockRange { 40 | pub fn checksum(&self) -> HashValue { 41 | self.checksum 42 | } 43 | 44 | pub fn offset(&self) -> u64 { 45 | self.offset 46 | } 47 | 48 | pub fn length(&self) -> u64 { 49 | self.length 50 | } 51 | } 52 | 53 | #[derive(Clone, Debug)] 54 | pub struct Bmap { 55 | image_size: u64, 56 | block_size: u64, 57 | blocks: u64, 58 | mapped_blocks: u64, 59 | checksum_type: HashType, 60 | blockmap: Vec, 61 | } 62 | 63 | impl Bmap { 64 | pub fn builder() -> BmapBuilder { 65 | BmapBuilder::default() 66 | } 67 | 68 | /// Build from a .bmap xml file 69 | pub fn from_xml(xml: &str) -> Result { 70 | xml::from_xml(xml) 71 | } 72 | 73 | /// Image size in bytes 74 | pub fn image_size(&self) -> u64 { 75 | self.image_size 76 | } 77 | 78 | /// block size in bytes 79 | pub const fn block_size(&self) -> u64 { 80 | self.block_size 81 | } 82 | 83 | /// number of blocks in the image 84 | pub fn blocks(&self) -> u64 { 85 | self.blocks 86 | } 87 | 88 | /// number of mapped blocks in the image 89 | pub fn mapped_blocks(&self) -> u64 { 90 | self.mapped_blocks 91 | } 92 | 93 | /// checksum type used 94 | pub fn checksum_type(&self) -> HashType { 95 | self.checksum_type 96 | } 97 | 98 | /// Iterator over the block map 99 | pub fn block_map(&self) -> impl ExactSizeIterator + Iterator { 100 | self.blockmap.iter() 101 | } 102 | 103 | /// Total mapped size in bytes 104 | pub fn total_mapped_size(&self) -> u64 
{ 105 | self.block_size * self.mapped_blocks 106 | } 107 | } 108 | 109 | #[derive(Clone, Debug, Error)] 110 | pub enum BmapBuilderError { 111 | #[error("Image size missing")] 112 | MissingImageSize, 113 | #[error("Block size missing")] 114 | MissingBlockSize, 115 | #[error("Blocks missing")] 116 | MissingBlocks, 117 | #[error("Mapped blocks missing")] 118 | MissingMappedBlocks, 119 | #[error("Checksum type missing")] 120 | MissingChecksumType, 121 | #[error("No block ranges")] 122 | NoBlockRanges, 123 | } 124 | 125 | #[derive(Clone, Debug, Default)] 126 | pub struct BmapBuilder { 127 | image_size: Option, 128 | block_size: Option, 129 | blocks: Option, 130 | checksum_type: Option, 131 | mapped_blocks: Option, 132 | blockmap: Vec, 133 | } 134 | 135 | impl BmapBuilder { 136 | pub fn image_size(&mut self, size: u64) -> &mut Self { 137 | self.image_size = Some(size); 138 | self 139 | } 140 | 141 | pub fn block_size(&mut self, block_size: u64) -> &mut Self { 142 | self.block_size = Some(block_size); 143 | self 144 | } 145 | 146 | pub fn blocks(&mut self, blocks: u64) -> &mut Self { 147 | self.blocks = Some(blocks); 148 | self 149 | } 150 | 151 | pub fn mapped_blocks(&mut self, blocks: u64) -> &mut Self { 152 | self.mapped_blocks = Some(blocks); 153 | self 154 | } 155 | 156 | pub fn checksum_type(&mut self, checksum_type: HashType) -> &mut Self { 157 | self.checksum_type = Some(checksum_type); 158 | self 159 | } 160 | 161 | pub fn add_block_range(&mut self, start: u64, end: u64, checksum: HashValue) -> &mut Self { 162 | let bs = self.block_size.expect("Blocksize needs to be set first"); 163 | let total = self.image_size.expect("Image size needs to be set first"); 164 | let offset = start * bs; 165 | let length = (total - offset).min((end - start + 1) * bs); 166 | self.add_byte_range(offset, length, checksum) 167 | } 168 | 169 | pub fn add_byte_range(&mut self, offset: u64, length: u64, checksum: HashValue) -> &mut Self { 170 | let range = BlockRange { 171 | offset, 172 | length, 173 | checksum, 174 | }; 175 | self.blockmap.push(range); 176 | self 177 | } 178 | 179 | pub fn build(self) -> Result { 180 | let image_size = self.image_size.ok_or(BmapBuilderError::MissingImageSize)?; 181 | let block_size = self.block_size.ok_or(BmapBuilderError::MissingBlockSize)?; 182 | let blocks = self.blocks.ok_or(BmapBuilderError::MissingBlocks)?; 183 | let mapped_blocks = self 184 | .mapped_blocks 185 | .ok_or(BmapBuilderError::MissingMappedBlocks)?; 186 | let checksum_type = self 187 | .checksum_type 188 | .ok_or(BmapBuilderError::MissingChecksumType)?; 189 | let blockmap = self.blockmap; 190 | 191 | Ok(Bmap { 192 | image_size, 193 | block_size, 194 | blocks, 195 | mapped_blocks, 196 | checksum_type, 197 | blockmap, 198 | }) 199 | } 200 | } 201 | 202 | #[cfg(test)] 203 | mod test { 204 | use super::*; 205 | use std::str::FromStr; 206 | 207 | #[test] 208 | fn hashes() { 209 | assert_eq!("sha256", &HashType::Sha256.to_string()); 210 | assert_eq!(HashType::Sha256, HashType::from_str("sha256").unwrap()); 211 | let h = HashValue::Sha256([0; 32]); 212 | assert_eq!(HashType::Sha256, h.to_type()); 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /bmap-parser/src/bmap/xml.rs: -------------------------------------------------------------------------------- 1 | use crate::bmap::{BmapBuilder, BmapBuilderError, HashType, HashValue}; 2 | use quick_xml::de::{from_str, DeError}; 3 | use serde::Deserialize; 4 | use std::str::FromStr; 5 | use thiserror::Error; 6 | 7 | 
#[derive(Debug, Deserialize)] 8 | struct Range { 9 | #[serde(rename = "@chksum")] 10 | chksum: String, 11 | #[serde(rename = "$value")] 12 | range: String, 13 | } 14 | 15 | #[derive(Debug, Deserialize)] 16 | struct BlockMap { 17 | #[serde(rename = "Range")] 18 | ranges: Vec, 19 | } 20 | 21 | #[allow(dead_code)] 22 | #[derive(Debug, Deserialize)] 23 | struct Bmap { 24 | #[serde(rename = "@version")] 25 | version: String, 26 | #[serde(rename = "ImageSize")] 27 | image_size: u64, 28 | #[serde(rename = "BlockSize")] 29 | block_size: u64, 30 | #[serde(rename = "BlocksCount")] 31 | blocks_count: u64, 32 | #[serde(rename = "MappedBlocksCount")] 33 | mapped_blocks_count: u64, 34 | #[serde(rename = "ChecksumType")] 35 | checksum_type: String, 36 | #[serde(rename = "BmapFileChecksum")] 37 | bmap_file_checksum: String, 38 | #[serde(rename = "BlockMap")] 39 | block_map: BlockMap, 40 | } 41 | 42 | #[derive(Debug, Error)] 43 | pub enum XmlError { 44 | #[error("Failed to parse bmap XML: {0}")] 45 | XmlParsError(#[from] DeError), 46 | #[error("Invalid bmap file: {0}")] 47 | InvalidFIleError(#[from] BmapBuilderError), 48 | #[error("Unknown checksum type: {0}")] 49 | UnknownChecksumType(String), 50 | #[error("Invalid checksum: {0}")] 51 | InvalidChecksum(String), 52 | } 53 | 54 | const fn hexdigit_to_u8(c: u8) -> Option { 55 | match c { 56 | b'a'..=b'f' => Some(c - b'a' + 0xa), 57 | b'A'..=b'F' => Some(c - b'A' + 0xa), 58 | b'0'..=b'9' => Some(c - b'0'), 59 | _ => None, 60 | } 61 | } 62 | 63 | fn str_to_digest(s: String, digest: &mut [u8]) -> Result<(), XmlError> { 64 | let l = digest.len(); 65 | if s.len() != l * 2 { 66 | return Err(XmlError::InvalidChecksum(format!( 67 | "No enough chars: {} {}", 68 | s, 69 | s.len() 70 | ))); 71 | } 72 | 73 | for (i, chunk) in s.as_bytes().chunks(2).enumerate() { 74 | let hi = match hexdigit_to_u8(chunk[0]) { 75 | Some(v) => v, 76 | None => return Err(XmlError::InvalidChecksum(s)), 77 | }; 78 | let lo = match hexdigit_to_u8(chunk[1]) { 79 | Some(v) => v, 80 | None => return Err(XmlError::InvalidChecksum(s)), 81 | }; 82 | digest[i] = hi << 4 | lo; 83 | } 84 | 85 | Ok(()) 86 | } 87 | 88 | pub(crate) fn from_xml(xml: &str) -> Result { 89 | let b: Bmap = from_str(xml)?; 90 | let mut builder = BmapBuilder::default(); 91 | let hash_type = b.checksum_type; 92 | let hash_type = 93 | HashType::from_str(&hash_type).map_err(|_| XmlError::UnknownChecksumType(hash_type))?; 94 | builder 95 | .image_size(b.image_size) 96 | .block_size(b.block_size) 97 | .blocks(b.blocks_count) 98 | .checksum_type(hash_type) 99 | .mapped_blocks(b.mapped_blocks_count); 100 | 101 | for range in b.block_map.ranges { 102 | let mut split = range.range.trim().splitn(2, '-'); 103 | let start = match split.next() { 104 | Some(s) => s.parse().unwrap(), 105 | None => unimplemented!("woops"), 106 | }; 107 | let end = match split.next() { 108 | Some(s) => s.parse().unwrap(), 109 | None => start, 110 | }; 111 | 112 | let checksum = match hash_type { 113 | HashType::Sha256 => { 114 | let mut v = [0; 32]; 115 | str_to_digest(range.chksum, &mut v)?; 116 | HashValue::Sha256(v) 117 | } 118 | }; 119 | builder.add_block_range(start, end, checksum); 120 | } 121 | 122 | builder.build().map_err(std::convert::Into::into) 123 | } 124 | -------------------------------------------------------------------------------- /bmap-parser/src/discarder.rs: -------------------------------------------------------------------------------- 1 | use crate::{AsyncSeekForward, SeekForward}; 2 | use async_trait::async_trait; 3 | use 
futures::io::{AsyncRead, AsyncReadExt}; 4 | use std::io::Read; 5 | use std::io::Result as IOResult; 6 | use std::pin::Pin; 7 | use std::task::{Context, Poll}; 8 | 9 | /// Adaptor that implements SeekForward on types only implementing Read by discarding data 10 | pub struct Discarder { 11 | reader: R, 12 | } 13 | 14 | impl Discarder { 15 | pub fn new(reader: R) -> Self { 16 | Self { reader } 17 | } 18 | 19 | pub fn into_inner(self) -> R { 20 | self.reader 21 | } 22 | } 23 | 24 | impl Read for Discarder { 25 | fn read(&mut self, buf: &mut [u8]) -> IOResult { 26 | self.reader.read(buf) 27 | } 28 | } 29 | 30 | impl SeekForward for Discarder { 31 | fn seek_forward(&mut self, forward: u64) -> IOResult<()> { 32 | let mut buf = [0; 4096]; 33 | let mut left = forward as usize; 34 | while left > 0 { 35 | let toread = left.min(buf.len()); 36 | let r = self.reader.read(&mut buf[0..toread])?; 37 | left -= r; 38 | } 39 | Ok(()) 40 | } 41 | } 42 | 43 | pub struct AsyncDiscarder { 44 | reader: R, 45 | } 46 | 47 | impl AsyncDiscarder { 48 | pub fn new(reader: R) -> Self { 49 | Self { reader } 50 | } 51 | 52 | pub fn into_inner(self) -> R { 53 | self.reader 54 | } 55 | } 56 | 57 | impl AsyncRead for AsyncDiscarder { 58 | fn poll_read( 59 | mut self: Pin<&mut Self>, 60 | cx: &mut Context<'_>, 61 | buf: &mut [u8], 62 | ) -> Poll> { 63 | Pin::new(&mut self.reader).poll_read(cx, buf) 64 | } 65 | } 66 | 67 | #[async_trait] 68 | impl AsyncSeekForward for AsyncDiscarder { 69 | async fn async_seek_forward(&mut self, forward: u64) -> IOResult<()> { 70 | let mut buf = [0; 4096]; 71 | let mut left = forward as usize; 72 | while left > 0 { 73 | let toread = left.min(buf.len()); 74 | let r = self.read(&mut buf[0..toread]).await?; 75 | left -= r; 76 | } 77 | Ok(()) 78 | } 79 | } 80 | 81 | #[cfg(test)] 82 | mod test { 83 | use super::*; 84 | use std::slice; 85 | 86 | #[test] 87 | fn discard() { 88 | let mut data = Vec::with_capacity(256); 89 | for byte in 0u8..=255 { 90 | data.push(byte); 91 | } 92 | 93 | let mut discarder = Discarder::new(data.as_slice()); 94 | let _ = &[0u64, 5, 16, 31, 63, 200, 255] 95 | .iter() 96 | .fold(0, |pos, offset| { 97 | let mut byte: u8 = 1; 98 | discarder.seek_forward(offset - pos).unwrap(); 99 | assert_eq!(1, discarder.read(slice::from_mut(&mut byte)).unwrap()); 100 | assert_eq!(*offset, byte as u64); 101 | *offset + 1 102 | }); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /bmap-parser/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod bmap; 2 | pub use crate::bmap::*; 3 | mod discarder; 4 | pub use crate::discarder::*; 5 | use async_trait::async_trait; 6 | use futures::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt}; 7 | use futures::TryFutureExt; 8 | use sha2::{Digest, Sha256}; 9 | use thiserror::Error; 10 | 11 | use std::io::Result as IOResult; 12 | use std::io::{Read, Seek, SeekFrom, Write}; 13 | 14 | /// Trait that can only seek further forwards 15 | pub trait SeekForward { 16 | fn seek_forward(&mut self, offset: u64) -> IOResult<()>; 17 | } 18 | 19 | impl SeekForward for T { 20 | fn seek_forward(&mut self, forward: u64) -> IOResult<()> { 21 | self.seek(SeekFrom::Current(forward as i64))?; 22 | Ok(()) 23 | } 24 | } 25 | 26 | #[async_trait] 27 | pub trait AsyncSeekForward { 28 | async fn async_seek_forward(&mut self, offset: u64) -> IOResult<()>; 29 | } 30 | 31 | #[async_trait] 32 | impl AsyncSeekForward for T { 33 | async fn 
async_seek_forward(&mut self, forward: u64) -> IOResult<()> { 34 | self.seek(SeekFrom::Current(forward as i64)).await?; 35 | Ok(()) 36 | } 37 | } 38 | 39 | #[derive(Debug, Error)] 40 | pub enum CopyError { 41 | #[error("Failed to Read: {0}")] 42 | ReadError(std::io::Error), 43 | #[error("Failed to Write: {0}")] 44 | WriteError(std::io::Error), 45 | #[error("Checksum error")] 46 | ChecksumError, 47 | #[error("Unexpected EOF on input")] 48 | UnexpectedEof, 49 | } 50 | 51 | pub fn copy(input: &mut I, output: &mut O, map: &Bmap) -> Result<(), CopyError> 52 | where 53 | I: Read + SeekForward, 54 | O: Write + SeekForward, 55 | { 56 | let mut hasher = match map.checksum_type() { 57 | HashType::Sha256 => Sha256::new(), 58 | }; 59 | 60 | // TODO benchmark a reasonable size for this 61 | let mut v = vec![0; 8 * 1024 * 1024]; 62 | 63 | let buf = v.as_mut_slice(); 64 | let mut position = 0; 65 | for range in map.block_map() { 66 | let forward = range.offset() - position; 67 | input.seek_forward(forward).map_err(CopyError::ReadError)?; 68 | output 69 | .seek_forward(forward) 70 | .map_err(CopyError::WriteError)?; 71 | 72 | let mut left = range.length() as usize; 73 | while left > 0 { 74 | let toread = left.min(buf.len()); 75 | let r = input 76 | .read(&mut buf[0..toread]) 77 | .map_err(CopyError::ReadError)?; 78 | if r == 0 { 79 | return Err(CopyError::UnexpectedEof); 80 | } 81 | hasher.update(&buf[0..r]); 82 | output 83 | .write_all(&buf[0..r]) 84 | .map_err(CopyError::WriteError)?; 85 | left -= r; 86 | } 87 | let digest = hasher.finalize_reset(); 88 | if range.checksum().as_slice() != digest.as_slice() { 89 | return Err(CopyError::ChecksumError); 90 | } 91 | 92 | position = range.offset() + range.length(); 93 | } 94 | 95 | Ok(()) 96 | } 97 | 98 | pub async fn copy_async(input: &mut I, output: &mut O, map: &Bmap) -> Result<(), CopyError> 99 | where 100 | I: AsyncRead + AsyncSeekForward + Unpin, 101 | O: AsyncWrite + AsyncSeekForward + Unpin, 102 | { 103 | let mut hasher = match map.checksum_type() { 104 | HashType::Sha256 => Sha256::new(), 105 | }; 106 | 107 | // TODO benchmark a reasonable size for this 108 | let mut v = vec![0; 8 * 1024 * 1024]; 109 | 110 | let buf = v.as_mut_slice(); 111 | let mut position = 0; 112 | for range in map.block_map() { 113 | let forward = range.offset() - position; 114 | input 115 | .async_seek_forward(forward) 116 | .map_err(CopyError::ReadError) 117 | .await?; 118 | output.flush().map_err(CopyError::WriteError).await?; 119 | output 120 | .async_seek_forward(forward) 121 | .map_err(CopyError::WriteError) 122 | .await?; 123 | 124 | let mut left = range.length() as usize; 125 | while left > 0 { 126 | let toread = left.min(buf.len()); 127 | let r = input 128 | .read(&mut buf[0..toread]) 129 | .map_err(CopyError::ReadError) 130 | .await?; 131 | if r == 0 { 132 | return Err(CopyError::UnexpectedEof); 133 | } 134 | hasher.update(&buf[0..r]); 135 | output 136 | .write_all(&buf[0..r]) 137 | .await 138 | .map_err(CopyError::WriteError)?; 139 | left -= r; 140 | } 141 | let digest = hasher.finalize_reset(); 142 | if range.checksum().as_slice() != digest.as_slice() { 143 | return Err(CopyError::ChecksumError); 144 | } 145 | 146 | position = range.offset() + range.length(); 147 | } 148 | Ok(()) 149 | } 150 | 151 | pub fn copy_nobmap(input: &mut I, output: &mut O) -> Result<(), CopyError> 152 | where 153 | I: Read, 154 | O: Write, 155 | { 156 | std::io::copy(input, output).map_err(CopyError::WriteError)?; 157 | Ok(()) 158 | } 159 | 160 | pub async fn copy_async_nobmap(input: &mut I, 
output: &mut O) -> Result<(), CopyError> 161 | where 162 | I: AsyncRead + AsyncSeekForward + Unpin, 163 | O: AsyncWrite + AsyncSeekForward + Unpin, 164 | { 165 | futures::io::copy(input, output) 166 | .map_err(CopyError::WriteError) 167 | .await?; 168 | Ok(()) 169 | } 170 | -------------------------------------------------------------------------------- /bmap-parser/tests/copy.rs: -------------------------------------------------------------------------------- 1 | use bmap_parser::{Bmap, Discarder, SeekForward}; 2 | use flate2::read::GzDecoder; 3 | use sha2::{Digest, Sha256}; 4 | use std::env; 5 | use std::fs::File; 6 | use std::io::Result as IOResult; 7 | use std::io::{Error, ErrorKind, Read, Write}; 8 | use std::path::PathBuf; 9 | 10 | #[derive(Clone, Debug)] 11 | struct OutputMockRange { 12 | offset: u64, 13 | data: Vec, 14 | } 15 | 16 | impl OutputMockRange { 17 | fn new(offset: u64) -> Self { 18 | Self { 19 | offset, 20 | data: Vec::new(), 21 | } 22 | } 23 | 24 | fn write(&mut self, data: &[u8]) { 25 | self.data.extend_from_slice(data); 26 | } 27 | 28 | fn sha256(&self) -> [u8; 32] { 29 | Sha256::digest(&self.data).into() 30 | } 31 | } 32 | 33 | #[derive(Clone, Debug)] 34 | struct OutputMock { 35 | size: u64, 36 | offset: u64, 37 | ranges: Vec, 38 | } 39 | 40 | impl OutputMock { 41 | fn new(size: u64) -> Self { 42 | Self { 43 | size, 44 | offset: 0, 45 | ranges: Vec::new(), 46 | } 47 | } 48 | 49 | fn add_range(&mut self, offset: u64) -> &mut OutputMockRange { 50 | self.ranges.push(OutputMockRange::new(offset)); 51 | self.ranges.last_mut().unwrap() 52 | } 53 | 54 | fn sha256(&mut self) -> [u8; 32] { 55 | fn pad(hasher: &mut Sha256, mut topad: u64) { 56 | const ZEROES: [u8; 4096] = [0; 4096]; 57 | while topad > 0 { 58 | let len = ZEROES.len() as u64; 59 | let len = len.min(topad); 60 | hasher.update(&ZEROES[0..len as usize]); 61 | topad -= len; 62 | } 63 | } 64 | 65 | let mut hasher = Sha256::new(); 66 | let mut offset = 0; 67 | for range in self.ranges.iter() { 68 | if offset < range.offset { 69 | pad(&mut hasher, range.offset - offset); 70 | offset = range.offset; 71 | } 72 | 73 | hasher.update(&range.data); 74 | offset += range.data.len() as u64; 75 | } 76 | 77 | pad(&mut hasher, self.size - offset); 78 | 79 | hasher.finalize().into() 80 | } 81 | } 82 | 83 | impl Write for OutputMock { 84 | fn write(&mut self, data: &[u8]) -> IOResult { 85 | let maxsize = self.size as usize; 86 | let range = match self.ranges.last_mut() { 87 | Some(last) if last.offset == self.offset => last, 88 | _ => self.add_range(self.offset), 89 | }; 90 | if range.offset as usize + range.data.len() + data.len() > maxsize { 91 | return Err(Error::new(ErrorKind::Other, "Writing outside of space")); 92 | } 93 | range.write(data); 94 | Ok(data.len()) 95 | } 96 | 97 | fn flush(&mut self) -> IOResult<()> { 98 | Ok(()) 99 | } 100 | } 101 | 102 | impl SeekForward for OutputMock { 103 | fn seek_forward(&mut self, forward: u64) -> IOResult<()> { 104 | self.offset += if let Some(last) = self.ranges.last() { 105 | last.data.len() as u64 + forward 106 | } else { 107 | forward 108 | }; 109 | Ok(()) 110 | } 111 | } 112 | 113 | fn setup_data(basename: &str) -> (Bmap, impl Read + SeekForward) { 114 | let mut datadir = PathBuf::new(); 115 | datadir.push(env::var("CARGO_MANIFEST_DIR").unwrap()); 116 | datadir.push("tests/data"); 117 | 118 | let mut bmapfile = datadir.clone(); 119 | bmapfile.push(format!("{}.bmap", basename)); 120 | 121 | let mut b = 122 | File::open(&bmapfile).unwrap_or_else(|_| panic!("Failed to open bmap 
file:{:?}", bmapfile)); 123 | let mut xml = String::new(); 124 | b.read_to_string(&mut xml).unwrap(); 125 | let bmap = Bmap::from_xml(&xml).unwrap(); 126 | 127 | let mut datafile = datadir.clone(); 128 | datafile.push(format!("{}.gz", basename)); 129 | let g = 130 | File::open(&datafile).unwrap_or_else(|_| panic!("Failed to open data file:{:?}", datafile)); 131 | let gz = GzDecoder::new(g); 132 | let gz = Discarder::new(gz); 133 | 134 | (bmap, gz) 135 | } 136 | 137 | fn sha256_reader(mut reader: R) -> [u8; 32] { 138 | let mut buffer = [0; 4096]; 139 | let mut hasher = Sha256::new(); 140 | loop { 141 | let r = reader.read(&mut buffer).unwrap(); 142 | if r == 0 { 143 | break; 144 | } 145 | hasher.update(&buffer[0..r]); 146 | } 147 | 148 | hasher.finalize().into() 149 | } 150 | 151 | #[test] 152 | fn copy() { 153 | let (bmap, mut input) = setup_data("test.img"); 154 | let mut output = OutputMock::new(bmap.image_size()); 155 | 156 | bmap_parser::copy(&mut input, &mut output, &bmap).unwrap(); 157 | assert_eq!(bmap_parser::HashType::Sha256, bmap.checksum_type()); 158 | assert_eq!(bmap.block_map().len(), output.ranges.len()); 159 | 160 | // Assert that written ranges match the ranges in the map file 161 | for (map, range) in bmap.block_map().zip(output.ranges.iter()) { 162 | assert_eq!(map.offset(), range.offset); 163 | assert_eq!(map.length(), range.data.len() as u64); 164 | assert_eq!(map.checksum().as_slice(), range.sha256()); 165 | } 166 | 167 | let (_, mut input) = setup_data("test.img"); 168 | // Assert that the full gzipped content match the written output 169 | assert_eq!(sha256_reader(&mut input), output.sha256()) 170 | } 171 | -------------------------------------------------------------------------------- /bmap-parser/tests/data/simple.bmap: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4198400 4 | 4096 5 | 1025 6 | 680 7 | sha256 8 | 9 | 0000000000000000000000000000000000000000000000000000000000000000 10 | 11 | 12 | 0 13 | 8-16 14 | 32-64 15 | 128-256 16 | 512-1024 17 | 18 | 19 | -------------------------------------------------------------------------------- /bmap-parser/tests/data/test.img.bmap: -------------------------------------------------------------------------------- 1 | 2 | 23 | 24 | 25 | 26 | 16777216 27 | 28 | 29 | 4096 30 | 31 | 32 | 4096 33 | 34 | 35 | 1026 36 | 37 | 38 | sha256 39 | 40 | 42 | d374877d61522c62fe76f6eaad4aa9e84dc1a74575ea529a9076cfafab23ca77 43 | 44 | 47 | 48 | 256 49 | 1024-1535 50 | 2356-2484 51 | 2560-2687 52 | 3584-3839 53 | 54 | 55 | -------------------------------------------------------------------------------- /bmap-parser/tests/data/test.img.gz: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:2f4ab24d109c4a9b83232c1dd3772b76865ba03dae22f5bb54ad54880bae1ab5 3 | size 4217156 4 | -------------------------------------------------------------------------------- /bmap-parser/tests/parse.rs: -------------------------------------------------------------------------------- 1 | use bmap_parser::Bmap; 2 | use digest::Digest; 3 | use sha2::Sha256; 4 | 5 | #[test] 6 | fn parse() { 7 | let xml = include_str!("data/simple.bmap"); 8 | let bmap = Bmap::from_xml(xml).unwrap(); 9 | 10 | assert_eq!(4096, bmap.block_size()); 11 | assert_eq!(1025, bmap.blocks()); 12 | assert_eq!(1025 * 4096, bmap.image_size()); 13 | assert_eq!(680, bmap.mapped_blocks()); 14 | 15 | let mut block = 0; 16 | for range in bmap.block_map() { 17 | 
assert_eq!(block * 4096, range.offset()); 18 | assert_eq!((block + 1) * 4096, range.length()); 19 | 20 | let digest = Sha256::digest(format!("{}", block).as_bytes()); 21 | assert_eq!(digest.as_slice(), range.checksum().as_slice()); 22 | 23 | block = if block == 0 { 8 } else { block * 4 }; 24 | } 25 | assert_eq!(2048, block); 26 | } 27 | -------------------------------------------------------------------------------- /bmap-rs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "bmap-rs" 3 | version = "0.2.0" 4 | authors = ["Sjoerd Simons "] 5 | edition = "2018" 6 | license = "MIT AND Apache-2.0" 7 | description = "bmap-rs is an application that handles the use of bmap crate" 8 | repository = "https://github.com/collabora/bmap-rs" 9 | readme = "../README.md" 10 | 11 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 12 | 13 | [dependencies] 14 | bmap-parser = { path = "../bmap-parser", version = "0.2.0"} 15 | anyhow = "1.0.66" 16 | nix = { version = "0.27.1", features = ["fs"] } 17 | flate2 = "1.0.24" 18 | clap = { version = "~4.4.0", features = ["cargo"] } 19 | indicatif = { version = "0.17.1", features = ["tokio"] } 20 | async-compression = { version = "0.4.5", features = ["gzip", "futures-io"] } 21 | tokio = { version = "1.21.2", features = ["rt", "macros", "fs", "rt-multi-thread"] } 22 | reqwest = { version = "0.11.12", features = ["stream"] } 23 | tokio-util = { version = "0.7.4", features = ["compat"] } 24 | futures = "0.3.25" 25 | -------------------------------------------------------------------------------- /bmap-rs/src/main.rs: -------------------------------------------------------------------------------- 1 | use anyhow::{anyhow, bail, ensure, Context, Result}; 2 | use async_compression::futures::bufread::GzipDecoder; 3 | use bmap_parser::{AsyncDiscarder, Bmap, Discarder, SeekForward}; 4 | use clap::{arg, command, Arg, ArgAction, Command}; 5 | use flate2::read::GzDecoder; 6 | use futures::TryStreamExt; 7 | use indicatif::{ProgressBar, ProgressState, ProgressStyle}; 8 | use nix::unistd::ftruncate; 9 | use reqwest::{Response, Url}; 10 | use std::ffi::OsStr; 11 | use std::fmt::Write; 12 | use std::fs::File; 13 | use std::io::Read; 14 | use std::os::unix::io::AsFd; 15 | use std::path::{Path, PathBuf}; 16 | use tokio_util::compat::TokioAsyncReadCompatExt; 17 | 18 | #[derive(Debug)] 19 | enum Image { 20 | Path(PathBuf), 21 | Url(Url), 22 | } 23 | 24 | #[derive(Debug)] 25 | struct Copy { 26 | image: Image, 27 | dest: PathBuf, 28 | nobmap: bool, 29 | } 30 | 31 | #[derive(Debug)] 32 | 33 | enum Subcommand { 34 | Copy(Copy), 35 | } 36 | 37 | #[derive(Debug)] 38 | struct Opts { 39 | command: Subcommand, 40 | } 41 | 42 | impl Opts { 43 | fn parser() -> Opts { 44 | let matches = command!() 45 | .propagate_version(true) 46 | .subcommand_required(true) 47 | .arg_required_else_help(true) 48 | .subcommand( 49 | Command::new("copy") 50 | .about("Copy image to block device or file") 51 | .arg(arg!([IMAGE]).required(true)) 52 | .arg(arg!([DESTINATION]).required(true)) 53 | .arg( 54 | Arg::new("nobmap") 55 | .short('n') 56 | .long("nobmap") 57 | .action(ArgAction::SetTrue), 58 | ), 59 | ) 60 | .get_matches(); 61 | match matches.subcommand() { 62 | Some(("copy", sub_matches)) => Opts { 63 | command: Subcommand::Copy({ 64 | Copy { 65 | image: match Url::parse(sub_matches.get_one::("IMAGE").unwrap()) { 66 | Ok(url) => Image::Url(url), 67 | Err(_) => Image::Path(PathBuf::from( 68 | 
sub_matches.get_one::("IMAGE").unwrap(), 69 | )), 70 | }, 71 | dest: PathBuf::from(sub_matches.get_one::("DESTINATION").unwrap()), 72 | nobmap: sub_matches.get_flag("nobmap"), 73 | } 74 | }), 75 | }, 76 | _ => unreachable!( 77 | "Exhausted list of subcommands and subcommand_required prevents `None`" 78 | ), 79 | } 80 | } 81 | } 82 | 83 | fn append(path: PathBuf) -> PathBuf { 84 | let mut p = path.into_os_string(); 85 | p.push(".bmap"); 86 | p.into() 87 | } 88 | 89 | fn find_bmap(img: &Path) -> Option { 90 | let mut bmap = img.to_path_buf(); 91 | loop { 92 | bmap = append(bmap); 93 | if bmap.exists() { 94 | return Some(bmap); 95 | } 96 | 97 | // Drop .bmap 98 | bmap.set_extension(""); 99 | bmap.extension()?; 100 | // Drop existing orignal extension part 101 | bmap.set_extension(""); 102 | } 103 | } 104 | 105 | fn find_remote_bmap(mut url: Url) -> Result { 106 | let mut path = PathBuf::from(url.path()); 107 | path.set_extension("bmap"); 108 | url.set_path(path.to_str().unwrap()); 109 | Ok(url) 110 | } 111 | 112 | trait ReadSeekForward: SeekForward + Read {} 113 | impl ReadSeekForward for T {} 114 | 115 | struct Decoder { 116 | inner: Box, 117 | } 118 | 119 | impl Decoder { 120 | fn new(inner: T) -> Self { 121 | Self { 122 | inner: Box::new(inner), 123 | } 124 | } 125 | } 126 | 127 | impl Read for Decoder { 128 | fn read(&mut self, data: &mut [u8]) -> std::io::Result { 129 | self.inner.read(data) 130 | } 131 | } 132 | 133 | impl SeekForward for Decoder { 134 | fn seek_forward(&mut self, forward: u64) -> std::io::Result<()> { 135 | self.inner.seek_forward(forward) 136 | } 137 | } 138 | 139 | fn setup_local_input(path: &Path) -> Result { 140 | let f = File::open(path)?; 141 | match path.extension().and_then(OsStr::to_str) { 142 | Some("gz") => { 143 | let gz = GzDecoder::new(f); 144 | Ok(Decoder::new(Discarder::new(gz))) 145 | } 146 | _ => Ok(Decoder::new(f)), 147 | } 148 | } 149 | 150 | async fn setup_remote_input(url: Url) -> Result { 151 | match PathBuf::from(url.path()) 152 | .extension() 153 | .and_then(OsStr::to_str) 154 | { 155 | Some("gz") => reqwest::get(url).await.map_err(anyhow::Error::new), 156 | None => bail!("No file extension found"), 157 | _ => bail!("Image file format not implemented"), 158 | } 159 | } 160 | 161 | fn setup_progress_bar(bmap: &Bmap) -> ProgressBar { 162 | let pb = ProgressBar::new(bmap.total_mapped_size()); 163 | pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})") 164 | .unwrap() 165 | .with_key("eta", |state: &ProgressState, w: &mut dyn Write| write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap()) 166 | .progress_chars("#>-")); 167 | pb 168 | } 169 | 170 | fn setup_spinner() -> ProgressBar { 171 | let pb = ProgressBar::new_spinner(); 172 | pb.set_style(ProgressStyle::with_template("{spinner:.green} {msg}").unwrap()); 173 | pb 174 | } 175 | 176 | fn setup_output(output: &T, bmap: &Bmap, metadata: std::fs::Metadata) -> Result<()> { 177 | if metadata.is_file() { 178 | ftruncate(output.as_fd(), bmap.image_size() as i64).context("Failed to truncate file")?; 179 | } 180 | Ok(()) 181 | } 182 | 183 | async fn copy(c: Copy) -> Result<()> { 184 | if c.nobmap { 185 | return match c.image { 186 | Image::Path(path) => copy_local_input_nobmap(path, c.dest), 187 | Image::Url(url) => copy_remote_input_nobmap(url, c.dest).await, 188 | }; 189 | } 190 | match c.image { 191 | Image::Path(path) => copy_local_input(path, c.dest), 192 | Image::Url(url) => copy_remote_input(url, c.dest).await, 193 | 
} 194 | } 195 | 196 | fn copy_local_input(source: PathBuf, destination: PathBuf) -> Result<()> { 197 | ensure!(source.exists(), "Image file doesn't exist"); 198 | let bmap = find_bmap(&source).ok_or_else(|| anyhow!("Couldn't find bmap file"))?; 199 | println!("Found bmap file: {}", bmap.display()); 200 | 201 | let mut b = File::open(&bmap).context("Failed to open bmap file")?; 202 | let mut xml = String::new(); 203 | b.read_to_string(&mut xml)?; 204 | 205 | let bmap = Bmap::from_xml(&xml)?; 206 | let output = std::fs::OpenOptions::new() 207 | .write(true) 208 | .create(true) 209 | .open(destination)?; 210 | 211 | setup_output(&output, &bmap, output.metadata()?)?; 212 | 213 | let mut input = setup_local_input(&source)?; 214 | let pb = setup_progress_bar(&bmap); 215 | bmap_parser::copy(&mut input, &mut pb.wrap_write(&output), &bmap)?; 216 | pb.finish_and_clear(); 217 | 218 | println!("Done: Syncing..."); 219 | output.sync_all()?; 220 | 221 | Ok(()) 222 | } 223 | 224 | async fn copy_remote_input(source: Url, destination: PathBuf) -> Result<()> { 225 | let bmap_url = find_remote_bmap(source.clone())?; 226 | 227 | let xml = reqwest::get(bmap_url.clone()).await?.text().await?; 228 | println!("Found bmap file: {}", bmap_url); 229 | 230 | let bmap = Bmap::from_xml(&xml)?; 231 | let mut output = tokio::fs::OpenOptions::new() 232 | .write(true) 233 | .create(true) 234 | .open(destination) 235 | .await?; 236 | 237 | setup_output(&output, &bmap, output.metadata().await?)?; 238 | 239 | let res = setup_remote_input(source).await?; 240 | let stream = res 241 | .bytes_stream() 242 | .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) 243 | .into_async_read(); 244 | let reader = GzipDecoder::new(stream); 245 | let mut input = AsyncDiscarder::new(reader); 246 | let pb = setup_progress_bar(&bmap); 247 | bmap_parser::copy_async( 248 | &mut input, 249 | &mut pb.wrap_async_write(&mut output).compat(), 250 | &bmap, 251 | ) 252 | .await?; 253 | pb.finish_and_clear(); 254 | 255 | println!("Done: Syncing..."); 256 | output.sync_all().await?; 257 | Ok(()) 258 | } 259 | 260 | fn copy_local_input_nobmap(source: PathBuf, destination: PathBuf) -> Result<()> { 261 | ensure!(source.exists(), "Image file doesn't exist"); 262 | 263 | let output = std::fs::OpenOptions::new() 264 | .write(true) 265 | .create(true) 266 | .open(destination)?; 267 | 268 | let mut input = setup_local_input(&source)?; 269 | 270 | let pb = setup_spinner(); 271 | bmap_parser::copy_nobmap(&mut input, &mut pb.wrap_write(&output))?; 272 | pb.finish_and_clear(); 273 | 274 | println!("Done: Syncing..."); 275 | output.sync_all().expect("Sync failure"); 276 | 277 | Ok(()) 278 | } 279 | 280 | async fn copy_remote_input_nobmap(source: Url, destination: PathBuf) -> Result<()> { 281 | let mut output = tokio::fs::OpenOptions::new() 282 | .write(true) 283 | .create(true) 284 | .open(destination) 285 | .await?; 286 | 287 | let res = setup_remote_input(source).await?; 288 | let stream = res 289 | .bytes_stream() 290 | .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) 291 | .into_async_read(); 292 | let reader = GzipDecoder::new(stream); 293 | let mut input = AsyncDiscarder::new(reader); 294 | let pb = setup_spinner(); 295 | bmap_parser::copy_async_nobmap(&mut input, &mut pb.wrap_async_write(&mut output).compat()) 296 | .await?; 297 | pb.finish_and_clear(); 298 | 299 | println!("Done: Syncing..."); 300 | output.sync_all().await?; 301 | Ok(()) 302 | } 303 | 304 | #[tokio::main] 305 | async fn main() -> Result<()> { 306 | let opts = 
Opts::parser(); 307 | 308 | match opts.command { 309 | Subcommand::Copy(c) => copy(c).await, 310 | } 311 | } 312 | --------------------------------------------------------------------------------
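
As a closing illustration of the block-range arithmetic in `bmap-parser/src/bmap.rs`: `BmapBuilder::add_block_range` takes an inclusive range of block numbers, converts it to a byte offset and length, and clamps the length so the final range never runs past the image size. The following is a minimal sketch with made-up numbers; the calls are the public builder API shown above, and the checksum is a dummy value.

```rust
use bmap_parser::{Bmap, BmapBuilderError, HashType, HashValue};

fn main() -> Result<(), BmapBuilderError> {
    // A hypothetical 10 000-byte image with 4096-byte blocks: 3 blocks, all mapped.
    let mut builder = Bmap::builder();
    builder
        .image_size(10_000)
        .block_size(4096)
        .blocks(3)
        .mapped_blocks(3)
        .checksum_type(HashType::Sha256);

    // Inclusive block range 0-2 with a dummy checksum. The byte length is
    // clamped to the image size: min(10_000 - 0, (2 - 0 + 1) * 4096) = 10_000.
    builder.add_block_range(0, 2, HashValue::Sha256([0; 32]));

    let bmap = builder.build()?;
    // Mapped size is counted in whole blocks, while the range length is clamped.
    assert_eq!(bmap.total_mapped_size(), 3 * 4096);
    assert_eq!(bmap.block_map().next().unwrap().length(), 10_000);
    Ok(())
}
```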