├── .ci ├── common.sh ├── test-download-example.sh ├── test-reverse-example.sh └── test-ssh-mount-example.sh ├── .dockerignore ├── .github └── workflows │ ├── docker.yml │ └── rust-ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── buildkit-frontend ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── examples │ ├── download.dockerfile │ ├── download.input │ ├── download.output │ ├── download.rs │ ├── reverse.dockerfile │ ├── reverse.input │ ├── reverse.output │ ├── reverse.rs │ ├── ssh-mount.dockerfile │ ├── ssh-mount.input │ ├── ssh-mount.output │ └── ssh-mount.rs ├── src │ ├── bridge.rs │ ├── error.rs │ ├── lib.rs │ ├── oci.rs │ ├── options │ │ ├── common.rs │ │ ├── default.rs │ │ ├── deserializer.rs │ │ └── mod.rs │ ├── stdio.rs │ └── utils.rs └── tests │ ├── oci-image-spec-min.json │ └── oci-image-spec.json ├── buildkit-llb ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── examples │ ├── highly-parallel.rs │ ├── network.rs │ ├── scratch-owned.rs │ └── scratch.rs └── src │ ├── lib.rs │ ├── ops │ ├── exec │ │ ├── command.rs │ │ ├── context.rs │ │ ├── mod.rs │ │ └── mount.rs │ ├── fs │ │ ├── copy.rs │ │ ├── mkdir.rs │ │ ├── mkfile.rs │ │ ├── mod.rs │ │ ├── path.rs │ │ └── sequence.rs │ ├── mod.rs │ ├── source │ │ ├── git.rs │ │ ├── http.rs │ │ ├── image.rs │ │ ├── local.rs │ │ └── mod.rs │ └── terminal.rs │ ├── serialization │ ├── id.rs │ ├── mod.rs │ ├── operation.rs │ └── output.rs │ └── utils.rs └── buildkit-proto ├── CHANGELOG.md ├── Cargo.toml ├── README.md ├── build.rs ├── proto └── github.com │ ├── gogo │ ├── googleapis │ │ └── google │ │ │ └── rpc │ │ │ └── status.proto │ └── protobuf │ │ └── gogoproto │ │ └── gogo.proto │ ├── moby │ └── buildkit │ │ ├── api │ │ └── types │ │ │ └── worker.proto │ │ ├── frontend │ │ └── gateway │ │ │ └── pb │ │ │ └── gateway.proto │ │ ├── solver │ │ └── pb │ │ │ └── ops.proto │ │ └── util │ │ └── apicaps │ │ └── pb │ │ └── caps.proto │ └── tonistiigi │ └── fsutil │ └── types │ └── stat.proto ├── src └── lib.rs └── update.sh /.ci/common.sh: -------------------------------------------------------------------------------- 1 | export DOCKER_BUILDKIT="1" 2 | 3 | export WORKSPACE_DIR=$(readlink -f "$(dirname $0)/..") 4 | export EXAMPLES_DIR="$WORKSPACE_DIR/buildkit-frontend/examples" 5 | -------------------------------------------------------------------------------- /.ci/test-download-example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source $(dirname $0)/common.sh 3 | 4 | FRONTEND_LABEL="rust-buildkit:download-frontend" 5 | OUTPUT_LABEL="rust-buildkit:download-image" 6 | 7 | set -ex 8 | 9 | docker build -t $FRONTEND_LABEL -f $EXAMPLES_DIR/download.dockerfile $WORKSPACE_DIR 10 | docker build -t $OUTPUT_LABEL -f $EXAMPLES_DIR/download.input $WORKSPACE_DIR 11 | 12 | diff --strip-trailing-cr --color=always <(cat $EXAMPLES_DIR/download.output) <(docker run --rm $OUTPUT_LABEL) 13 | -------------------------------------------------------------------------------- /.ci/test-reverse-example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source $(dirname $0)/common.sh 3 | 4 | FRONTEND_LABEL="rust-buildkit:reverse-frontend" 5 | OUTPUT_LABEL="rust-buildkit:reverse-image" 6 | 7 | set -ex 8 | 9 | docker build -t $FRONTEND_LABEL -f $EXAMPLES_DIR/reverse.dockerfile $WORKSPACE_DIR 10 | docker build -t $OUTPUT_LABEL -f $EXAMPLES_DIR/reverse.input $WORKSPACE_DIR 11 | 12 | diff --strip-trailing-cr --color=always <(cat 
$EXAMPLES_DIR/reverse.output) <(docker run --rm $OUTPUT_LABEL) 13 | -------------------------------------------------------------------------------- /.ci/test-ssh-mount-example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source $(dirname $0)/common.sh 3 | 4 | FRONTEND_LABEL="rust-buildkit:ssh-mount-frontend" 5 | OUTPUT_LABEL="rust-buildkit:ssh-mount-image" 6 | 7 | set -ex 8 | 9 | docker build -t $FRONTEND_LABEL -f $EXAMPLES_DIR/ssh-mount.dockerfile $WORKSPACE_DIR 10 | docker build -t $OUTPUT_LABEL -f $EXAMPLES_DIR/ssh-mount.input $WORKSPACE_DIR --ssh=default 11 | 12 | diff --strip-trailing-cr --color=always <(cat $EXAMPLES_DIR/ssh-mount.output) <(docker run --rm $OUTPUT_LABEL) 13 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | .ci 4 | -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | name: Docker integration 2 | on: 3 | pull_request: 4 | branches: 5 | - master 6 | 7 | push: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | test: 13 | name: Run examples 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v1 17 | - uses: webfactory/ssh-agent@v0.2.0 18 | with: 19 | ssh-private-key: ${{ secrets.SSH_PRIVATE_KEY }} 20 | 21 | - run: docker info 22 | - run: .ci/test-reverse-example.sh 23 | - run: .ci/test-download-example.sh 24 | - run: .ci/test-ssh-mount-example.sh 25 | -------------------------------------------------------------------------------- /.github/workflows/rust-ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | pull_request: 4 | branches: 5 | - master 6 | 7 | push: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | test: 13 | name: Unit tests 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout sources 17 | uses: actions/checkout@v2 18 | 19 | - name: Install Rust stable toolchain 20 | uses: actions-rs/toolchain@v1 21 | with: 22 | profile: minimal 23 | toolchain: stable 24 | override: true 25 | 26 | - name: Run cargo test 27 | uses: actions-rs/cargo@v1 28 | with: 29 | command: test 30 | 31 | lints: 32 | name: Lints 33 | runs-on: ubuntu-latest 34 | steps: 35 | - name: Checkout sources 36 | uses: actions/checkout@v2 37 | 38 | - name: Install stable toolchain 39 | uses: actions-rs/toolchain@v1 40 | with: 41 | profile: minimal 42 | toolchain: stable 43 | override: true 44 | components: rustfmt, clippy 45 | 46 | - name: Run cargo fmt 47 | uses: actions-rs/cargo@v1 48 | with: 49 | command: fmt 50 | args: --all -- --check 51 | 52 | - name: Run cargo clippy 53 | uses: actions-rs/clippy-check@v1 54 | with: 55 | token: ${{ secrets.GITHUB_TOKEN }} 56 | args: --all-features 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "buildkit-proto", 4 | "buildkit-llb", 5 | "buildkit-frontend", 6 | ] 7 | -------------------------------------------------------------------------------- 
/LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. 
Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 Denys Zariaiev 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 
26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | BuildKit binding for Rust 2 | ======= 3 | 4 | [![Actions Status]][Actions Link] 5 | 6 | # Project structure 7 | 8 | This repository contains three important building blocks to implement BuildKit frontends in Rust: 9 | 10 | * [![buildkit-llb Crates Badge]][buildkit-llb Crates Link] 11 | [![buildkit-llb Docs Badge]][buildkit-llb Docs Link] 12 | [`buildkit-llb`](buildkit-llb/README.md) - high-level API to create BuildKit LLB graphs, 13 | 14 | * [![buildkit-frontend Crates Badge]][buildkit-frontend Crates Link] 15 | [![buildkit-frontend Docs Badge]][buildkit-frontend Docs Link] 16 | [`buildkit-frontend`](buildkit-frontend/README.md) - foundation and utilities for BuildKit frontends, 17 | 18 | * [![buildkit-proto Crates Badge]][buildkit-proto Crates Link] 19 | [![buildkit-proto Docs Badge]][buildkit-proto Docs Link] 20 | [`buildkit-proto`](buildkit-proto/README.md) - low-level protobuf interfaces to BuildKit. 21 | 22 | [Actions Link]: https://github.com/denzp/rust-buildkit/actions 23 | [Actions Status]: https://github.com/denzp/rust-buildkit/workflows/CI/badge.svg 24 | [buildkit-llb Docs Badge]: https://docs.rs/buildkit-llb/badge.svg 25 | [buildkit-llb Docs Link]: https://docs.rs/buildkit-llb/ 26 | [buildkit-llb Crates Badge]: https://img.shields.io/crates/v/buildkit-llb.svg 27 | [buildkit-llb Crates Link]: https://crates.io/crates/buildkit-llb 28 | [buildkit-frontend Docs Badge]: https://docs.rs/buildkit-frontend/badge.svg 29 | [buildkit-frontend Docs Link]: https://docs.rs/buildkit-frontend/ 30 | [buildkit-frontend Crates Badge]: https://img.shields.io/crates/v/buildkit-frontend.svg 31 | [buildkit-frontend Crates Link]: https://crates.io/crates/buildkit-frontend 32 | [buildkit-proto Docs Badge]: https://docs.rs/buildkit-proto/badge.svg 33 | [buildkit-proto Docs Link]: https://docs.rs/buildkit-proto/ 34 | [buildkit-proto Crates Badge]: https://img.shields.io/crates/v/buildkit-proto.svg 35 | [buildkit-proto Crates Link]: https://crates.io/crates/buildkit-proto 36 | -------------------------------------------------------------------------------- /buildkit-frontend/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [Unreleased] 8 | 9 | ## [0.3.0] - 2020-03-04 10 | ### Changed 11 | - Use `tonic` for gRPC. 12 | 13 | ## [0.2.2] - 2019-10-30 14 | ### Added 15 | - `Bridge::solve_with_cache` alternative that can use remote caching. 16 | 17 | ### Changed 18 | - Frontend can accept custom options that implement `serde::DeserializeOwned`. 19 | 20 | ## [0.2.1] - 2019-10-19 21 | ### Added 22 | - `Options::iter` method to get a list for values. 23 | 24 | ## [0.2.0] - 2019-10-06 25 | ### Added 26 | - Example frontends and integration testing. 27 | 28 | ### Changed 29 | - Define `Frontend` trait with `async_trait` proc-macro. 30 | 31 | ## [0.1.0] - 2019-09-30 32 | Initial release. 
33 | -------------------------------------------------------------------------------- /buildkit-frontend/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildkit-frontend" 3 | version = "0.3.0" 4 | authors = ["Denys Zariaiev "] 5 | edition = "2018" 6 | 7 | description = "Foundation for BuildKit frontends implemented in Rust" 8 | documentation = "https://docs.rs/buildkit-frontend" 9 | repository = "https://github.com/denzp/rust-buildkit" 10 | readme = "README.md" 11 | keywords = ["buildkit", "docker", "bridge"] 12 | categories = ["development-tools::build-utils", "api-bindings"] 13 | license = "MIT/Apache-2.0" 14 | 15 | [dependencies] 16 | bytes = "0.5" 17 | either = "1.5" 18 | failure = "0.1" 19 | futures = "0.3" 20 | libc = "0.2" 21 | log = "0.4" 22 | mio = "0.6" 23 | pin-project = "0.4" 24 | serde_json = "1.0" 25 | tonic = "0.1" 26 | tower = "0.3" 27 | 28 | [dependencies.tokio] 29 | version = "0.2" 30 | default-features = false 31 | features = ["io-std"] 32 | 33 | [dependencies.serde] 34 | version = "1.0" 35 | features = ["derive"] 36 | 37 | [dependencies.chrono] 38 | version = "0.4" 39 | features = ["serde"] 40 | 41 | [dependencies.buildkit-proto] 42 | version = "0.2" 43 | path = "../buildkit-proto" 44 | 45 | [dependencies.buildkit-llb] 46 | version = "0.2" 47 | path = "../buildkit-llb" 48 | 49 | [dev-dependencies] 50 | async-trait = "0.1" 51 | env_logger = "0.6" 52 | pretty_assertions = "0.6" 53 | regex = "1.3" 54 | url = "2.1" 55 | 56 | [dev-dependencies.tokio] 57 | version = "0.2" 58 | features = ["macros", "rt-core", "rt-threaded"] 59 | -------------------------------------------------------------------------------- /buildkit-frontend/README.md: -------------------------------------------------------------------------------- 1 | `buildkit-frontend` - foundation for BuildKit frontends implemented in Rust 2 | ======= 3 | 4 | [![Actions Status]][Actions Link] 5 | [![buildkit-frontend Crates Badge]][buildkit-frontend Crates Link] 6 | [![buildkit-frontend Docs Badge]][buildkit-frontend Docs Link] 7 | 8 | # Usage 9 | 10 | Please check [`cargo-wharf`][cargo-wharf Link] as an example; a minimal frontend skeleton is also sketched below. 11 | 12 | # License 13 | 14 | `buildkit-frontend` is primarily distributed under the terms of both the MIT license and 15 | the Apache License (Version 2.0), with portions covered by various BSD-like 16 | licenses. 17 | 18 | See LICENSE-APACHE, and LICENSE-MIT for details. 19 | 20 | # Contribution 21 | 22 | Unless you explicitly state otherwise, any contribution intentionally submitted 23 | for inclusion in `buildkit-frontend` by you, as defined in the Apache-2.0 license, 24 | shall be dual licensed as above, without any additional terms or conditions.
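# Example skeleton

A minimal sketch of a frontend entrypoint, condensed from the bundled `examples/reverse.rs`. The `MinimalFrontend` type, the `alpine:latest` base image, and the `/hello.txt` output path are illustrative placeholders only; a real frontend would derive its LLB graph from the incoming build `Options` instead of hard-coding it.

```rust
use async_trait::async_trait;
use failure::Error;

use buildkit_frontend::{run_frontend, Bridge, Frontend, FrontendOutput, Options};
use buildkit_llb::prelude::*;

struct MinimalFrontend;

#[async_trait]
impl Frontend for MinimalFrontend {
    async fn run(self, bridge: Bridge, _options: Options) -> Result<FrontendOutput, Error> {
        // Build a tiny LLB graph: take an image rootfs and write one file into it.
        let alpine = Source::image("alpine:latest").ref_counted();
        let destination = LayerPath::Other(alpine.output(), "/hello.txt");

        let llb = FileSystem::mkfile(OutputIdx(0), destination)
            .data(b"hello from a Rust frontend\n".to_vec())
            .into_operation()
            .ref_counted()
            .output(0);

        // Ask BuildKit to solve the graph and hand the resulting reference back as the build output.
        let output = bridge.solve(Terminal::with(llb)).await?;
        Ok(FrontendOutput::with_ref(output))
    }
}

#[tokio::main]
async fn main() {
    env_logger::init();

    if run_frontend(MinimalFrontend).await.is_err() {
        std::process::exit(1);
    }
}
```

Built into an image, such a frontend is selected from a build input via a `# syntax = <frontend image>` directive, exactly as the `.ci` scripts do for the bundled examples.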
25 | 26 | [Actions Link]: https://github.com/denzp/rust-buildkit/actions 27 | [Actions Status]: https://github.com/denzp/rust-buildkit/workflows/CI/badge.svg 28 | [buildkit-frontend Docs Badge]: https://docs.rs/buildkit-frontend/badge.svg 29 | [buildkit-frontend Docs Link]: https://docs.rs/buildkit-frontend/ 30 | [buildkit-frontend Crates Badge]: https://img.shields.io/crates/v/buildkit-frontend.svg 31 | [buildkit-frontend Crates Link]: https://crates.io/crates/buildkit-frontend 32 | [cargo-wharf Link]: https://github.com/denzp/cargo-wharf 33 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/download.dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.1-experimental 2 | 3 | FROM clux/muslrust:stable as builder 4 | USER root 5 | 6 | WORKDIR /rust-src 7 | COPY . /rust-src 8 | 9 | RUN --mount=type=cache,target=/rust-src/target \ 10 | --mount=type=cache,target=/root/.cargo/git \ 11 | --mount=type=cache,target=/root/.cargo/registry \ 12 | ["cargo", "build", "--release", "--target", "x86_64-unknown-linux-musl", "--example", "download"] 13 | 14 | RUN --mount=type=cache,target=/rust-src/target \ 15 | ["cp", "/rust-src/target/x86_64-unknown-linux-musl/release/examples/download", "/usr/local/bin/download"] 16 | 17 | FROM scratch 18 | COPY --from=builder /usr/local/bin/download /usr/local/bin/download 19 | ENTRYPOINT ["/usr/local/bin/download"] 20 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/download.input: -------------------------------------------------------------------------------- 1 | # syntax = rust-buildkit:download-frontend 2 | 3 | Download "https://raw.githubusercontent.com/denzp/rust-buildkit/7a72b7f4ef2f57500c7b82db7abac6ae1b8ab982/Cargo.toml" as "Cargo.toml". 4 | Download "https://raw.githubusercontent.com/denzp/rust-buildkit/7a72b7f4ef2f57500c7b82db7abac6ae1b8ab982/.github/workflows/rust-ci.yml" as "rust-ci.yml". 5 | 6 | # Our toy frontend can also handle comments! 7 | Download "https://raw.githubusercontent.com/denzp/rust-buildkit/7a72b7f4ef2f57500c7b82db7abac6ae1b8ab982/buildkit-llb/examples/scratch.rs" as "scratch.rs". 
8 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/download.output: -------------------------------------------------------------------------------- 1 | c56ccdd00d29b2e5b17964d4339d780ba3fed697471ad9c966a3a7d9c1897ff5 Cargo.toml 2 | af942adce88f1661f307f58615d22f37ea9a75052d2fdbdc4c5de0e9fd19cb61 rust-ci.yml 3 | 04684fa2764212bd9193d40ca06cd3222355d41c9e7d1388c1b4f0f885744374 scratch.rs 4 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/download.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use async_trait::async_trait; 4 | use failure::Error; 5 | use regex::Regex; 6 | use serde::Deserialize; 7 | use url::Url; 8 | 9 | use buildkit_frontend::oci::*; 10 | use buildkit_frontend::options::common::CacheOptionsEntry; 11 | use buildkit_frontend::run_frontend; 12 | use buildkit_frontend::{Bridge, Frontend, FrontendOutput, OutputRef}; 13 | 14 | use buildkit_llb::prelude::*; 15 | 16 | #[tokio::main(threaded_scheduler)] 17 | async fn main() { 18 | env_logger::init(); 19 | 20 | if let Err(_) = run_frontend(DownloadFrontend).await { 21 | std::process::exit(1); 22 | } 23 | } 24 | 25 | #[derive(Debug, Deserialize)] 26 | #[serde(rename_all = "kebab-case")] 27 | struct DownloadOptions { 28 | filename: PathBuf, 29 | 30 | /// New approach to specify cache imports. 31 | #[serde(default)] 32 | cache_imports: Vec, 33 | 34 | /// Legacy convention to specify cache imports. 35 | #[serde(default)] 36 | #[serde(deserialize_with = "CacheOptionsEntry::from_legacy_list")] 37 | cache_from: Vec, 38 | } 39 | 40 | struct DownloadFrontend; 41 | 42 | #[async_trait] 43 | impl Frontend for DownloadFrontend { 44 | async fn run(self, bridge: Bridge, options: DownloadOptions) -> Result { 45 | Ok(FrontendOutput::with_spec_and_ref( 46 | Self::image_spec(), 47 | Self::solve(&bridge, options).await?, 48 | )) 49 | } 50 | } 51 | 52 | const OUTPUT_DIR: &str = "/opt"; 53 | 54 | impl DownloadFrontend { 55 | fn image_spec() -> ImageSpecification { 56 | ImageSpecification { 57 | created: None, 58 | author: None, 59 | 60 | architecture: Architecture::Amd64, 61 | os: OperatingSystem::Linux, 62 | 63 | config: Some(ImageConfig { 64 | entrypoint: Some(vec!["/bin/sh".into()]), 65 | cmd: Some(vec!["-c".into(), "/usr/bin/sha256sum *".into()]), 66 | env: None, 67 | user: None, 68 | working_dir: Some(OUTPUT_DIR.into()), 69 | 70 | labels: None, 71 | volumes: None, 72 | exposed_ports: None, 73 | stop_signal: None, 74 | }), 75 | 76 | rootfs: None, 77 | history: None, 78 | } 79 | } 80 | 81 | async fn solve(bridge: &Bridge, options: DownloadOptions) -> Result { 82 | let dockerfile_source = Source::local("dockerfile"); 83 | let dockerfile_layer = bridge 84 | .solve(Terminal::with(dockerfile_source.output())) 85 | .await?; 86 | 87 | let dockerfile_contents = String::from_utf8( 88 | bridge 89 | .read_file(&dockerfile_layer, &options.filename, None) 90 | .await?, 91 | )?; 92 | 93 | bridge 94 | .solve_with_cache( 95 | Terminal::with(Self::construct_llb(dockerfile_contents)?), 96 | options.cache_entries(), 97 | ) 98 | .await 99 | } 100 | 101 | fn construct_llb(dockerfile: String) -> Result, Error> { 102 | let alpine = Source::image("alpine:latest").ref_counted(); 103 | 104 | let builder_rootfs = Command::run("apk") 105 | .args(&["add", "curl"]) 106 | .custom_name("Installing curl") 107 | .mount(Mount::Layer(OutputIdx(0), alpine.output(), "/")) 108 | .ref_counted(); 
109 | 110 | Self::extract_files(&dockerfile) 111 | .map(move |result| { 112 | let (url, relative_path) = result?; 113 | let full_path = PathBuf::from(OUTPUT_DIR).join(&relative_path); 114 | 115 | let op = Command::run("curl") 116 | .args(&[&url.to_string(), "-o", &full_path.to_string_lossy()]) 117 | .mount(Mount::ReadOnlyLayer(builder_rootfs.output(0), "/")) 118 | .mount(Mount::Scratch(OutputIdx(0), OUTPUT_DIR)) 119 | .custom_name(format!("Downloading '{}'", relative_path.display())) 120 | .ref_counted() 121 | .output(0); 122 | 123 | Ok((op, relative_path, full_path)) 124 | }) 125 | .try_fold( 126 | FileSystem::sequence().custom_name("Copying assets into output directory"), 127 | |output, result: Result<_, Error>| { 128 | let (op, relative_path, full_path) = result?; 129 | 130 | let (out_index, out_layer) = match output.last_output_index() { 131 | Some(last) => (last + 1, LayerPath::Own(OwnOutputIdx(last), &full_path)), 132 | None => (0, LayerPath::Other(alpine.output(), &full_path)), 133 | }; 134 | 135 | Ok(output.append( 136 | FileSystem::copy() 137 | .from(LayerPath::Other(op, &relative_path)) 138 | .to(OutputIdx(out_index), out_layer) 139 | .create_path(true), 140 | )) 141 | }, 142 | ) 143 | .map(|llb| llb.ref_counted().last_output().unwrap()) 144 | } 145 | 146 | fn extract_files( 147 | dockerfile: &str, 148 | ) -> impl Iterator> + '_ { 149 | let cmd_regex = Regex::new(r#"Download\s+"(.+)"\s+as\s+"(.+)""#).unwrap(); 150 | 151 | dockerfile.lines().filter_map(move |line| { 152 | let captures = cmd_regex.captures(&line)?; 153 | Some(Url::parse(&captures[1]).map(|url| (url, captures[2].into()))) 154 | }) 155 | } 156 | } 157 | 158 | impl DownloadOptions { 159 | pub fn cache_entries(&self) -> &[CacheOptionsEntry] { 160 | if !self.cache_imports.is_empty() { 161 | return &self.cache_imports; 162 | } 163 | 164 | &self.cache_from 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/reverse.dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.1-experimental 2 | 3 | FROM clux/muslrust:stable as builder 4 | USER root 5 | 6 | WORKDIR /rust-src 7 | COPY . /rust-src 8 | 9 | RUN --mount=type=cache,target=/rust-src/target \ 10 | --mount=type=cache,target=/root/.cargo/git \ 11 | --mount=type=cache,target=/root/.cargo/registry \ 12 | ["cargo", "build", "--release", "--target", "x86_64-unknown-linux-musl", "--example", "reverse"] 13 | 14 | RUN --mount=type=cache,target=/rust-src/target \ 15 | ["cp", "/rust-src/target/x86_64-unknown-linux-musl/release/examples/reverse", "/usr/local/bin/reverse"] 16 | 17 | FROM scratch 18 | COPY --from=builder /usr/local/bin/reverse /usr/local/bin/reverse 19 | ENTRYPOINT ["/usr/local/bin/reverse"] 20 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/reverse.input: -------------------------------------------------------------------------------- 1 | # syntax = rust-buildkit:reverse-frontend 2 | 3 | Hello from the reverse example "dockerfile". 4 | Every line from this file has to be printed in reverse when you run the resulting image. 
5 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/reverse.output: -------------------------------------------------------------------------------- 1 | dnetnorf-esrever:tikdliub-tsur = xatnys # 2 | 3 | ."elifrekcod" elpmaxe esrever eht morf olleH 4 | .egami gnitluser eht nur uoy nehw esrever ni detnirp eb ot sah elif siht morf enil yrevE 5 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/reverse.rs: -------------------------------------------------------------------------------- 1 | use std::iter::once; 2 | 3 | use async_trait::async_trait; 4 | use failure::Error; 5 | 6 | use buildkit_frontend::oci::*; 7 | use buildkit_frontend::run_frontend; 8 | use buildkit_frontend::{Bridge, Frontend, FrontendOutput, Options, OutputRef}; 9 | 10 | use buildkit_llb::prelude::*; 11 | 12 | #[tokio::main] 13 | async fn main() { 14 | env_logger::init(); 15 | 16 | if let Err(_) = run_frontend(ReverseFrontend).await { 17 | std::process::exit(1); 18 | } 19 | } 20 | 21 | struct ReverseFrontend; 22 | 23 | #[async_trait] 24 | impl Frontend for ReverseFrontend { 25 | async fn run(self, bridge: Bridge, options: Options) -> Result { 26 | Ok(FrontendOutput::with_spec_and_ref( 27 | Self::image_spec(), 28 | Self::solve(&bridge, options.get("filename").unwrap()).await?, 29 | )) 30 | } 31 | } 32 | 33 | const OUTPUT_FILENAME: &str = "/reverse.dockerfile"; 34 | 35 | impl ReverseFrontend { 36 | fn image_spec() -> ImageSpecification { 37 | ImageSpecification { 38 | created: None, 39 | author: None, 40 | 41 | architecture: Architecture::Amd64, 42 | os: OperatingSystem::Linux, 43 | 44 | config: Some(ImageConfig { 45 | entrypoint: None, 46 | cmd: Some(vec!["/bin/cat".into(), OUTPUT_FILENAME.into()]), 47 | env: None, 48 | user: None, 49 | working_dir: None, 50 | 51 | labels: None, 52 | volumes: None, 53 | exposed_ports: None, 54 | stop_signal: None, 55 | }), 56 | 57 | rootfs: None, 58 | history: None, 59 | } 60 | } 61 | 62 | async fn solve(bridge: &Bridge, dockerfile_path: &str) -> Result { 63 | let dockerfile_source = Source::local("dockerfile"); 64 | let dockerfile_layer = bridge 65 | .solve(Terminal::with(dockerfile_source.output())) 66 | .await?; 67 | 68 | let dockerfile_contents = bridge 69 | .read_file(&dockerfile_layer, dockerfile_path, None) 70 | .await?; 71 | 72 | let transformed_contents: String = { 73 | String::from_utf8_lossy(&dockerfile_contents) 74 | .lines() 75 | .into_iter() 76 | .map(|line| { 77 | line.trim() 78 | .chars() 79 | .rev() 80 | .chain(once('\n')) 81 | .collect::() 82 | }) 83 | .collect() 84 | }; 85 | 86 | let llb = { 87 | let alpine = Source::image("alpine:latest").ref_counted(); 88 | let destination = LayerPath::Other(alpine.output(), OUTPUT_FILENAME); 89 | 90 | FileSystem::mkfile(OutputIdx(0), destination) 91 | .data(transformed_contents.into_bytes()) 92 | .into_operation() 93 | .ref_counted() 94 | .output(0) 95 | }; 96 | 97 | bridge.solve(Terminal::with(llb)).await 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/ssh-mount.dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.1-experimental 2 | 3 | FROM clux/muslrust:stable as builder 4 | USER root 5 | 6 | WORKDIR /rust-src 7 | COPY . 
/rust-src 8 | 9 | RUN --mount=type=cache,target=/rust-src/target \ 10 | --mount=type=cache,target=/root/.cargo/git \ 11 | --mount=type=cache,target=/root/.cargo/registry \ 12 | ["cargo", "build", "--release", "--target", "x86_64-unknown-linux-musl", "--example", "ssh-mount"] 13 | 14 | RUN --mount=type=cache,target=/rust-src/target \ 15 | ["cp", "/rust-src/target/x86_64-unknown-linux-musl/release/examples/ssh-mount", "/usr/local/bin/ssh-mount"] 16 | 17 | FROM scratch 18 | COPY --from=builder /usr/local/bin/ssh-mount /usr/local/bin/ssh-mount 19 | ENTRYPOINT ["/usr/local/bin/ssh-mount"] 20 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/ssh-mount.input: -------------------------------------------------------------------------------- 1 | # syntax = rust-buildkit:ssh-mount-frontend 2 | 3 | REPO: ssh://git@github.com/BurntSushi/ripgrep.git 4 | TAG: 0.10.0 5 | TEST: rg --version | head -1 6 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/ssh-mount.output: -------------------------------------------------------------------------------- 1 | ripgrep 0.10.0 (rev 8a7db1a918) 2 | -------------------------------------------------------------------------------- /buildkit-frontend/examples/ssh-mount.rs: -------------------------------------------------------------------------------- 1 | use async_trait::async_trait; 2 | use failure::{bail, Error}; 3 | 4 | use buildkit_frontend::oci::*; 5 | use buildkit_frontend::run_frontend; 6 | use buildkit_frontend::{Bridge, Frontend, FrontendOutput, Options, OutputRef}; 7 | 8 | use buildkit_llb::prelude::*; 9 | 10 | #[tokio::main(basic_scheduler)] 11 | async fn main() { 12 | env_logger::init(); 13 | 14 | if let Err(_) = run_frontend(ReverseFrontend).await { 15 | std::process::exit(1); 16 | } 17 | } 18 | 19 | struct ReverseFrontend; 20 | 21 | const OUTPUT_FILENAME: &str = "/test.out"; 22 | const PATH: &str = 23 | "/usr/local/cargo/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"; 24 | 25 | #[async_trait] 26 | impl Frontend for ReverseFrontend { 27 | async fn run(self, bridge: Bridge, options: Options) -> Result { 28 | Ok(FrontendOutput::with_spec_and_ref( 29 | Self::image_spec(), 30 | Self::solve(&bridge, options.get("filename").unwrap()).await?, 31 | )) 32 | } 33 | } 34 | 35 | impl ReverseFrontend { 36 | fn image_spec() -> ImageSpecification { 37 | ImageSpecification { 38 | created: None, 39 | author: None, 40 | 41 | architecture: Architecture::Amd64, 42 | os: OperatingSystem::Linux, 43 | 44 | config: Some(ImageConfig { 45 | entrypoint: None, 46 | cmd: Some(vec!["/bin/cat".into(), OUTPUT_FILENAME.into()]), 47 | env: None, 48 | user: None, 49 | working_dir: Some("/output".into()), 50 | 51 | labels: None, 52 | volumes: None, 53 | exposed_ports: None, 54 | stop_signal: None, 55 | }), 56 | 57 | rootfs: None, 58 | history: None, 59 | } 60 | } 61 | 62 | async fn solve(bridge: &Bridge, dockerfile_path: &str) -> Result { 63 | let dockerfile_source = Source::local("dockerfile"); 64 | let dockerfile_layer = bridge 65 | .solve(Terminal::with(dockerfile_source.output())) 66 | .await?; 67 | 68 | let dockerfile_contents = bridge 69 | .read_file(&dockerfile_layer, dockerfile_path, None) 70 | .await?; 71 | 72 | let dockerfile_contents = String::from_utf8_lossy(&dockerfile_contents); 73 | 74 | let mut repo = None; 75 | let mut tag = None; 76 | let mut test = None; 77 | 78 | for line in dockerfile_contents.lines() { 79 | if 
line.starts_with("REPO:") { 80 | repo = Some(line[5..].trim()); 81 | } 82 | 83 | if line.starts_with("TAG:") { 84 | tag = Some(line[4..].trim()); 85 | } 86 | 87 | if line.starts_with("TEST:") { 88 | test = Some(line[5..].trim()); 89 | } 90 | } 91 | 92 | let rootfs = Source::image("rust:latest"); 93 | let install_command = match (repo, tag) { 94 | (Some(repo), Some(tag)) => Command::run("cargo") 95 | .args(&["install", "--git", repo, "--tag", tag]) 96 | .mount(Mount::Layer(OutputIdx(0), rootfs.output(), "/")) 97 | .mount(Mount::OptionalSshAgent("/tmp/ssh_agent.0")) 98 | .env("PATH", PATH) 99 | .env("RUSTUP_HOME", "/usr/local/rustup") 100 | .env("CARGO_HOME", "/usr/local/cargo") 101 | .env("RUST_VERSION", "1.40.0") 102 | .env("SSH_AUTH_SOCK", "/tmp/ssh_agent.0"), 103 | 104 | _ => { 105 | bail!("Missing REPO or TAG directives!"); 106 | } 107 | }; 108 | 109 | let test_command = if let Some(test) = test { 110 | Command::run("/bin/sh") 111 | .args(&["-c", &format!("{} > {}", test, OUTPUT_FILENAME)]) 112 | .mount(Mount::Layer(OutputIdx(0), install_command.output(0), "/")) 113 | .env("PATH", PATH) 114 | } else { 115 | bail!("Missing TEST directive!"); 116 | }; 117 | 118 | bridge.solve(Terminal::with(test_command.output(0))).await 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /buildkit-frontend/src/bridge.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::path::PathBuf; 3 | use std::sync::Arc; 4 | 5 | use failure::{bail, format_err, Error, ResultExt}; 6 | use log::*; 7 | use tokio::sync::Mutex; 8 | 9 | use tonic::transport::channel::Channel; 10 | use tonic::Request; 11 | 12 | use buildkit_proto::google::rpc::Status; 13 | use buildkit_proto::moby::buildkit::v1::frontend::llb_bridge_client::LlbBridgeClient; 14 | use buildkit_proto::moby::buildkit::v1::frontend::{ 15 | result::Result as RefResult, ReadFileRequest, ResolveImageConfigRequest, Result as Output, 16 | ReturnRequest, SolveRequest, 17 | }; 18 | 19 | pub use buildkit_llb::ops::source::{ImageSource, ResolveMode}; 20 | pub use buildkit_llb::ops::Terminal; 21 | pub use buildkit_proto::moby::buildkit::v1::frontend::FileRange; 22 | 23 | use crate::error::ErrorCode; 24 | use crate::oci::ImageSpecification; 25 | use crate::options::common::CacheOptionsEntry; 26 | use crate::utils::OutputRef; 27 | 28 | #[derive(Clone)] 29 | pub struct Bridge { 30 | client: Arc>>, 31 | } 32 | 33 | impl Bridge { 34 | pub(crate) fn new(channel: Channel) -> Self { 35 | Self { 36 | client: Arc::new(Mutex::new(LlbBridgeClient::new(channel))), 37 | } 38 | } 39 | 40 | pub async fn resolve_image_config( 41 | &self, 42 | image: &ImageSource, 43 | log: Option<&str>, 44 | ) -> Result<(String, ImageSpecification), Error> { 45 | let request = ResolveImageConfigRequest { 46 | r#ref: image.canonical_name(), 47 | platform: None, 48 | resolve_mode: image.resolve_mode().unwrap_or_default().to_string(), 49 | log_name: log.unwrap_or_default().into(), 50 | }; 51 | 52 | debug!("requesting to resolve an image: {:?}", request); 53 | let response = { 54 | self.client 55 | .lock() 56 | .await 57 | .resolve_image_config(Request::new(request)) 58 | .await 59 | .unwrap() 60 | .into_inner() 61 | }; 62 | 63 | Ok(( 64 | response.digest, 65 | serde_json::from_slice(&response.config) 66 | .context("Unable to parse image specification")?, 67 | )) 68 | } 69 | 70 | pub async fn solve<'a, 'b: 'a>(&'a self, graph: Terminal<'b>) -> Result { 71 | 
self.solve_with_cache(graph, &[]).await 72 | } 73 | 74 | pub async fn solve_with_cache<'a, 'b: 'a>( 75 | &'a self, 76 | graph: Terminal<'b>, 77 | cache: &[CacheOptionsEntry], 78 | ) -> Result { 79 | debug!("serializing a graph to request"); 80 | let request = SolveRequest { 81 | definition: Some(graph.into_definition()), 82 | exporter_attr: vec![], 83 | allow_result_return: true, 84 | cache_imports: cache.iter().cloned().map(Into::into).collect(), 85 | 86 | ..Default::default() 87 | }; 88 | 89 | debug!("solving with cache from: {:?}", cache); 90 | debug!("requesting to solve a graph"); 91 | let response = { 92 | self.client 93 | .lock() 94 | .await 95 | .solve(Request::new(request)) 96 | .await 97 | .context("Unable to solve the graph")? 98 | .into_inner() 99 | .result 100 | .ok_or_else(|| format_err!("Unable to extract solve result"))? 101 | }; 102 | 103 | debug!("got response: {:#?}", response); 104 | 105 | let inner = { 106 | response 107 | .result 108 | .ok_or_else(|| format_err!("Unable to extract solve result"))? 109 | }; 110 | 111 | match inner { 112 | RefResult::Ref(inner) => Ok(OutputRef(inner)), 113 | other => bail!("Unexpected solve response: {:?}", other), 114 | } 115 | } 116 | 117 | pub async fn read_file<'a, 'b: 'a, P>( 118 | &'a self, 119 | layer: &'b OutputRef, 120 | path: P, 121 | range: Option, 122 | ) -> Result, Error> 123 | where 124 | P: Into, 125 | { 126 | let file_path = path.into().display().to_string(); 127 | debug!("requesting a file contents: {:#?}", file_path); 128 | 129 | let request = ReadFileRequest { 130 | r#ref: layer.0.clone(), 131 | file_path, 132 | range, 133 | }; 134 | 135 | let response = { 136 | self.client 137 | .lock() 138 | .await 139 | .read_file(Request::new(request)) 140 | .await 141 | .context("Unable to read the file")? 142 | .into_inner() 143 | .data 144 | }; 145 | 146 | Ok(response) 147 | } 148 | 149 | pub(crate) async fn finish_with_success( 150 | self, 151 | output: OutputRef, 152 | config: Option, 153 | ) -> Result<(), Error> { 154 | let mut metadata = HashMap::new(); 155 | 156 | if let Some(config) = config { 157 | metadata.insert("containerimage.config".into(), serde_json::to_vec(&config)?); 158 | } 159 | 160 | let request = ReturnRequest { 161 | error: None, 162 | result: Some(Output { 163 | result: Some(RefResult::Ref(output.0)), 164 | metadata, 165 | }), 166 | }; 167 | 168 | self.client 169 | .lock() 170 | .await 171 | .r#return(Request::new(request)) 172 | .await?; 173 | 174 | // TODO: gracefully shutdown the HTTP/2 connection 175 | 176 | Ok(()) 177 | } 178 | 179 | pub(crate) async fn finish_with_error(self, code: ErrorCode, message: S) -> Result<(), Error> 180 | where 181 | S: Into, 182 | { 183 | let request = ReturnRequest { 184 | result: None, 185 | error: Some(Status { 186 | code: code as i32, 187 | message: message.into(), 188 | details: vec![], 189 | }), 190 | }; 191 | 192 | debug!("sending an error result: {:#?}", request); 193 | self.client 194 | .lock() 195 | .await 196 | .r#return(Request::new(request)) 197 | .await?; 198 | 199 | // TODO: gracefully shutdown the HTTP/2 connection 200 | 201 | Ok(()) 202 | } 203 | } 204 | -------------------------------------------------------------------------------- /buildkit-frontend/src/error.rs: -------------------------------------------------------------------------------- 1 | /// https://godoc.org/google.golang.org/grpc/codes#Code 2 | pub enum ErrorCode { 3 | /// OK is returned on success. 
4 | OK = 0, 5 | 6 | /// Canceled indicates the operation was canceled (typically by the caller). 7 | Canceled = 1, 8 | 9 | /// Unknown error. An example of where this error may be returned is 10 | /// if a Status value received from another address space belongs to 11 | /// an error-space that is not known in this address space. Also 12 | /// errors raised by APIs that do not return enough error information 13 | /// may be converted to this error. 14 | Unknown = 2, 15 | 16 | /// InvalidArgument indicates client specified an invalid argument. 17 | /// Note that this differs from FailedPrecondition. It indicates arguments 18 | /// that are problematic regardless of the state of the system 19 | /// (e.g., a malformed file name). 20 | InvalidArgument = 3, 21 | 22 | /// DeadlineExceeded means operation expired before completion. 23 | /// For operations that change the state of the system, this error may be 24 | /// returned even if the operation has completed successfully. For 25 | /// example, a successful response from a server could have been delayed 26 | /// long enough for the deadline to expire. 27 | DeadlineExceeded = 4, 28 | 29 | /// NotFound means some requested entity (e.g., file or directory) was 30 | /// not found. 31 | NotFound = 5, 32 | 33 | /// AlreadyExists means an attempt to create an entity failed because one 34 | /// already exists. 35 | AlreadyExists = 6, 36 | 37 | /// PermissionDenied indicates the caller does not have permission to 38 | /// execute the specified operation. It must not be used for rejections 39 | /// caused by exhausting some resource (use ResourceExhausted 40 | /// instead for those errors). It must not be 41 | /// used if the caller cannot be identified (use Unauthenticated 42 | /// instead for those errors). 43 | PermissionDenied = 7, 44 | 45 | /// ResourceExhausted indicates some resource has been exhausted, perhaps 46 | /// a per-user quota, or perhaps the entire file system is out of space. 47 | ResourceExhausted = 8, 48 | 49 | /// FailedPrecondition indicates operation was rejected because the 50 | /// system is not in a state required for the operation's execution. 51 | /// For example, directory to be deleted may be non-empty, an rmdir 52 | /// operation is applied to a non-directory, etc. 53 | /// 54 | /// A litmus test that may help a service implementor in deciding 55 | /// between FailedPrecondition, Aborted, and Unavailable: 56 | /// (a) Use Unavailable if the client can retry just the failing call. 57 | /// (b) Use Aborted if the client should retry at a higher-level 58 | /// (e.g., restarting a read-modify-write sequence). 59 | /// (c) Use FailedPrecondition if the client should not retry until 60 | /// the system state has been explicitly fixed. E.g., if an "rmdir" 61 | /// fails because the directory is non-empty, FailedPrecondition 62 | /// should be returned since the client should not retry unless 63 | /// they have first fixed up the directory by deleting files from it. 64 | /// (d) Use FailedPrecondition if the client performs conditional 65 | /// REST Get/Update/Delete on a resource and the resource on the 66 | /// server does not match the condition. E.g., conflicting 67 | /// read-modify-write on the same resource. 68 | FailedPrecondition = 9, 69 | 70 | /// Aborted indicates the operation was aborted, typically due to a 71 | /// concurrency issue like sequencer check failures, transaction aborts, 72 | /// etc. 73 | /// 74 | /// See litmus test above for deciding between FailedPrecondition, 75 | /// Aborted, and Unavailable. 
76 | Aborted = 10, 77 | 78 | /// OutOfRange means operation was attempted past the valid range. 79 | /// E.g., seeking or reading past end of file. 80 | /// 81 | /// Unlike InvalidArgument, this error indicates a problem that may 82 | /// be fixed if the system state changes. For example, a 32-bit file 83 | /// system will generate InvalidArgument if asked to read at an 84 | /// offset that is not in the range [0,2^32-1], but it will generate 85 | /// OutOfRange if asked to read from an offset past the current 86 | /// file size. 87 | /// 88 | /// There is a fair bit of overlap between FailedPrecondition and 89 | /// OutOfRange. We recommend using OutOfRange (the more specific 90 | /// error) when it applies so that callers who are iterating through 91 | /// a space can easily look for an OutOfRange error to detect when 92 | /// they are done. 93 | OutOfRange = 11, 94 | 95 | /// Unimplemented indicates operation is not implemented or not 96 | /// supported/enabled in this service. 97 | Unimplemented = 12, 98 | 99 | /// Internal errors. Means some invariants expected by underlying 100 | /// system has been broken. If you see one of these errors, 101 | /// something is very broken. 102 | Internal = 13, 103 | 104 | /// Unavailable indicates the service is currently unavailable. 105 | /// This is a most likely a transient condition and may be corrected 106 | /// by retrying with a backoff. Note that it is not always safe to retry 107 | /// non-idempotent operations. 108 | /// 109 | /// See litmus test above for deciding between FailedPrecondition, 110 | /// Aborted, and Unavailable. 111 | Unavailable = 14, 112 | 113 | /// DataLoss indicates unrecoverable data loss or corruption. 114 | DataLoss = 15, 115 | 116 | /// Unauthenticated indicates the request does not have valid 117 | /// authentication credentials for the operation. 118 | Unauthenticated = 16, 119 | } 120 | -------------------------------------------------------------------------------- /buildkit-frontend/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings)] 2 | #![deny(clippy::all)] 3 | 4 | use failure::{Error, ResultExt}; 5 | use log::*; 6 | use serde::de::DeserializeOwned; 7 | use tonic::transport::Endpoint; 8 | use tower::service_fn; 9 | 10 | mod bridge; 11 | mod error; 12 | mod stdio; 13 | mod utils; 14 | 15 | pub mod oci; 16 | pub mod options; 17 | 18 | use oci::ImageSpecification; 19 | 20 | pub use self::bridge::Bridge; 21 | pub use self::error::ErrorCode; 22 | pub use self::options::Options; 23 | pub use self::stdio::{stdio_connector, StdioSocket}; 24 | pub use self::utils::{ErrorWithCauses, OutputRef}; 25 | 26 | #[tonic::async_trait] 27 | pub trait Frontend 28 | where 29 | O: DeserializeOwned, 30 | { 31 | async fn run(self, bridge: Bridge, options: O) -> Result; 32 | } 33 | 34 | pub struct FrontendOutput { 35 | output: OutputRef, 36 | image_spec: Option, 37 | } 38 | 39 | impl FrontendOutput { 40 | pub fn with_ref(output: OutputRef) -> Self { 41 | Self { 42 | output, 43 | image_spec: None, 44 | } 45 | } 46 | 47 | pub fn with_spec_and_ref(spec: ImageSpecification, output: OutputRef) -> Self { 48 | Self { 49 | output, 50 | image_spec: Some(spec), 51 | } 52 | } 53 | } 54 | 55 | pub async fn run_frontend(frontend: F) -> Result<(), Error> 56 | where 57 | F: Frontend, 58 | O: DeserializeOwned, 59 | { 60 | let channel = { 61 | Endpoint::from_static("http://[::]:50051") 62 | .connect_with_connector(service_fn(stdio_connector)) 63 | .await? 
64 | }; 65 | 66 | let bridge = Bridge::new(channel); 67 | 68 | match frontend_entrypoint(&bridge, frontend).await { 69 | Ok(output) => { 70 | bridge 71 | .finish_with_success(output.output, output.image_spec) 72 | .await 73 | .context("Unable to send a success result")?; 74 | } 75 | 76 | Err(error) => { 77 | let error = ErrorWithCauses::multi_line(error); 78 | 79 | error!("Frontend entrypoint failed: {}", error); 80 | 81 | // https://godoc.org/google.golang.org/grpc/codes#Code 82 | bridge 83 | .finish_with_error( 84 | ErrorCode::Unknown, 85 | ErrorWithCauses::single_line(error.into_inner()).to_string(), 86 | ) 87 | .await 88 | .context("Unable to send an error result")?; 89 | } 90 | } 91 | 92 | // TODO: gracefully shutdown the HTTP/2 connection 93 | 94 | Ok(()) 95 | } 96 | 97 | async fn frontend_entrypoint(bridge: &Bridge, frontend: F) -> Result 98 | where 99 | F: Frontend, 100 | O: DeserializeOwned, 101 | { 102 | let options = options::from_env(std::env::vars()).context("Unable to parse options")?; 103 | 104 | debug!("running a frontend entrypoint"); 105 | frontend.run(bridge.clone(), options).await 106 | } 107 | -------------------------------------------------------------------------------- /buildkit-frontend/src/oci.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::convert::TryFrom; 3 | use std::path::PathBuf; 4 | 5 | use chrono::prelude::*; 6 | use serde::{Deserialize, Serialize}; 7 | use serde_json::Value; 8 | 9 | // https://github.com/opencontainers/image-spec/blob/v1.0.1/config.md 10 | 11 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 12 | pub struct ImageSpecification { 13 | /// An combined date and time at which the image was created. 14 | #[serde(skip_serializing_if = "Option::is_none")] 15 | pub created: Option>, 16 | 17 | /// Gives the name and/or email address of the person or entity which created and is responsible for maintaining the image. 18 | #[serde(skip_serializing_if = "Option::is_none")] 19 | pub author: Option, 20 | 21 | /// The CPU architecture which the binaries in this image are built to run on. 22 | pub architecture: Architecture, 23 | 24 | /// The name of the operating system which the image is built to run on. 25 | pub os: OperatingSystem, 26 | 27 | /// The execution parameters which should be used as a base when running a container using the image. 28 | /// This field can be `None`, in which case any execution parameters should be specified at creation of the container. 29 | #[serde(skip_serializing_if = "Option::is_none")] 30 | pub config: Option, 31 | 32 | /// The rootfs key references the layer content addresses used by the image. 33 | #[serde(skip_serializing_if = "Option::is_none")] 34 | pub rootfs: Option, 35 | 36 | /// Describes the history of each layer. 
37 | #[serde(skip_serializing_if = "Option::is_none")] 38 | pub history: Option>, 39 | } 40 | 41 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 42 | #[serde(rename_all = "lowercase")] 43 | pub enum Architecture { 44 | /// 64-bit x86, the most mature port 45 | Amd64, 46 | 47 | /// 32-bit x86 48 | I386, 49 | 50 | /// 32-bit ARM 51 | ARM, 52 | 53 | /// 64-bit ARM 54 | ARM64, 55 | 56 | /// PowerPC 64-bit, little-endian 57 | PPC64le, 58 | 59 | /// PowerPC 64-bit, big-endian 60 | PPC64, 61 | 62 | /// MIPS 64-bit, little-endian 63 | Mips64le, 64 | 65 | /// MIPS 64-bit, big-endian 66 | Mips64, 67 | 68 | /// MIPS 32-bit, little-endian 69 | Mipsle, 70 | 71 | /// MIPS 32-bit, big-endian 72 | Mips, 73 | 74 | /// IBM System z 64-bit, big-endian 75 | S390x, 76 | } 77 | 78 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 79 | #[serde(rename_all = "lowercase")] 80 | pub enum OperatingSystem { 81 | Darwin, 82 | Dragonfly, 83 | Freebsd, 84 | Linux, 85 | Netbsd, 86 | Openbsd, 87 | Plan9, 88 | Solaris, 89 | Windows, 90 | } 91 | 92 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 93 | #[serde(from = "RawImageConfig")] 94 | #[serde(into = "RawImageConfig")] 95 | pub struct ImageConfig { 96 | /// The username or UID which is a platform-specific structure that allows specific control over which user the process run as. 97 | pub user: Option, 98 | 99 | /// A set of ports to expose from a container running this image. 100 | pub exposed_ports: Option>, 101 | 102 | /// Environment variables for the process to run with. 103 | pub env: Option>, 104 | 105 | /// A list of arguments to use as the command to execute when the container starts. 106 | pub entrypoint: Option>, 107 | 108 | /// Default arguments to the entrypoint of the container. 109 | pub cmd: Option>, 110 | 111 | /// A set of directories describing where the process is likely write data specific to a container instance. 112 | pub volumes: Option>, 113 | 114 | /// Sets the current working directory of the entrypoint process in the container. 115 | pub working_dir: Option, 116 | 117 | /// The field contains arbitrary metadata for the container. 118 | pub labels: Option>, 119 | 120 | /// The field contains the system call signal that will be sent to the container to exit. 121 | pub stop_signal: Option, 122 | } 123 | 124 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 125 | #[serde(rename_all = "PascalCase")] 126 | struct RawImageConfig { 127 | #[serde(skip_serializing_if = "Option::is_none")] 128 | user: Option, 129 | 130 | #[serde(skip_serializing_if = "Option::is_none")] 131 | exposed_ports: Option>, 132 | 133 | #[serde(skip_serializing_if = "Option::is_none")] 134 | env: Option>, 135 | 136 | #[serde(skip_serializing_if = "Option::is_none")] 137 | entrypoint: Option>, 138 | 139 | #[serde(skip_serializing_if = "Option::is_none")] 140 | cmd: Option>, 141 | 142 | #[serde(skip_serializing_if = "Option::is_none")] 143 | volumes: Option>, 144 | 145 | #[serde(skip_serializing_if = "Option::is_none")] 146 | working_dir: Option, 147 | 148 | #[serde(skip_serializing_if = "Option::is_none")] 149 | labels: Option>, 150 | 151 | #[serde(skip_serializing_if = "Option::is_none")] 152 | stop_signal: Option, 153 | } 154 | 155 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 156 | pub struct ImageRootfs { 157 | /// Must be set to `RootfsType::Layers`. 158 | #[serde(rename = "type")] 159 | pub diff_type: RootfsType, 160 | 161 | /// An array of layer content hashes (DiffIDs), in order from first to last. 
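/// Each entry is a digest string of the form `"sha256:<hex>"` (see the reference specs under
/// `tests/` for concrete values).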
162 | pub diff_ids: Vec, 163 | } 164 | 165 | #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] 166 | pub struct LayerHistoryItem { 167 | /// A combined date and time at which the layer was created. 168 | #[serde(skip_serializing_if = "Option::is_none")] 169 | pub created: Option>, 170 | 171 | /// The author of the build point. 172 | #[serde(skip_serializing_if = "Option::is_none")] 173 | pub author: Option, 174 | 175 | /// The command which created the layer. 176 | #[serde(skip_serializing_if = "Option::is_none")] 177 | pub created_by: Option, 178 | 179 | /// A custom message set when creating the layer. 180 | #[serde(skip_serializing_if = "Option::is_none")] 181 | pub comment: Option, 182 | 183 | /// This field is used to mark if the history item created a filesystem diff. 184 | /// It is set to true if this history item doesn't correspond to an actual layer in the rootfs section 185 | /// (for example, Dockerfile's ENV command results in no change to the filesystem). 186 | #[serde(skip_serializing_if = "Option::is_none")] 187 | pub empty_layer: Option, 188 | } 189 | 190 | #[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Serialize, Deserialize)] 191 | #[serde(try_from = "String")] 192 | #[serde(into = "String")] 193 | pub enum ExposedPort { 194 | Tcp(u16), 195 | Udp(u16), 196 | } 197 | 198 | impl TryFrom for ExposedPort { 199 | type Error = std::num::ParseIntError; 200 | 201 | fn try_from(value: String) -> Result { 202 | let postfix_len = value.len() - 4; 203 | 204 | match &value[postfix_len..] { 205 | "/tcp" => Ok(ExposedPort::Tcp(value[..postfix_len].parse()?)), 206 | "/udp" => Ok(ExposedPort::Udp(value[..postfix_len].parse()?)), 207 | 208 | _ => Ok(ExposedPort::Tcp(value.parse()?)), 209 | } 210 | } 211 | } 212 | 213 | impl Into for ExposedPort { 214 | fn into(self) -> String { 215 | match self { 216 | ExposedPort::Tcp(port) => format!("{}/tcp", port), 217 | ExposedPort::Udp(port) => format!("{}/udp", port), 218 | } 219 | } 220 | } 221 | 222 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 223 | #[serde(rename_all = "lowercase")] 224 | pub enum RootfsType { 225 | Layers, 226 | } 227 | 228 | #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] 229 | pub enum Signal { 230 | SIGHUP, 231 | SIGINT, 232 | SIGQUIT, 233 | SIGILL, 234 | SIGTRAP, 235 | SIGABRT, 236 | SIGBUS, 237 | SIGFPE, 238 | SIGKILL, 239 | SIGUSR1, 240 | SIGSEGV, 241 | SIGUSR2, 242 | SIGPIPE, 243 | SIGALRM, 244 | SIGTERM, 245 | SIGSTKFLT, 246 | SIGCHLD, 247 | SIGCONT, 248 | SIGSTOP, 249 | SIGTSTP, 250 | SIGTTIN, 251 | SIGTTOU, 252 | SIGURG, 253 | SIGXCPU, 254 | SIGXFSZ, 255 | SIGVTALRM, 256 | SIGPROF, 257 | SIGWINCH, 258 | SIGIO, 259 | SIGPWR, 260 | SIGSYS, 261 | SIGEMT, 262 | SIGINFO, 263 | } 264 | 265 | impl From for ImageConfig { 266 | fn from(raw: RawImageConfig) -> Self { 267 | Self { 268 | user: raw.user, 269 | entrypoint: raw.entrypoint, 270 | cmd: raw.cmd, 271 | working_dir: raw.working_dir, 272 | labels: raw.labels, 273 | stop_signal: raw.stop_signal, 274 | 275 | env: raw.env.map(|inner| { 276 | inner 277 | .into_iter() 278 | .map(|mut pair| match pair.find('=') { 279 | Some(pos) => { 280 | let value = pair.split_off(pos + 1); 281 | let mut name = pair; 282 | name.pop(); 283 | 284 | (name, value) 285 | } 286 | 287 | None => (pair, String::with_capacity(0)), 288 | }) 289 | .collect() 290 | }), 291 | 292 | exposed_ports: raw 293 | .exposed_ports 294 | .map(|inner| inner.into_iter().map(|(port, _)| port).collect()), 295 | 296 | volumes: raw 297 | .volumes 298 | 
.map(|inner| inner.into_iter().map(|(volume, _)| volume).collect()), 299 | } 300 | } 301 | } 302 | 303 | impl Into for ImageConfig { 304 | fn into(self) -> RawImageConfig { 305 | RawImageConfig { 306 | user: self.user, 307 | entrypoint: self.entrypoint, 308 | cmd: self.cmd, 309 | working_dir: self.working_dir, 310 | labels: self.labels, 311 | stop_signal: self.stop_signal, 312 | 313 | env: self.env.map(|inner| { 314 | inner 315 | .into_iter() 316 | .map(|(key, value)| format!("{}={}", key, value)) 317 | .collect() 318 | }), 319 | 320 | exposed_ports: self.exposed_ports.map(|inner| { 321 | inner 322 | .into_iter() 323 | .map(|port| (port, Value::Object(Default::default()))) 324 | .collect() 325 | }), 326 | 327 | volumes: self.volumes.map(|inner| { 328 | inner 329 | .into_iter() 330 | .map(|volume| (volume, Value::Object(Default::default()))) 331 | .collect() 332 | }), 333 | } 334 | } 335 | } 336 | 337 | #[test] 338 | fn serialization() { 339 | use pretty_assertions::assert_eq; 340 | 341 | let ref_json = include_str!("../tests/oci-image-spec.json"); 342 | let ref_spec = ImageSpecification { 343 | created: Some("2015-10-31T22:22:56.015925234Z".parse().unwrap()), 344 | author: Some("Alyssa P. Hacker ".into()), 345 | architecture: Architecture::Amd64, 346 | os: OperatingSystem::Linux, 347 | rootfs: Some(ImageRootfs { 348 | diff_type: RootfsType::Layers, 349 | diff_ids: vec![ 350 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1".into(), 351 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef".into(), 352 | ], 353 | }), 354 | history: Some(vec![ 355 | LayerHistoryItem { 356 | created: Some("2015-10-31T22:22:54.690851953Z".parse().unwrap()), 357 | created_by: Some("/bin/sh -c #(nop) ADD file in /".into()), 358 | author: None, 359 | comment: None, 360 | empty_layer: None, 361 | }, 362 | LayerHistoryItem { 363 | created: Some("2015-10-31T22:22:55.613815829Z".parse().unwrap()), 364 | created_by: Some("/bin/sh -c #(nop) CMD [\"sh\"]".into()), 365 | author: None, 366 | comment: None, 367 | empty_layer: Some(true), 368 | }, 369 | ]), 370 | 371 | config: Some(ImageConfig { 372 | user: Some("alice".into()), 373 | exposed_ports: Some(vec![ExposedPort::Tcp(8080), ExposedPort::Udp(8081)]), 374 | env: Some( 375 | vec![( 376 | String::from("PATH"), 377 | String::from("/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"), 378 | )] 379 | .into_iter() 380 | .collect(), 381 | ), 382 | entrypoint: Some(vec!["/bin/my-app-binary".into()]), 383 | cmd: Some(vec![ 384 | "--foreground".into(), 385 | "--config".into(), 386 | "/etc/my-app.d/default.cfg".into(), 387 | ]), 388 | volumes: Some(vec![ 389 | "/var/job-result-data".into(), 390 | "/var/log/my-app-logs".into(), 391 | ]), 392 | working_dir: Some("/home/alice".into()), 393 | labels: Some( 394 | vec![( 395 | String::from("com.example.project.git.url"), 396 | String::from("https://example.com/project.git"), 397 | )] 398 | .into_iter() 399 | .collect(), 400 | ), 401 | stop_signal: Some(Signal::SIGKILL), 402 | }), 403 | }; 404 | 405 | assert_eq!(serde_json::to_string_pretty(&ref_spec).unwrap(), ref_json); 406 | assert_eq!( 407 | serde_json::from_str::(ref_json).unwrap(), 408 | ref_spec 409 | ); 410 | } 411 | 412 | #[test] 413 | fn min_serialization() { 414 | use pretty_assertions::assert_eq; 415 | 416 | let ref_json = include_str!("../tests/oci-image-spec-min.json"); 417 | let ref_spec = ImageSpecification { 418 | created: None, 419 | author: None, 420 | 421 | architecture: Architecture::Amd64, 422 | os: 
OperatingSystem::Linux, 423 | rootfs: Some(ImageRootfs { 424 | diff_type: RootfsType::Layers, 425 | diff_ids: vec![ 426 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1".into(), 427 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef".into(), 428 | ], 429 | }), 430 | 431 | history: None, 432 | config: None, 433 | }; 434 | 435 | assert_eq!(serde_json::to_string_pretty(&ref_spec).unwrap(), ref_json); 436 | assert_eq!( 437 | serde_json::from_str::(ref_json).unwrap(), 438 | ref_spec 439 | ); 440 | } 441 | -------------------------------------------------------------------------------- /buildkit-frontend/src/options/common.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt; 3 | 4 | use buildkit_proto::moby::buildkit::v1::frontend::CacheOptionsEntry as CacheOptionsEntryProto; 5 | use serde::de::{self, Deserializer, SeqAccess, Visitor}; 6 | use serde::Deserialize; 7 | 8 | #[derive(Clone, Debug, Deserialize, PartialEq)] 9 | pub struct CacheOptionsEntry { 10 | #[serde(rename = "Type")] 11 | pub cache_type: CacheType, 12 | 13 | #[serde(rename = "Attrs")] 14 | pub attrs: HashMap, 15 | } 16 | 17 | #[derive(Clone, Debug, Deserialize, PartialEq)] 18 | #[serde(rename_all = "lowercase")] 19 | pub enum CacheType { 20 | Local, 21 | Registry, 22 | Inline, 23 | } 24 | 25 | impl CacheOptionsEntry { 26 | pub fn from_legacy_list<'de, D>(deserializer: D) -> Result, D::Error> 27 | where 28 | D: Deserializer<'de>, 29 | { 30 | struct LegacyVisitor; 31 | 32 | impl<'de> Visitor<'de> for LegacyVisitor { 33 | type Value = Vec; 34 | 35 | fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { 36 | formatter.write_str("sequence") 37 | } 38 | 39 | fn visit_seq(self, map: M) -> Result 40 | where 41 | M: SeqAccess<'de>, 42 | { 43 | Deserialize::deserialize(de::value::SeqAccessDeserializer::new(map)) 44 | } 45 | } 46 | 47 | let legacy_refs = deserializer.deserialize_seq(LegacyVisitor)?; 48 | let new_refs_iter = legacy_refs.into_iter().map(|reference| CacheOptionsEntry { 49 | cache_type: CacheType::Registry, 50 | attrs: vec![(String::from("ref"), reference)].into_iter().collect(), 51 | }); 52 | 53 | Ok(new_refs_iter.collect()) 54 | } 55 | } 56 | 57 | impl Into for CacheOptionsEntry { 58 | fn into(self) -> CacheOptionsEntryProto { 59 | CacheOptionsEntryProto { 60 | r#type: self.cache_type.into(), 61 | attrs: self.attrs, 62 | } 63 | } 64 | } 65 | 66 | impl Into for CacheType { 67 | fn into(self) -> String { 68 | match self { 69 | CacheType::Local => "local".into(), 70 | CacheType::Registry => "registry".into(), 71 | CacheType::Inline => "inline".into(), 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /buildkit-frontend/src/options/default.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::iter::once; 3 | 4 | use either::Either; 5 | use serde::Deserialize; 6 | 7 | #[derive(Debug, PartialEq, Deserialize)] 8 | #[serde(transparent)] 9 | pub struct Options { 10 | inner: BTreeMap, 11 | } 12 | 13 | #[derive(Debug, PartialEq, Deserialize)] 14 | #[serde(untagged)] 15 | enum OptionValue { 16 | Flag(bool), 17 | Single(String), 18 | Multiple(Vec), 19 | } 20 | 21 | impl Options { 22 | pub fn has(&self, name: S) -> bool 23 | where 24 | S: AsRef, 25 | { 26 | match self.inner.get(name.as_ref()) { 27 | Some(container) => match container { 28 | 
OptionValue::Flag(exists) => *exists, 29 | OptionValue::Single(_) => true, 30 | OptionValue::Multiple(_) => true, 31 | }, 32 | 33 | None => false, 34 | } 35 | } 36 | 37 | pub fn is_flag_set(&self, name: S) -> bool 38 | where 39 | S: AsRef, 40 | { 41 | match self.inner.get(name.as_ref()) { 42 | Some(container) => match container { 43 | OptionValue::Flag(flag) => *flag, 44 | OptionValue::Single(_) => false, 45 | OptionValue::Multiple(_) => false, 46 | }, 47 | 48 | None => false, 49 | } 50 | } 51 | 52 | pub fn has_value(&self, name: S1, value: S2) -> bool 53 | where 54 | S1: AsRef, 55 | S2: AsRef, 56 | { 57 | match self.inner.get(name.as_ref()) { 58 | Some(container) => match container { 59 | OptionValue::Flag(_) => false, 60 | OptionValue::Single(single) => single == value.as_ref(), 61 | OptionValue::Multiple(values) => values.iter().any(|item| item == value.as_ref()), 62 | }, 63 | 64 | None => false, 65 | } 66 | } 67 | 68 | pub fn get(&self, name: S) -> Option<&str> 69 | where 70 | S: AsRef, 71 | { 72 | match self.inner.get(name.as_ref()) { 73 | Some(container) => match container { 74 | OptionValue::Flag(_) => None, 75 | OptionValue::Single(value) => Some(value.as_str()), 76 | OptionValue::Multiple(values) => values.iter().map(String::as_str).next(), 77 | }, 78 | 79 | None => None, 80 | } 81 | } 82 | 83 | pub fn iter(&self, name: S) -> Option> 84 | where 85 | S: AsRef, 86 | { 87 | match self.inner.get(name.as_ref()) { 88 | Some(container) => match container { 89 | OptionValue::Flag(_) => None, 90 | OptionValue::Single(value) => Some(Either::Left(once(value.as_str()))), 91 | OptionValue::Multiple(values) => { 92 | Some(Either::Right(values.iter().map(String::as_str))) 93 | } 94 | }, 95 | 96 | None => None, 97 | } 98 | } 99 | } 100 | 101 | #[cfg(test)] 102 | mod tests { 103 | use super::super::from_env; 104 | use super::*; 105 | 106 | #[test] 107 | fn options_parsing() { 108 | let options = from_env::(into_env(vec![ 109 | "name1", 110 | "name2=true", 111 | "name3=false", 112 | "name4=", 113 | "name5=value", 114 | "name6=de=limiter", 115 | "name7=false,true", 116 | "name8=value1,value2,value3", 117 | "name9=value1,val=ue2,value3", 118 | "build-arg:name10", 119 | "build-arg:name11=value", 120 | ])) 121 | .unwrap(); 122 | 123 | assert_eq!(options.inner["name1"], OptionValue::Flag(true)); 124 | assert_eq!(options.inner["name2"], OptionValue::Flag(true)); 125 | assert_eq!(options.inner["name3"], OptionValue::Flag(false)); 126 | assert_eq!(options.inner["name4"], OptionValue::Flag(true)); 127 | 128 | assert_eq!(options.inner["name5"], OptionValue::Single("value".into())); 129 | assert_eq!( 130 | options.inner["name6"], 131 | OptionValue::Single("de=limiter".into()) 132 | ); 133 | assert_eq!( 134 | options.inner["name7"], 135 | OptionValue::Multiple(vec!["false".into(), "true".into()]) 136 | ); 137 | assert_eq!( 138 | options.inner["name8"], 139 | OptionValue::Multiple(vec!["value1".into(), "value2".into(), "value3".into()]) 140 | ); 141 | assert_eq!( 142 | options.inner["name9"], 143 | OptionValue::Multiple(vec!["value1".into(), "val=ue2".into(), "value3".into()]) 144 | ); 145 | 146 | assert_eq!(options.inner["name10"], OptionValue::Flag(true)); 147 | assert_eq!(options.inner["name11"], OptionValue::Single("value".into())); 148 | } 149 | 150 | #[test] 151 | fn has_method() { 152 | let options = from_env::(into_env(vec![ 153 | "option1", 154 | "option2=true", 155 | "option3=false", 156 | "option4=true,false", 157 | ])) 158 | .unwrap(); 159 | 160 | assert_eq!(options.has("option1"), true); 161 | 
assert_eq!(options.has("option2"), true); 162 | assert_eq!(options.has("option3"), false); 163 | assert_eq!(options.has("option4"), true); 164 | } 165 | 166 | #[test] 167 | fn has_value_method() { 168 | let options = from_env::(into_env(vec![ 169 | "option1", 170 | "option2=true", 171 | "option3=true,false,any_other", 172 | ])) 173 | .unwrap(); 174 | 175 | assert_eq!(options.has_value("option1", ""), false); 176 | assert_eq!(options.has_value("option1", "any_other"), false); 177 | assert_eq!(options.has_value("option2", ""), false); 178 | assert_eq!(options.has_value("option2", "any_other"), false); 179 | assert_eq!(options.has_value("option3", "true"), true); 180 | assert_eq!(options.has_value("option3", "false"), true); 181 | assert_eq!(options.has_value("option3", "any_other"), true); 182 | assert_eq!(options.has_value("option3", "missing"), false); 183 | } 184 | 185 | #[test] 186 | fn iter_method() { 187 | let options = from_env::(into_env(vec![ 188 | "option1", 189 | "option2=true", 190 | "option3=true,false,any_other", 191 | ])) 192 | .unwrap(); 193 | 194 | assert!(options.iter("option1").is_none()); 195 | assert!(options.iter("option2").is_none()); 196 | assert!(options.iter("option4").is_none()); 197 | 198 | assert!(options.iter("option3").is_some()); 199 | assert_eq!( 200 | options.iter("option3").unwrap().collect::>(), 201 | vec!["true", "false", "any_other"] 202 | ); 203 | } 204 | 205 | fn into_env(args: Vec<&'static str>) -> Vec<(String, String)> { 206 | args.into_iter() 207 | .enumerate() 208 | .map(|(index, option)| { 209 | ( 210 | format!("BUILDKIT_FRONTEND_OPT_{}", index), 211 | String::from(option), 212 | ) 213 | }) 214 | .collect() 215 | } 216 | } 217 | -------------------------------------------------------------------------------- /buildkit-frontend/src/options/deserializer.rs: -------------------------------------------------------------------------------- 1 | use std::io::Cursor; 2 | use std::iter::empty; 3 | 4 | use failure::Error; 5 | use serde::de::value::{MapDeserializer, SeqDeserializer}; 6 | use serde::de::{self, DeserializeOwned, IntoDeserializer, Visitor}; 7 | use serde::forward_to_deserialize_any; 8 | 9 | pub fn from_env(pairs: I) -> Result 10 | where 11 | T: DeserializeOwned, 12 | I: IntoIterator, 13 | { 14 | let owned_pairs = pairs.into_iter().collect::>(); 15 | let pairs = { 16 | owned_pairs.iter().filter_map(|(name, value)| { 17 | if name.starts_with("BUILDKIT_FRONTEND_OPT_") { 18 | Some(value) 19 | } else { 20 | None 21 | } 22 | }) 23 | }; 24 | 25 | let deserializer = EnvDeserializer { 26 | vals: pairs.map(|value| extract_name_and_value(&value)), 27 | }; 28 | 29 | T::deserialize(deserializer).map_err(Error::from) 30 | } 31 | 32 | #[derive(Debug)] 33 | struct EnvDeserializer
<P>
{ 34 | vals: P, 35 | } 36 | 37 | #[derive(Debug)] 38 | enum EnvValue<'de> { 39 | Flag, 40 | Json(&'de str), 41 | Text(&'de str), 42 | } 43 | 44 | #[derive(Debug)] 45 | struct EnvItem<'de>(&'de str); 46 | 47 | fn extract_name_and_value(mut raw_value: &str) -> (&str, EnvValue) { 48 | if raw_value.starts_with("build-arg:") { 49 | raw_value = raw_value.trim_start_matches("build-arg:"); 50 | } 51 | 52 | let mut parts = raw_value.splitn(2, '='); 53 | let name = parts.next().unwrap(); 54 | 55 | match parts.next() { 56 | None => (name, EnvValue::Flag), 57 | Some(text) if text.is_empty() => (name, EnvValue::Flag), 58 | Some(text) if &text[0..1] == "[" || &text[0..1] == "{" => (name, EnvValue::Json(text)), 59 | Some(text) => (name, EnvValue::Text(text)), 60 | } 61 | } 62 | 63 | impl<'de> IntoDeserializer<'de, serde::de::value::Error> for EnvValue<'de> { 64 | type Deserializer = Self; 65 | 66 | fn into_deserializer(self) -> Self::Deserializer { 67 | self 68 | } 69 | } 70 | 71 | impl<'de> IntoDeserializer<'de, serde::de::value::Error> for EnvItem<'de> { 72 | type Deserializer = Self; 73 | 74 | fn into_deserializer(self) -> Self::Deserializer { 75 | self 76 | } 77 | } 78 | 79 | impl<'de> EnvItem<'de> { 80 | fn infer>(self, visitor: V) -> Result { 81 | match self.0 { 82 | "true" => visitor.visit_bool(true), 83 | "false" => visitor.visit_bool(false), 84 | 85 | _ => visitor.visit_str(self.0), 86 | } 87 | } 88 | 89 | fn json>(self, visitor: V) -> Result { 90 | use serde::de::Deserializer; 91 | use serde::de::Error; 92 | 93 | serde_json::Deserializer::from_reader(Cursor::new(self.0)) 94 | .deserialize_any(visitor) 95 | .map_err(serde::de::value::Error::custom) 96 | } 97 | } 98 | 99 | impl<'de, P> de::Deserializer<'de> for EnvDeserializer
<P>
100 | where 101 | P: Iterator)>, 102 | { 103 | type Error = serde::de::value::Error; 104 | 105 | fn deserialize_any>(self, visitor: V) -> Result { 106 | visitor.visit_map(MapDeserializer::new(self.vals)) 107 | } 108 | 109 | forward_to_deserialize_any! { 110 | bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string 111 | bytes byte_buf option unit unit_struct newtype_struct seq tuple 112 | tuple_struct map struct enum identifier ignored_any 113 | } 114 | } 115 | 116 | // The approach is shamelessly borrowed from https://github.com/softprops/envy/blob/master/src/lib.rs#L113 117 | macro_rules! forward_parsed_values_env_value { 118 | ($($ty:ident => $method:ident,)*) => { 119 | $( 120 | fn $method(self, visitor: V) -> Result 121 | where V: de::Visitor<'de> 122 | { 123 | match self { 124 | EnvValue::Flag => self.deserialize_any(visitor), 125 | EnvValue::Json(_) => self.deserialize_any(visitor), 126 | EnvValue::Text(contents) => { 127 | match contents.parse::<$ty>() { 128 | Ok(val) => val.into_deserializer().$method(visitor), 129 | Err(e) => Err(de::Error::custom(format_args!("{} while parsing value '{}'", e, contents))) 130 | } 131 | } 132 | } 133 | } 134 | )* 135 | } 136 | } 137 | 138 | macro_rules! forward_parsed_values_env_item { 139 | ($($ty:ident => $method:ident,)*) => { 140 | $( 141 | fn $method(self, visitor: V) -> Result 142 | where V: de::Visitor<'de> 143 | { 144 | match self.0.parse::<$ty>() { 145 | Ok(val) => val.into_deserializer().$method(visitor), 146 | Err(e) => Err(de::Error::custom(format_args!("{} while parsing value '{}'", e, self.0))) 147 | } 148 | } 149 | )* 150 | } 151 | } 152 | 153 | impl<'de> de::Deserializer<'de> for EnvValue<'de> { 154 | type Error = serde::de::value::Error; 155 | 156 | fn deserialize_any>(self, visitor: V) -> Result { 157 | match self { 158 | EnvValue::Flag => visitor.visit_bool(true), 159 | EnvValue::Json(contents) => EnvItem(contents).json(visitor), 160 | EnvValue::Text(contents) => { 161 | if !contents.contains(',') { 162 | EnvItem(contents).infer(visitor) 163 | } else { 164 | SeqDeserializer::new(contents.split(',')).deserialize_seq(visitor) 165 | } 166 | } 167 | } 168 | } 169 | 170 | fn deserialize_seq>(self, visitor: V) -> Result { 171 | match self { 172 | EnvValue::Flag => SeqDeserializer::new(empty::<&'de str>()).deserialize_seq(visitor), 173 | EnvValue::Json(contents) => EnvItem(contents).json(visitor), 174 | EnvValue::Text(contents) => { 175 | SeqDeserializer::new(contents.split(',')).deserialize_seq(visitor) 176 | } 177 | } 178 | } 179 | 180 | fn deserialize_option>(self, visitor: V) -> Result { 181 | visitor.visit_some(self) 182 | } 183 | 184 | forward_parsed_values_env_value! { 185 | bool => deserialize_bool, 186 | u8 => deserialize_u8, 187 | u16 => deserialize_u16, 188 | u32 => deserialize_u32, 189 | u64 => deserialize_u64, 190 | u128 => deserialize_u128, 191 | i8 => deserialize_i8, 192 | i16 => deserialize_i16, 193 | i32 => deserialize_i32, 194 | i64 => deserialize_i64, 195 | i128 => deserialize_i128, 196 | f32 => deserialize_f32, 197 | f64 => deserialize_f64, 198 | } 199 | 200 | forward_to_deserialize_any! 
{ 201 | byte_buf 202 | bytes 203 | char 204 | enum 205 | identifier 206 | ignored_any 207 | map 208 | newtype_struct 209 | str 210 | string 211 | struct 212 | tuple 213 | tuple_struct 214 | unit 215 | unit_struct 216 | } 217 | } 218 | 219 | impl<'de> de::Deserializer<'de> for EnvItem<'de> { 220 | type Error = serde::de::value::Error; 221 | 222 | fn deserialize_any>(self, visitor: V) -> Result { 223 | self.0.into_deserializer().deserialize_any(visitor) 224 | } 225 | 226 | fn deserialize_map>(self, visitor: V) -> Result { 227 | self.json(visitor) 228 | } 229 | 230 | fn deserialize_struct>( 231 | self, 232 | _: &'static str, 233 | _: &'static [&'static str], 234 | visitor: V, 235 | ) -> Result { 236 | self.json(visitor) 237 | } 238 | 239 | forward_parsed_values_env_item! { 240 | bool => deserialize_bool, 241 | u8 => deserialize_u8, 242 | u16 => deserialize_u16, 243 | u32 => deserialize_u32, 244 | u64 => deserialize_u64, 245 | u128 => deserialize_u128, 246 | i8 => deserialize_i8, 247 | i16 => deserialize_i16, 248 | i32 => deserialize_i32, 249 | i64 => deserialize_i64, 250 | i128 => deserialize_i128, 251 | f32 => deserialize_f32, 252 | f64 => deserialize_f64, 253 | } 254 | 255 | forward_to_deserialize_any! { 256 | byte_buf 257 | bytes 258 | char 259 | enum 260 | identifier 261 | ignored_any 262 | newtype_struct 263 | option 264 | seq 265 | str 266 | string 267 | tuple 268 | tuple_struct 269 | unit 270 | unit_struct 271 | } 272 | } 273 | -------------------------------------------------------------------------------- /buildkit-frontend/src/options/mod.rs: -------------------------------------------------------------------------------- 1 | mod default; 2 | mod deserializer; 3 | 4 | pub use self::default::Options; 5 | pub use self::deserializer::from_env; 6 | 7 | pub mod common; 8 | 9 | #[cfg(test)] 10 | mod tests { 11 | use std::path::PathBuf; 12 | 13 | use super::*; 14 | use serde::Deserialize; 15 | 16 | #[derive(Debug, Deserialize, PartialEq)] 17 | #[serde(untagged)] 18 | #[serde(field_identifier, rename_all = "lowercase")] 19 | enum Debug { 20 | All, 21 | LLB, 22 | Frontend, 23 | } 24 | 25 | #[derive(Debug, Deserialize, PartialEq)] 26 | #[serde(rename_all = "kebab-case")] 27 | struct CustomOptions { 28 | filename: Option, 29 | verbosity: u32, 30 | 31 | #[serde(default)] 32 | debug: Vec, 33 | 34 | #[serde(default)] 35 | cache_imports: Vec, 36 | } 37 | 38 | #[test] 39 | fn custom_options() { 40 | let env = vec![ 41 | ( 42 | "BUILDKIT_FRONTEND_OPT_0".into(), 43 | "filename=/path/to/Dockerfile".into(), 44 | ), 45 | ( 46 | "BUILDKIT_FRONTEND_OPT_1".into(), 47 | "debug=llb,frontend".into(), 48 | ), 49 | ( 50 | "BUILDKIT_FRONTEND_OPT_2".into(), 51 | r#"cache-imports=[{"Type":"local","Attrs":{"src":"cache"}}]"#.into(), 52 | ), 53 | ( 54 | "BUILDKIT_FRONTEND_OPT_3".into(), 55 | "verbosity=12345678".into(), 56 | ), 57 | ]; 58 | 59 | assert_eq!( 60 | from_env::(env.into_iter()).unwrap(), 61 | CustomOptions { 62 | filename: Some(PathBuf::from("/path/to/Dockerfile")), 63 | verbosity: 12_345_678, 64 | 65 | debug: vec![Debug::LLB, Debug::Frontend], 66 | 67 | cache_imports: vec![common::CacheOptionsEntry { 68 | cache_type: common::CacheType::Local, 69 | attrs: vec![("src".into(), "cache".into())].into_iter().collect() 70 | }], 71 | } 72 | ); 73 | } 74 | 75 | #[test] 76 | fn env_variable_names() { 77 | let env = vec![ 78 | ( 79 | "ANOTHER_OPT_0".into(), 80 | "filename=/path/to/Dockerfile".into(), 81 | ), 82 | ( 83 | "ANOTHER_OPT_2".into(), 84 | 
r#"cache-imports=[{"Type":"local","Attrs":{"src":"cache"}}]"#.into(), 85 | ), 86 | ("BUILDKIT_FRONTEND_OPT_1".into(), "debug=all".into()), 87 | ( 88 | "BUILDKIT_FRONTEND_OPT_2".into(), 89 | "verbosity=12345678".into(), 90 | ), 91 | ]; 92 | 93 | assert_eq!( 94 | from_env::(env.into_iter()).unwrap(), 95 | CustomOptions { 96 | filename: None, 97 | verbosity: 12_345_678, 98 | debug: vec![Debug::All], 99 | cache_imports: vec![], 100 | } 101 | ); 102 | } 103 | 104 | #[test] 105 | fn empty_cache() { 106 | let env = vec![ 107 | ("BUILDKIT_FRONTEND_OPT_1".into(), "cache-imports=".into()), 108 | ( 109 | "BUILDKIT_FRONTEND_OPT_2".into(), 110 | "verbosity=12345678".into(), 111 | ), 112 | ]; 113 | 114 | assert_eq!( 115 | from_env::(env.into_iter()).unwrap(), 116 | CustomOptions { 117 | filename: None, 118 | verbosity: 12_345_678, 119 | debug: vec![], 120 | cache_imports: vec![], 121 | } 122 | ); 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /buildkit-frontend/src/stdio.rs: -------------------------------------------------------------------------------- 1 | use std::io::{self, stdin, stdout}; 2 | use std::pin::Pin; 3 | use std::task::{Context, Poll}; 4 | 5 | use pin_project::pin_project; 6 | use tokio::io::*; 7 | use tonic::transport::Uri; 8 | 9 | #[pin_project] 10 | pub struct StdioSocket { 11 | #[pin] 12 | reader: PollEvented, 13 | 14 | #[pin] 15 | writer: PollEvented, 16 | } 17 | 18 | pub async fn stdio_connector(_: Uri) -> io::Result { 19 | StdioSocket::try_new() 20 | } 21 | 22 | impl StdioSocket { 23 | pub fn try_new() -> io::Result { 24 | Ok(StdioSocket { 25 | reader: PollEvented::new(async_stdio::EventedStdin::try_new(stdin())?)?, 26 | writer: PollEvented::new(async_stdio::EventedStdout::try_new(stdout())?)?, 27 | }) 28 | } 29 | } 30 | 31 | impl AsyncRead for StdioSocket { 32 | fn poll_read( 33 | self: Pin<&mut Self>, 34 | cx: &mut Context<'_>, 35 | buf: &mut [u8], 36 | ) -> Poll> { 37 | self.project().reader.poll_read(cx, buf) 38 | } 39 | } 40 | 41 | impl AsyncWrite for StdioSocket { 42 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { 43 | self.project().writer.poll_write(cx, buf) 44 | } 45 | 46 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 47 | self.project().writer.poll_flush(cx) 48 | } 49 | 50 | fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 51 | self.project().writer.poll_shutdown(cx) 52 | } 53 | } 54 | 55 | mod async_stdio { 56 | use std::io::{self, Read, Stdin, Stdout, Write}; 57 | use std::os::unix::io::AsRawFd; 58 | 59 | use mio::event::Evented; 60 | use mio::unix::EventedFd; 61 | use mio::{Poll, PollOpt, Ready, Token}; 62 | 63 | use libc::{fcntl, F_GETFL, F_SETFL, O_NONBLOCK}; 64 | 65 | pub struct EventedStdin(Stdin); 66 | pub struct EventedStdout(Stdout); 67 | 68 | impl EventedStdin { 69 | pub fn try_new(stdin: Stdin) -> io::Result { 70 | set_non_blocking_flag(&stdin)?; 71 | 72 | Ok(EventedStdin(stdin)) 73 | } 74 | } 75 | 76 | impl EventedStdout { 77 | pub fn try_new(stdout: Stdout) -> io::Result { 78 | set_non_blocking_flag(&stdout)?; 79 | 80 | Ok(EventedStdout(stdout)) 81 | } 82 | } 83 | 84 | impl Evented for EventedStdin { 85 | fn register( 86 | &self, 87 | poll: &Poll, 88 | token: Token, 89 | interest: Ready, 90 | opts: PollOpt, 91 | ) -> io::Result<()> { 92 | EventedFd(&self.0.as_raw_fd()).register(poll, token, interest, opts) 93 | } 94 | 95 | fn reregister( 96 | &self, 97 | poll: &Poll, 98 | token: Token, 99 | interest: Ready, 100 | opts: 
PollOpt, 101 | ) -> io::Result<()> { 102 | EventedFd(&self.0.as_raw_fd()).reregister(poll, token, interest, opts) 103 | } 104 | 105 | fn deregister(&self, poll: &Poll) -> io::Result<()> { 106 | EventedFd(&self.0.as_raw_fd()).deregister(poll) 107 | } 108 | } 109 | 110 | impl Read for EventedStdin { 111 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 112 | self.0.read(buf) 113 | } 114 | } 115 | 116 | impl Evented for EventedStdout { 117 | fn register( 118 | &self, 119 | poll: &Poll, 120 | token: Token, 121 | interest: Ready, 122 | opts: PollOpt, 123 | ) -> io::Result<()> { 124 | EventedFd(&self.0.as_raw_fd()).register(poll, token, interest, opts) 125 | } 126 | 127 | fn reregister( 128 | &self, 129 | poll: &Poll, 130 | token: Token, 131 | interest: Ready, 132 | opts: PollOpt, 133 | ) -> io::Result<()> { 134 | EventedFd(&self.0.as_raw_fd()).reregister(poll, token, interest, opts) 135 | } 136 | 137 | fn deregister(&self, poll: &Poll) -> io::Result<()> { 138 | EventedFd(&self.0.as_raw_fd()).deregister(poll) 139 | } 140 | } 141 | 142 | impl Write for EventedStdout { 143 | fn write(&mut self, buf: &[u8]) -> io::Result { 144 | self.0.write(buf) 145 | } 146 | 147 | fn flush(&mut self) -> io::Result<()> { 148 | self.0.flush() 149 | } 150 | } 151 | 152 | fn set_non_blocking_flag(stream: &T) -> io::Result<()> { 153 | let flags = unsafe { fcntl(stream.as_raw_fd(), F_GETFL, 0) }; 154 | 155 | if flags < 0 { 156 | return Err(std::io::Error::last_os_error()); 157 | } 158 | 159 | if unsafe { fcntl(stream.as_raw_fd(), F_SETFL, flags | O_NONBLOCK) } != 0 { 160 | return Err(std::io::Error::last_os_error()); 161 | } 162 | 163 | Ok(()) 164 | } 165 | } 166 | -------------------------------------------------------------------------------- /buildkit-frontend/src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | 3 | use failure::Error; 4 | 5 | #[derive(Clone, Debug)] 6 | pub struct OutputRef(pub(crate) String); 7 | 8 | pub struct ErrorWithCauses(pub Error, &'static str); 9 | 10 | impl ErrorWithCauses { 11 | pub fn multi_line(error: Error) -> Self { 12 | Self(error, "\n caused by: ") 13 | } 14 | 15 | pub fn single_line(error: Error) -> Self { 16 | Self(error, " => caused by: ") 17 | } 18 | 19 | pub fn into_inner(self) -> Error { 20 | self.0 21 | } 22 | } 23 | 24 | impl fmt::Display for ErrorWithCauses { 25 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 26 | write!(f, "{}", self.0)?; 27 | 28 | for cause in self.0.iter_causes() { 29 | write!(f, "{}{}", self.1, cause)?; 30 | } 31 | 32 | Ok(()) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /buildkit-frontend/tests/oci-image-spec-min.json: -------------------------------------------------------------------------------- 1 | { 2 | "architecture": "amd64", 3 | "os": "linux", 4 | "rootfs": { 5 | "type": "layers", 6 | "diff_ids": [ 7 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", 8 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 9 | ] 10 | } 11 | } -------------------------------------------------------------------------------- /buildkit-frontend/tests/oci-image-spec.json: -------------------------------------------------------------------------------- 1 | { 2 | "created": "2015-10-31T22:22:56.015925234Z", 3 | "author": "Alyssa P. 
Hacker ", 4 | "architecture": "amd64", 5 | "os": "linux", 6 | "config": { 7 | "User": "alice", 8 | "ExposedPorts": { 9 | "8080/tcp": {}, 10 | "8081/udp": {} 11 | }, 12 | "Env": [ 13 | "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" 14 | ], 15 | "Entrypoint": [ 16 | "/bin/my-app-binary" 17 | ], 18 | "Cmd": [ 19 | "--foreground", 20 | "--config", 21 | "/etc/my-app.d/default.cfg" 22 | ], 23 | "Volumes": { 24 | "/var/job-result-data": {}, 25 | "/var/log/my-app-logs": {} 26 | }, 27 | "WorkingDir": "/home/alice", 28 | "Labels": { 29 | "com.example.project.git.url": "https://example.com/project.git" 30 | }, 31 | "StopSignal": "SIGKILL" 32 | }, 33 | "rootfs": { 34 | "type": "layers", 35 | "diff_ids": [ 36 | "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1", 37 | "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" 38 | ] 39 | }, 40 | "history": [ 41 | { 42 | "created": "2015-10-31T22:22:54.690851953Z", 43 | "created_by": "/bin/sh -c #(nop) ADD file in /" 44 | }, 45 | { 46 | "created": "2015-10-31T22:22:55.613815829Z", 47 | "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]", 48 | "empty_layer": true 49 | } 50 | ] 51 | } -------------------------------------------------------------------------------- /buildkit-llb/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [Unreleased] 8 | 9 | ## [0.2.0] - 2020-03-04 10 | ### Changed 11 | - Update `buildkit-proto` dependency to use `tonic` for gRPC. 12 | 13 | ## [0.1.3] - 2020-01-24 14 | ### Added 15 | - `Mount::OptionalSshAgent` to mount the host SSH agent socket with `docker build --ssh=default`. 16 | 17 | ## [0.1.2] - 2019-11-20 18 | ### Added 19 | - `ImageSource::with_tag` method. 20 | 21 | ### Changed 22 | - `Source::image` behavior to conform Docker. 23 | 24 | ## [0.1.1] - 2019-10-22 25 | ### Added 26 | - `GitSource::with_reference` method. 27 | - HTTP source. 28 | 29 | ## [0.1.0] - 2019-09-24 30 | Initial release. 
31 | -------------------------------------------------------------------------------- /buildkit-llb/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildkit-llb" 3 | version = "0.2.0" 4 | authors = ["Denys Zariaiev "] 5 | edition = "2018" 6 | 7 | description = "Idiomatic high-level API to create BuildKit LLB graphs" 8 | documentation = "https://docs.rs/buildkit-llb" 9 | repository = "https://github.com/denzp/rust-buildkit" 10 | readme = "README.md" 11 | keywords = ["buildkit", "docker", "llb"] 12 | categories = ["development-tools::build-utils", "api-bindings"] 13 | license = "MIT/Apache-2.0" 14 | 15 | [dependencies] 16 | either = "1.5" 17 | failure = "0.1" 18 | lazy_static = "1" 19 | log = "0.4" 20 | prost = "0.6" 21 | regex = "1" 22 | serde_json = "1.0" 23 | sha2 = "0.8" 24 | 25 | [dependencies.buildkit-proto] 26 | version = "0.2" 27 | path = "../buildkit-proto" 28 | -------------------------------------------------------------------------------- /buildkit-llb/README.md: -------------------------------------------------------------------------------- 1 | `buildkit-llb` - high-level API to create BuildKit LLB graphs 2 | ======= 3 | 4 | [![Actions Status]][Actions Link] 5 | [![buildkit-llb Crates Badge]][buildkit-llb Crates Link] 6 | [![buildkit-llb Docs Badge]][buildkit-llb Docs Link] 7 | 8 | # Usage 9 | 10 | Please check [docs][buildkit-llb Docs Link] or examples on how to use the crate. 11 | 12 | The LLB graph from stdout can easily be used with `buildctl`: 13 | ``` 14 | cargo run --example=scratch | buildctl build 15 | ``` 16 | 17 | # License 18 | 19 | `buildkit-llb` is primarily distributed under the terms of both the MIT license and 20 | the Apache License (Version 2.0), with portions covered by various BSD-like 21 | licenses. 22 | 23 | See LICENSE-APACHE, and LICENSE-MIT for details. 24 | 25 | # Contribution 26 | 27 | Unless you explicitly state otherwise, any contribution intentionally submitted 28 | for inclusion in `buildkit-llb` by you, as defined in the Apache-2.0 license, 29 | shall be dual licensed as above, without any additional terms or conditions. 30 | 31 | [Actions Link]: https://github.com/denzp/rust-buildkit/actions 32 | [Actions Status]: https://github.com/denzp/rust-buildkit/workflows/CI/badge.svg 33 | [buildkit-llb Docs Badge]: https://docs.rs/buildkit-llb/badge.svg 34 | [buildkit-llb Docs Link]: https://docs.rs/buildkit-llb/ 35 | [buildkit-llb Crates Badge]: https://img.shields.io/crates/v/buildkit-llb.svg 36 | [buildkit-llb Crates Link]: https://crates.io/crates/buildkit-llb 37 | -------------------------------------------------------------------------------- /buildkit-llb/examples/highly-parallel.rs: -------------------------------------------------------------------------------- 1 | use std::io::stdout; 2 | 3 | use buildkit_llb::ops::source::ImageSource; 4 | use buildkit_llb::prelude::*; 5 | 6 | fn main() { 7 | let image = Source::image("library/alpine:latest"); 8 | let commands = build_init_commands(&image); 9 | let commands = build_modify_commands(&image, commands); 10 | 11 | let base_fs = FileSystem::sequence() 12 | .custom_name("assemble outputs") 13 | .append(FileSystem::mkdir( 14 | OutputIdx(0), 15 | LayerPath::Scratch("/files"), 16 | )); 17 | 18 | let (final_fs, final_output) = 19 | commands 20 | .into_iter() 21 | .zip(0..) 
22 | .fold((base_fs, 0), |(fs, last_output), (output, idx)| { 23 | let layer = fs.append( 24 | FileSystem::copy() 25 | .from(LayerPath::Other(output, format!("/file-{}.out", idx))) 26 | .to( 27 | OutputIdx(idx + 1), 28 | LayerPath::Own( 29 | OwnOutputIdx(last_output), 30 | format!("/files/file-{}.out", idx), 31 | ), 32 | ), 33 | ); 34 | 35 | (layer, idx + 1) 36 | }); 37 | 38 | Terminal::with(final_fs.output(final_output)) 39 | .write_definition(stdout()) 40 | .unwrap() 41 | } 42 | 43 | fn build_init_commands(image: &ImageSource) -> Vec { 44 | (0..100) 45 | .map(|idx| { 46 | let base_dir = format!("/file/{}", idx); 47 | let shell = format!("echo 'test {}' > /out{}/file.out", idx, base_dir); 48 | 49 | let output_mount = FileSystem::mkdir(OutputIdx(0), LayerPath::Scratch(&base_dir)) 50 | .make_parents(true) 51 | .into_operation() 52 | .ignore_cache(true) 53 | .ref_counted(); 54 | 55 | Command::run("/bin/sh") 56 | .args(&["-c", &shell]) 57 | .mount(Mount::ReadOnlyLayer(image.output(), "/")) 58 | .mount(Mount::Layer(OutputIdx(0), output_mount.output(0), "/out")) 59 | .ignore_cache(true) 60 | .ref_counted() 61 | .output(0) 62 | }) 63 | .collect() 64 | } 65 | 66 | fn build_modify_commands<'a>( 67 | image: &'a ImageSource, 68 | layers: Vec>, 69 | ) -> Vec> { 70 | layers 71 | .into_iter() 72 | .zip(0..) 73 | .map(|(output, idx)| { 74 | let shell = format!( 75 | "sed s/test/modified/ < /in/file/{}/file.in > /out/file-{}.out", 76 | idx, idx 77 | ); 78 | 79 | Command::run("/bin/sh") 80 | .args(&["-c", &shell]) 81 | .mount(Mount::ReadOnlyLayer(image.output(), "/")) 82 | .mount(Mount::Scratch(OutputIdx(0), "/out")) 83 | .mount(Mount::ReadOnlySelector( 84 | output, 85 | format!("/in/file/{}/file.in", idx), 86 | format!("file/{}/file.out", idx), 87 | )) 88 | .ignore_cache(true) 89 | .ref_counted() 90 | .output(0) 91 | }) 92 | .collect() 93 | } 94 | -------------------------------------------------------------------------------- /buildkit-llb/examples/network.rs: -------------------------------------------------------------------------------- 1 | use std::io::stdout; 2 | 3 | use buildkit_llb::prelude::*; 4 | 5 | fn main() { 6 | let bitflags_archive = Source::http("https://crates.io/api/v1/crates/bitflags/1.0.4/download") 7 | .with_file_name("bitflags.tar"); 8 | 9 | let alpine = Source::image("library/alpine:latest"); 10 | let bitflags_unpacked = { 11 | Command::run("/bin/tar") 12 | .args(&[ 13 | "-xvzC", 14 | "/out", 15 | "--strip-components=1", 16 | "-f", 17 | "/in/bitflags.tar", 18 | ]) 19 | .mount(Mount::ReadOnlyLayer(alpine.output(), "/")) 20 | .mount(Mount::ReadOnlyLayer(bitflags_archive.output(), "/in")) 21 | .mount(Mount::Scratch(OutputIdx(0), "/out")) 22 | }; 23 | 24 | let env_logger_repo = Source::git("https://github.com/sebasmagri/env_logger.git") 25 | .with_reference("ebf4829f3c04ce9b6d3e5d59fa8770bb71bffca3"); 26 | 27 | let fs = { 28 | FileSystem::sequence() 29 | .append( 30 | FileSystem::copy() 31 | .from(LayerPath::Other(bitflags_unpacked.output(0), "/Cargo.toml")) 32 | .to(OutputIdx(0), LayerPath::Scratch("/bitflags.toml")), 33 | ) 34 | .append( 35 | FileSystem::copy() 36 | .from(LayerPath::Other(env_logger_repo.output(), "/Cargo.toml")) 37 | .to( 38 | OutputIdx(1), 39 | LayerPath::Own(OwnOutputIdx(0), "/env_logger.toml"), 40 | ), 41 | ) 42 | }; 43 | 44 | Terminal::with(fs.output(1)) 45 | .write_definition(stdout()) 46 | .unwrap() 47 | } 48 | -------------------------------------------------------------------------------- /buildkit-llb/examples/scratch-owned.rs: 
-------------------------------------------------------------------------------- 1 | use std::io::stdout; 2 | 3 | use buildkit_llb::prelude::*; 4 | 5 | fn main() { 6 | Terminal::with(build_graph()) 7 | .write_definition(stdout()) 8 | .unwrap() 9 | } 10 | 11 | fn build_graph() -> OperationOutput<'static> { 12 | let builder_image = Source::image("library/alpine:latest") 13 | .custom_name("Using alpine:latest as a builder") 14 | .ref_counted(); 15 | 16 | let command = { 17 | Command::run("/bin/sh") 18 | .args(&["-c", "echo 'test string 5' > /out/file0"]) 19 | .custom_name("create a dummy file") 20 | .mount(Mount::ReadOnlyLayer(builder_image.output(), "/")) 21 | .mount(Mount::Scratch(OutputIdx(0), "/out")) 22 | .ref_counted() 23 | }; 24 | 25 | let fs = { 26 | FileSystem::sequence() 27 | .custom_name("do multiple file system manipulations") 28 | .append( 29 | FileSystem::copy() 30 | .from(LayerPath::Other(command.output(0), "/file0")) 31 | .to(OutputIdx(0), LayerPath::Other(command.output(0), "/file1")), 32 | ) 33 | .append( 34 | FileSystem::copy() 35 | .from(LayerPath::Own(OwnOutputIdx(0), "/file0")) 36 | .to(OutputIdx(1), LayerPath::Own(OwnOutputIdx(0), "/file2")), 37 | ) 38 | }; 39 | 40 | fs.ref_counted().output(1) 41 | } 42 | -------------------------------------------------------------------------------- /buildkit-llb/examples/scratch.rs: -------------------------------------------------------------------------------- 1 | use std::io::stdout; 2 | 3 | use buildkit_llb::prelude::*; 4 | 5 | fn main() { 6 | let builder_image = 7 | Source::image("library/alpine:latest").custom_name("Using alpine:latest as a builder"); 8 | 9 | let command = { 10 | Command::run("/bin/sh") 11 | .args(&["-c", "echo 'test string 5' > /out/file0"]) 12 | .custom_name("create a dummy file") 13 | .mount(Mount::ReadOnlyLayer(builder_image.output(), "/")) 14 | .mount(Mount::Scratch(OutputIdx(0), "/out")) 15 | }; 16 | 17 | let fs = { 18 | FileSystem::sequence() 19 | .custom_name("do multiple file system manipulations") 20 | .append( 21 | FileSystem::copy() 22 | .from(LayerPath::Other(command.output(0), "/file0")) 23 | .to(OutputIdx(0), LayerPath::Other(command.output(0), "/file1")), 24 | ) 25 | .append( 26 | FileSystem::copy() 27 | .from(LayerPath::Own(OwnOutputIdx(0), "/file0")) 28 | .to(OutputIdx(1), LayerPath::Own(OwnOutputIdx(0), "/file2")), 29 | ) 30 | }; 31 | 32 | Terminal::with(fs.output(1)) 33 | .write_definition(stdout()) 34 | .unwrap() 35 | } 36 | -------------------------------------------------------------------------------- /buildkit-llb/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings)] 2 | #![deny(clippy::all)] 3 | 4 | // FIXME: get rid of the unwraps 5 | // TODO: implement warnings for op hash collisions (will incredibly help to debug problems). 6 | // TODO: implement efficient `std::fmt::Debug` for the ops (naive implementation can't handle huge nested graphs). 7 | 8 | mod serialization; 9 | 10 | /// Supported operations - building blocks of the LLB definition graph. 11 | pub mod ops; 12 | 13 | /// Various helpers and types. 14 | pub mod utils; 15 | 16 | /// Convenient re-export of a commonly used things. 
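///
/// A minimal end-to-end sketch, adapted from the crate's `scratch.rs` example and shown here
/// purely for illustration (the shell command and file names are arbitrary):
///
/// ```ignore
/// use buildkit_llb::prelude::*;
///
/// let image = Source::image("library/alpine:latest");
///
/// let command = Command::run("/bin/sh")
///     .args(&["-c", "echo 'hello' > /out/file0"])
///     .mount(Mount::ReadOnlyLayer(image.output(), "/"))
///     .mount(Mount::Scratch(OutputIdx(0), "/out"));
///
/// // Serialize the LLB graph to stdout, e.g. to pipe into `buildctl build`.
/// Terminal::with(command.output(0))
///     .write_definition(std::io::stdout())
///     .unwrap();
/// ```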
17 | pub mod prelude { 18 | pub use crate::ops::exec::Mount; 19 | pub use crate::ops::fs::LayerPath; 20 | pub use crate::ops::source::ResolveMode; 21 | pub use crate::ops::*; 22 | pub use crate::utils::{OperationOutput, OutputIdx, OwnOutputIdx}; 23 | } 24 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/exec/command.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::iter::{empty, once}; 3 | use std::path::{Path, PathBuf}; 4 | use std::sync::Arc; 5 | 6 | use buildkit_proto::pb::{ 7 | self, op::Op, ExecOp, Input, MountType, NetMode, OpMetadata, SecurityMode, 8 | }; 9 | use either::Either; 10 | 11 | use super::context::Context; 12 | use super::mount::Mount; 13 | 14 | use crate::ops::{MultiBorrowedOutput, MultiOwnedOutput, OperationBuilder}; 15 | use crate::serialization::{Context as SerializationCtx, Node, Operation, OperationId, Result}; 16 | use crate::utils::{OperationOutput, OutputIdx}; 17 | 18 | /// Command execution operation. This is what a Dockerfile's `RUN` directive is translated to. 19 | #[derive(Debug, Clone)] 20 | pub struct Command<'a> { 21 | id: OperationId, 22 | 23 | context: Context, 24 | root_mount: Option>, 25 | other_mounts: Vec>, 26 | 27 | description: HashMap, 28 | caps: HashMap, 29 | ignore_cache: bool, 30 | } 31 | 32 | impl<'a> Command<'a> { 33 | pub fn run(name: S) -> Self 34 | where 35 | S: Into, 36 | { 37 | Self { 38 | id: OperationId::default(), 39 | 40 | context: Context::new(name), 41 | root_mount: None, 42 | other_mounts: vec![], 43 | 44 | description: Default::default(), 45 | caps: Default::default(), 46 | ignore_cache: false, 47 | } 48 | } 49 | 50 | pub fn args(mut self, args: A) -> Self 51 | where 52 | A: IntoIterator, 53 | S: AsRef, 54 | { 55 | self.context.args = args.into_iter().map(|item| item.as_ref().into()).collect(); 56 | self 57 | } 58 | 59 | pub fn env(mut self, name: S, value: Q) -> Self 60 | where 61 | S: AsRef, 62 | Q: AsRef, 63 | { 64 | let env = format!("{}={}", name.as_ref(), value.as_ref()); 65 | 66 | self.context.env.push(env); 67 | self 68 | } 69 | 70 | pub fn env_iter(mut self, iter: I) -> Self 71 | where 72 | I: IntoIterator, 73 | S: AsRef, 74 | Q: AsRef, 75 | { 76 | for (name, value) in iter.into_iter() { 77 | let env = format!("{}={}", name.as_ref(), value.as_ref()); 78 | self.context.env.push(env); 79 | } 80 | 81 | self 82 | } 83 | 84 | pub fn cwd
<P>
(mut self, path: P) -> Self 85 | where 86 | P: Into, 87 | { 88 | self.context.cwd = path.into(); 89 | self 90 | } 91 | 92 | pub fn user(mut self, user: S) -> Self 93 | where 94 | S: Into, 95 | { 96 | self.context.user = user.into(); 97 | self 98 | } 99 | 100 | pub fn mount
<P>
(mut self, mount: Mount<'a, P>) -> Self 101 | where 102 | P: AsRef, 103 | { 104 | match mount { 105 | Mount::Layer(..) | Mount::ReadOnlyLayer(..) | Mount::Scratch(..) => { 106 | self.caps.insert("exec.mount.bind".into(), true); 107 | } 108 | 109 | Mount::ReadOnlySelector(..) => { 110 | self.caps.insert("exec.mount.bind".into(), true); 111 | self.caps.insert("exec.mount.selector".into(), true); 112 | } 113 | 114 | Mount::SharedCache(..) => { 115 | self.caps.insert("exec.mount.cache".into(), true); 116 | self.caps.insert("exec.mount.cache.sharing".into(), true); 117 | } 118 | 119 | Mount::OptionalSshAgent(..) => { 120 | self.caps.insert("exec.mount.ssh".into(), true); 121 | } 122 | } 123 | 124 | if mount.is_root() { 125 | self.root_mount = Some(mount.into_owned()); 126 | } else { 127 | self.other_mounts.push(mount.into_owned()); 128 | } 129 | 130 | self 131 | } 132 | } 133 | 134 | impl<'a, 'b: 'a> MultiBorrowedOutput<'b> for Command<'b> { 135 | fn output(&'b self, index: u32) -> OperationOutput<'b> { 136 | // TODO: check if the requested index available. 137 | OperationOutput::borrowed(self, OutputIdx(index)) 138 | } 139 | } 140 | 141 | impl<'a> MultiOwnedOutput<'a> for Arc> { 142 | fn output(&self, index: u32) -> OperationOutput<'a> { 143 | // TODO: check if the requested index available. 144 | OperationOutput::owned(self.clone(), OutputIdx(index)) 145 | } 146 | } 147 | 148 | impl<'a> OperationBuilder<'a> for Command<'a> { 149 | fn custom_name(mut self, name: S) -> Self 150 | where 151 | S: Into, 152 | { 153 | self.description 154 | .insert("llb.customname".into(), name.into()); 155 | 156 | self 157 | } 158 | 159 | fn ignore_cache(mut self, ignore: bool) -> Self { 160 | self.ignore_cache = ignore; 161 | self 162 | } 163 | } 164 | 165 | impl<'a> Operation for Command<'a> { 166 | fn id(&self) -> &OperationId { 167 | &self.id 168 | } 169 | 170 | fn serialize(&self, cx: &mut SerializationCtx) -> Result { 171 | let (inputs, mounts): (Vec<_>, Vec<_>) = { 172 | let mut last_input_index = 0; 173 | 174 | self.root_mount 175 | .as_ref() 176 | .into_iter() 177 | .chain(self.other_mounts.iter()) 178 | .map(|mount| { 179 | let inner_mount = match mount { 180 | Mount::ReadOnlyLayer(_, destination) => pb::Mount { 181 | input: last_input_index, 182 | dest: destination.to_string_lossy().into(), 183 | output: -1, 184 | readonly: true, 185 | mount_type: MountType::Bind as i32, 186 | 187 | ..Default::default() 188 | }, 189 | 190 | Mount::ReadOnlySelector(_, destination, source) => pb::Mount { 191 | input: last_input_index, 192 | dest: destination.to_string_lossy().into(), 193 | output: -1, 194 | readonly: true, 195 | selector: source.to_string_lossy().into(), 196 | mount_type: MountType::Bind as i32, 197 | 198 | ..Default::default() 199 | }, 200 | 201 | Mount::Layer(output, _, path) => pb::Mount { 202 | input: last_input_index, 203 | dest: path.to_string_lossy().into(), 204 | output: output.into(), 205 | mount_type: MountType::Bind as i32, 206 | 207 | ..Default::default() 208 | }, 209 | 210 | Mount::Scratch(output, path) => { 211 | let mount = pb::Mount { 212 | input: -1, 213 | dest: path.to_string_lossy().into(), 214 | output: output.into(), 215 | mount_type: MountType::Bind as i32, 216 | 217 | ..Default::default() 218 | }; 219 | 220 | return (Either::Right(empty()), mount); 221 | } 222 | 223 | Mount::SharedCache(path) => { 224 | use buildkit_proto::pb::{CacheOpt, CacheSharingOpt}; 225 | 226 | let mount = pb::Mount { 227 | input: -1, 228 | dest: path.to_string_lossy().into(), 229 | output: -1, 230 | 
mount_type: MountType::Cache as i32, 231 | 232 | cache_opt: Some(CacheOpt { 233 | id: path.display().to_string(), 234 | sharing: CacheSharingOpt::Shared as i32, 235 | }), 236 | 237 | ..Default::default() 238 | }; 239 | 240 | return (Either::Right(empty()), mount); 241 | } 242 | 243 | Mount::OptionalSshAgent(path) => { 244 | use buildkit_proto::pb::SshOpt; 245 | 246 | let mount = pb::Mount { 247 | input: -1, 248 | dest: path.to_string_lossy().into(), 249 | output: -1, 250 | mount_type: MountType::Ssh as i32, 251 | 252 | ssh_opt: Some(SshOpt { 253 | mode: 0o600, 254 | optional: true, 255 | ..Default::default() 256 | }), 257 | 258 | ..Default::default() 259 | }; 260 | 261 | return (Either::Right(empty()), mount); 262 | } 263 | }; 264 | 265 | let input = match mount { 266 | Mount::ReadOnlyLayer(input, ..) => input, 267 | Mount::ReadOnlySelector(input, ..) => input, 268 | Mount::Layer(_, input, ..) => input, 269 | 270 | Mount::SharedCache(..) => { 271 | unreachable!(); 272 | } 273 | 274 | Mount::Scratch(..) => { 275 | unreachable!(); 276 | } 277 | 278 | Mount::OptionalSshAgent(..) => { 279 | unreachable!(); 280 | } 281 | }; 282 | 283 | let serialized = cx.register(input.operation()).unwrap(); 284 | let input = Input { 285 | digest: serialized.digest.clone(), 286 | index: input.output().into(), 287 | }; 288 | 289 | last_input_index += 1; 290 | 291 | (Either::Left(once(input)), inner_mount) 292 | }) 293 | .unzip() 294 | }; 295 | 296 | let head = pb::Op { 297 | op: Some(Op::Exec(ExecOp { 298 | mounts, 299 | network: NetMode::Unset.into(), 300 | security: SecurityMode::Sandbox.into(), 301 | meta: Some(self.context.clone().into()), 302 | })), 303 | 304 | inputs: inputs.into_iter().flatten().collect(), 305 | 306 | ..Default::default() 307 | }; 308 | 309 | let metadata = OpMetadata { 310 | description: self.description.clone(), 311 | caps: self.caps.clone(), 312 | ignore_cache: self.ignore_cache, 313 | 314 | ..Default::default() 315 | }; 316 | 317 | Ok(Node::new(head, metadata)) 318 | } 319 | } 320 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/exec/context.rs: -------------------------------------------------------------------------------- 1 | use std::iter::once; 2 | use std::path::PathBuf; 3 | 4 | use buildkit_proto::pb::Meta; 5 | 6 | #[derive(Debug, Clone)] 7 | pub(crate) struct Context { 8 | pub name: String, 9 | pub args: Vec, 10 | pub env: Vec, 11 | 12 | pub cwd: PathBuf, 13 | pub user: String, 14 | } 15 | 16 | impl Context { 17 | pub fn new(name: S) -> Self 18 | where 19 | S: Into, 20 | { 21 | Self { 22 | name: name.into(), 23 | 24 | cwd: PathBuf::from("/"), 25 | user: "root".into(), 26 | 27 | args: vec![], 28 | env: vec![], 29 | } 30 | } 31 | } 32 | 33 | impl Into for Context { 34 | fn into(self) -> Meta { 35 | Meta { 36 | args: { 37 | once(self.name.clone()) 38 | .chain(self.args.iter().cloned()) 39 | .collect() 40 | }, 41 | 42 | env: self.env, 43 | cwd: self.cwd.to_string_lossy().into(), 44 | user: self.user, 45 | 46 | ..Default::default() 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/exec/mount.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use crate::utils::{OperationOutput, OutputIdx}; 4 | 5 | /// Operand of *command execution operation* that specifies how are input sources mounted. 
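///
/// A sketch of how the variants are typically attached to a `Command` (it mirrors the crate
/// examples; the image, shell command, and mount paths are illustrative only):
///
/// ```ignore
/// use buildkit_llb::prelude::*;
///
/// let image = Source::image("library/alpine:latest");
///
/// let cmd = Command::run("/bin/sh")
///     .args(&["-c", "echo 'hello' > /out/greeting"])
///     // Read-only root filesystem taken from another operation's output.
///     .mount(Mount::ReadOnlyLayer(image.output(), "/"))
///     // Empty writable layer, exposed as output 0 of this command.
///     .mount(Mount::Scratch(OutputIdx(0), "/out"))
///     // Persistent cache directory shared between builds (path is arbitrary).
///     .mount(Mount::SharedCache("/var/cache/example"))
///     // Host SSH agent socket, available when invoked with `docker build --ssh=default`.
///     .mount(Mount::OptionalSshAgent("/run/ssh-agent.sock"));
/// ```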
6 | #[derive(Debug, Clone)] 7 | pub enum Mount<'a, P: AsRef<Path>> { 8 | /// Read-only output of another operation. 9 | ReadOnlyLayer(OperationOutput<'a>, P), 10 | 11 | /// Read-only output of another operation with a selector. 12 | ReadOnlySelector(OperationOutput<'a>, P, P), 13 | 14 | /// Empty layer that produces an output. 15 | Scratch(OutputIdx, P), 16 | 17 | /// Writable output of another operation. 18 | Layer(OutputIdx, OperationOutput<'a>, P), 19 | 20 | /// Writable persistent cache. 21 | SharedCache(P), 22 | 23 | /// Optional SSH agent socket at the specified path. 24 | OptionalSshAgent(P), 25 | } 26 | 27 | impl<'a, P: AsRef<Path>> Mount<'a, P> { 28 | /// Transform the mount into an owned variant (basically, with `PathBuf` as the path). 29 | pub fn into_owned(self) -> Mount<'a, PathBuf> { 30 | use Mount::*; 31 | 32 | match self { 33 | ReadOnlySelector(op, path, selector) => { 34 | ReadOnlySelector(op, path.as_ref().into(), selector.as_ref().into()) 35 | } 36 | 37 | ReadOnlyLayer(op, path) => ReadOnlyLayer(op, path.as_ref().into()), 38 | Scratch(output, path) => Scratch(output, path.as_ref().into()), 39 | Layer(output, input, path) => Layer(output, input, path.as_ref().into()), 40 | SharedCache(path) => SharedCache(path.as_ref().into()), 41 | OptionalSshAgent(path) => OptionalSshAgent(path.as_ref().into()), 42 | } 43 | } 44 | 45 | pub fn is_root(&self) -> bool { 46 | use Mount::*; 47 | 48 | let path = match self { 49 | ReadOnlySelector(_, path, ..) => path, 50 | ReadOnlyLayer(_, path) => path, 51 | Scratch(_, path) => path, 52 | Layer(_, _, path) => path, 53 | SharedCache(path) => path, 54 | OptionalSshAgent(_) => return false, 55 | }; 56 | 57 | path.as_ref() == Path::new("/") 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/fs/copy.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::fmt::Debug; 3 | use std::path::{Path, PathBuf}; 4 | 5 | use buildkit_proto::pb; 6 | 7 | use super::path::{LayerPath, UnsetPath}; 8 | use super::FileOperation; 9 | 10 | use crate::serialization::{Context, Result}; 11 | use crate::utils::OutputIdx; 12 | 13 | #[derive(Debug)] 14 | pub struct CopyOperation<From: Debug, To: Debug> { 15 | source: From, 16 | destination: To, 17 | 18 | follow_symlinks: bool, 19 | recursive: bool, 20 | create_path: bool, 21 | wildcard: bool, 22 | 23 | description: HashMap<String, String>, 24 | caps: HashMap<String, bool>, 25 | } 26 | 27 | type OpWithoutSource = CopyOperation<UnsetPath, UnsetPath>; 28 | type OpWithSource<'a> = CopyOperation<LayerPath<'a, PathBuf>, UnsetPath>; 29 | type OpWithDestination<'a> = 30 | CopyOperation<LayerPath<'a, PathBuf>, (OutputIdx, LayerPath<'a, PathBuf>)>; 31 | 32 | impl OpWithoutSource { 33 | pub(crate) fn new() -> OpWithoutSource { 34 | let mut caps = HashMap::<String, bool>::new(); 35 | caps.insert("file.base".into(), true); 36 | 37 | CopyOperation { 38 | source: UnsetPath, 39 | destination: UnsetPath, 40 | 41 | follow_symlinks: false, 42 | recursive: false, 43 | create_path: false, 44 | wildcard: false, 45 | 46 | caps, 47 | description: Default::default(), 48 | } 49 | } 50 | 51 | pub fn from<P>
(self, source: LayerPath<'_, P>) -> OpWithSource 52 | where 53 | P: AsRef<Path>, 54 | { 55 | CopyOperation { 56 | source: source.into_owned(), 57 | destination: UnsetPath, 58 | 59 | follow_symlinks: self.follow_symlinks, 60 | recursive: self.recursive, 61 | create_path: self.create_path, 62 | wildcard: self.wildcard, 63 | 64 | description: self.description, 65 | caps: self.caps, 66 | } 67 | } 68 | } 69 | 70 | impl<'a> OpWithSource<'a> { 71 | pub fn to<P>
(self, output: OutputIdx, destination: LayerPath<'a, P>) -> OpWithDestination<'a> 72 | where 73 | P: AsRef, 74 | { 75 | CopyOperation { 76 | source: self.source, 77 | destination: (output, destination.into_owned()), 78 | 79 | follow_symlinks: self.follow_symlinks, 80 | recursive: self.recursive, 81 | create_path: self.create_path, 82 | wildcard: self.wildcard, 83 | 84 | description: self.description, 85 | caps: self.caps, 86 | } 87 | } 88 | } 89 | 90 | impl<'a> OpWithDestination<'a> { 91 | pub fn into_operation(self) -> super::sequence::SequenceOperation<'a> { 92 | super::sequence::SequenceOperation::new().append(self) 93 | } 94 | } 95 | 96 | impl CopyOperation 97 | where 98 | From: Debug, 99 | To: Debug, 100 | { 101 | pub fn follow_symlinks(mut self, value: bool) -> Self { 102 | self.follow_symlinks = value; 103 | self 104 | } 105 | 106 | pub fn recursive(mut self, value: bool) -> Self { 107 | self.recursive = value; 108 | self 109 | } 110 | 111 | pub fn create_path(mut self, value: bool) -> Self { 112 | self.create_path = value; 113 | self 114 | } 115 | 116 | pub fn wildcard(mut self, value: bool) -> Self { 117 | self.wildcard = value; 118 | self 119 | } 120 | } 121 | 122 | impl<'a> FileOperation for OpWithDestination<'a> { 123 | fn output(&self) -> i32 { 124 | self.destination.0.into() 125 | } 126 | 127 | fn serialize_inputs(&self, cx: &mut Context) -> Result> { 128 | let mut inputs = if let LayerPath::Other(ref op, ..) = self.source { 129 | let serialized_from_head = cx.register(op.operation())?; 130 | 131 | vec![pb::Input { 132 | digest: serialized_from_head.digest.clone(), 133 | index: op.output().into(), 134 | }] 135 | } else { 136 | vec![] 137 | }; 138 | 139 | if let LayerPath::Other(ref op, ..) = self.destination.1 { 140 | let serialized_to_head = cx.register(op.operation())?; 141 | 142 | inputs.push(pb::Input { 143 | digest: serialized_to_head.digest.clone(), 144 | index: op.output().into(), 145 | }); 146 | } 147 | 148 | Ok(inputs) 149 | } 150 | 151 | fn serialize_action( 152 | &self, 153 | inputs_count: usize, 154 | inputs_offset: usize, 155 | ) -> Result { 156 | let (src_idx, src_offset, src) = match self.source { 157 | LayerPath::Scratch(ref path) => (-1, 0, path.to_string_lossy().into()), 158 | 159 | LayerPath::Other(_, ref path) => { 160 | (inputs_offset as i64, 1, path.to_string_lossy().into()) 161 | } 162 | 163 | LayerPath::Own(ref output, ref path) => { 164 | let output: i64 = output.into(); 165 | 166 | ( 167 | inputs_count as i64 + output, 168 | 0, 169 | path.to_string_lossy().into(), 170 | ) 171 | } 172 | }; 173 | 174 | let (dest_idx, dest) = match self.destination.1 { 175 | LayerPath::Scratch(ref path) => (-1, path.to_string_lossy().into()), 176 | 177 | LayerPath::Other(_, ref path) => ( 178 | inputs_offset as i32 + src_offset, 179 | path.to_string_lossy().into(), 180 | ), 181 | 182 | LayerPath::Own(ref output, ref path) => { 183 | let output: i32 = output.into(); 184 | 185 | (inputs_count as i32 + output, path.to_string_lossy().into()) 186 | } 187 | }; 188 | 189 | Ok(pb::FileAction { 190 | input: i64::from(dest_idx), 191 | secondary_input: src_idx, 192 | 193 | output: i64::from(self.output()), 194 | 195 | action: Some(pb::file_action::Action::Copy(pb::FileActionCopy { 196 | src, 197 | dest, 198 | 199 | follow_symlink: self.follow_symlinks, 200 | dir_copy_contents: self.recursive, 201 | create_dest_path: self.create_path, 202 | allow_wildcard: self.wildcard, 203 | 204 | // TODO: make this configurable 205 | mode: -1, 206 | 207 | // TODO: make this configurable 208 | 
timestamp: -1, 209 | 210 | ..Default::default() 211 | })), 212 | }) 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/fs/mkdir.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::path::{Path, PathBuf}; 3 | 4 | use buildkit_proto::pb; 5 | 6 | use super::path::LayerPath; 7 | use super::FileOperation; 8 | 9 | use crate::serialization::{Context, Result}; 10 | use crate::utils::OutputIdx; 11 | 12 | #[derive(Debug)] 13 | pub struct MakeDirOperation<'a> { 14 | path: LayerPath<'a, PathBuf>, 15 | output: OutputIdx, 16 | 17 | make_parents: bool, 18 | 19 | description: HashMap<String, String>, 20 | caps: HashMap<String, bool>, 21 | } 22 | 23 | impl<'a> MakeDirOperation<'a> { 24 | pub(crate) fn new<P>
(output: OutputIdx, path: LayerPath<'a, P>) -> Self 25 | where 26 | P: AsRef, 27 | { 28 | let mut caps = HashMap::::new(); 29 | caps.insert("file.base".into(), true); 30 | 31 | MakeDirOperation { 32 | path: path.into_owned(), 33 | output, 34 | 35 | make_parents: false, 36 | 37 | caps, 38 | description: Default::default(), 39 | } 40 | } 41 | 42 | pub fn make_parents(mut self, value: bool) -> Self { 43 | self.make_parents = value; 44 | self 45 | } 46 | 47 | pub fn into_operation(self) -> super::sequence::SequenceOperation<'a> { 48 | super::sequence::SequenceOperation::new().append(self) 49 | } 50 | } 51 | 52 | impl<'a> FileOperation for MakeDirOperation<'a> { 53 | fn output(&self) -> i32 { 54 | self.output.into() 55 | } 56 | 57 | fn serialize_inputs(&self, cx: &mut Context) -> Result> { 58 | if let LayerPath::Other(ref op, ..) = self.path { 59 | let serialized_from_head = cx.register(op.operation())?; 60 | 61 | let inputs = vec![pb::Input { 62 | digest: serialized_from_head.digest.clone(), 63 | index: op.output().into(), 64 | }]; 65 | 66 | Ok(inputs) 67 | } else { 68 | Ok(Vec::with_capacity(0)) 69 | } 70 | } 71 | 72 | fn serialize_action( 73 | &self, 74 | inputs_count: usize, 75 | inputs_offset: usize, 76 | ) -> Result { 77 | let (src_idx, path) = match self.path { 78 | LayerPath::Scratch(ref path) => (-1, path.to_string_lossy().into()), 79 | LayerPath::Other(_, ref path) => (inputs_offset as i64, path.to_string_lossy().into()), 80 | 81 | LayerPath::Own(ref output, ref path) => { 82 | let output: i64 = output.into(); 83 | 84 | (inputs_count as i64 + output, path.to_string_lossy().into()) 85 | } 86 | }; 87 | 88 | Ok(pb::FileAction { 89 | input: src_idx, 90 | secondary_input: -1, 91 | 92 | output: i64::from(self.output()), 93 | 94 | action: Some(pb::file_action::Action::Mkdir(pb::FileActionMkDir { 95 | path, 96 | 97 | make_parents: self.make_parents, 98 | 99 | // TODO: make this configurable 100 | mode: -1, 101 | 102 | // TODO: make this configurable 103 | timestamp: -1, 104 | 105 | // TODO: make this configurable 106 | owner: None, 107 | })), 108 | }) 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/fs/mkfile.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::path::{Path, PathBuf}; 3 | 4 | use buildkit_proto::pb; 5 | 6 | use super::path::LayerPath; 7 | use super::FileOperation; 8 | 9 | use crate::serialization::{Context, Result}; 10 | use crate::utils::OutputIdx; 11 | 12 | #[derive(Debug)] 13 | pub struct MakeFileOperation<'a> { 14 | path: LayerPath<'a, PathBuf>, 15 | output: OutputIdx, 16 | 17 | data: Option>, 18 | 19 | description: HashMap, 20 | caps: HashMap, 21 | } 22 | 23 | impl<'a> MakeFileOperation<'a> { 24 | pub(crate) fn new
<P>
(output: OutputIdx, path: LayerPath<'a, P>) -> Self 25 | where 26 | P: AsRef, 27 | { 28 | let mut caps = HashMap::::new(); 29 | caps.insert("file.base".into(), true); 30 | 31 | MakeFileOperation { 32 | path: path.into_owned(), 33 | output, 34 | 35 | data: None, 36 | 37 | caps, 38 | description: Default::default(), 39 | } 40 | } 41 | 42 | pub fn data(mut self, bytes: Vec) -> Self { 43 | self.data = Some(bytes); 44 | self 45 | } 46 | 47 | pub fn into_operation(self) -> super::sequence::SequenceOperation<'a> { 48 | super::sequence::SequenceOperation::new().append(self) 49 | } 50 | } 51 | 52 | impl<'a> FileOperation for MakeFileOperation<'a> { 53 | fn output(&self) -> i32 { 54 | self.output.into() 55 | } 56 | 57 | fn serialize_inputs(&self, cx: &mut Context) -> Result> { 58 | if let LayerPath::Other(ref op, ..) = self.path { 59 | let serialized_from_head = cx.register(op.operation())?; 60 | 61 | let inputs = vec![pb::Input { 62 | digest: serialized_from_head.digest.clone(), 63 | index: op.output().into(), 64 | }]; 65 | 66 | Ok(inputs) 67 | } else { 68 | Ok(Vec::with_capacity(0)) 69 | } 70 | } 71 | 72 | fn serialize_action( 73 | &self, 74 | inputs_count: usize, 75 | inputs_offset: usize, 76 | ) -> Result { 77 | let (src_idx, path) = match self.path { 78 | LayerPath::Scratch(ref path) => (-1, path.to_string_lossy().into()), 79 | LayerPath::Other(_, ref path) => (inputs_offset as i64, path.to_string_lossy().into()), 80 | 81 | LayerPath::Own(ref output, ref path) => { 82 | let output: i64 = output.into(); 83 | 84 | (inputs_count as i64 + output, path.to_string_lossy().into()) 85 | } 86 | }; 87 | 88 | Ok(pb::FileAction { 89 | input: src_idx, 90 | secondary_input: -1, 91 | 92 | output: i64::from(self.output()), 93 | 94 | action: Some(pb::file_action::Action::Mkfile(pb::FileActionMkFile { 95 | path, 96 | 97 | data: self.data.clone().unwrap_or_else(|| Vec::with_capacity(0)), 98 | 99 | // TODO: make this configurable 100 | mode: -1, 101 | 102 | // TODO: make this configurable 103 | timestamp: -1, 104 | 105 | // TODO: make this configurable 106 | owner: None, 107 | })), 108 | }) 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/fs/path.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | 3 | use crate::utils::{OperationOutput, OwnOutputIdx}; 4 | 5 | /// Internal representation for not yet specified path. 6 | #[derive(Debug)] 7 | pub struct UnsetPath; 8 | 9 | /// Operand of *file system operations* that defines either source or destination layer and a path. 10 | #[derive(Debug)] 11 | pub enum LayerPath<'a, P: AsRef> { 12 | /// References one of the *current operation outputs* and a path. 13 | Own(OwnOutputIdx, P), 14 | 15 | /// References an *output of another operation* and a path. 16 | Other(OperationOutput<'a>, P), 17 | 18 | /// A path in an *empty* layer (equivalent of Dockerfile's scratch source). 19 | Scratch(P), 20 | } 21 | 22 | impl<'a, P: AsRef> LayerPath<'a, P> { 23 | /// Transform the layer path into owned variant (basically, with `PathBuf` as the path). 
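///
/// A small sketch of the conversion; the path literal and the `buildkit_llb::prelude` import
/// are illustrative:
///
/// ```ignore
/// use std::path::PathBuf;
/// use buildkit_llb::prelude::*;
///
/// // A borrowed `&str` path becomes an owned `PathBuf`, so the value no longer
/// // borrows the caller's string.
/// let borrowed: LayerPath<'static, &str> = LayerPath::Scratch("/output");
/// let owned: LayerPath<'static, PathBuf> = borrowed.into_owned();
/// ```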
24 | pub fn into_owned(self) -> LayerPath<'a, PathBuf> { 25 | use LayerPath::*; 26 | 27 | match self { 28 | Other(input, path) => Other(input, path.as_ref().into()), 29 | Own(output, path) => Own(output, path.as_ref().into()), 30 | Scratch(path) => Scratch(path.as_ref().into()), 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/fs/sequence.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::sync::Arc; 3 | 4 | use buildkit_proto::pb::{self, op::Op}; 5 | 6 | use super::FileOperation; 7 | 8 | use crate::ops::*; 9 | use crate::serialization::{Context, Node, Operation, OperationId, Result}; 10 | use crate::utils::{OperationOutput, OutputIdx}; 11 | 12 | #[derive(Debug)] 13 | pub struct SequenceOperation<'a> { 14 | id: OperationId, 15 | inner: Vec>, 16 | 17 | description: HashMap, 18 | caps: HashMap, 19 | ignore_cache: bool, 20 | } 21 | 22 | impl<'a> SequenceOperation<'a> { 23 | pub(crate) fn new() -> Self { 24 | let mut caps = HashMap::::new(); 25 | caps.insert("file.base".into(), true); 26 | 27 | Self { 28 | id: OperationId::default(), 29 | inner: vec![], 30 | 31 | caps, 32 | description: Default::default(), 33 | ignore_cache: false, 34 | } 35 | } 36 | 37 | pub fn append(mut self, op: T) -> Self 38 | where 39 | T: FileOperation + 'a, 40 | { 41 | // TODO: verify no duplicated outputs 42 | 43 | self.inner.push(Box::new(op)); 44 | self 45 | } 46 | 47 | pub fn last_output_index(&self) -> Option { 48 | // TODO: make sure the `inner` elements have monotonic indexes 49 | 50 | self.inner 51 | .iter() 52 | .filter(|fs| fs.output() >= 0) 53 | .last() 54 | .map(|fs| fs.output() as u32) 55 | } 56 | } 57 | 58 | impl<'a, 'b: 'a> MultiBorrowedOutput<'b> for SequenceOperation<'b> { 59 | fn output(&'b self, index: u32) -> OperationOutput<'b> { 60 | // TODO: check if the requested index available. 61 | OperationOutput::borrowed(self, OutputIdx(index)) 62 | } 63 | } 64 | 65 | impl<'a> MultiOwnedOutput<'a> for Arc> { 66 | fn output(&self, index: u32) -> OperationOutput<'a> { 67 | // TODO: check if the requested index available. 
68 | OperationOutput::owned(self.clone(), OutputIdx(index)) 69 | } 70 | } 71 | 72 | impl<'a, 'b: 'a> MultiBorrowedLastOutput<'b> for SequenceOperation<'b> { 73 | fn last_output(&'b self) -> Option> { 74 | self.last_output_index().map(|index| self.output(index)) 75 | } 76 | } 77 | 78 | impl<'a> MultiOwnedLastOutput<'a> for Arc> { 79 | fn last_output(&self) -> Option> { 80 | self.last_output_index().map(|index| self.output(index)) 81 | } 82 | } 83 | 84 | impl<'a> OperationBuilder<'a> for SequenceOperation<'a> { 85 | fn custom_name(mut self, name: S) -> Self 86 | where 87 | S: Into, 88 | { 89 | self.description 90 | .insert("llb.customname".into(), name.into()); 91 | 92 | self 93 | } 94 | 95 | fn ignore_cache(mut self, ignore: bool) -> Self { 96 | self.ignore_cache = ignore; 97 | self 98 | } 99 | } 100 | 101 | impl<'a> Operation for SequenceOperation<'a> { 102 | fn id(&self) -> &OperationId { 103 | &self.id 104 | } 105 | 106 | fn serialize(&self, cx: &mut Context) -> Result { 107 | let mut inputs = vec![]; 108 | let mut input_offsets = vec![]; 109 | 110 | for item in &self.inner { 111 | let mut inner_inputs = item.serialize_inputs(cx)?; 112 | 113 | input_offsets.push(inputs.len()); 114 | inputs.append(&mut inner_inputs); 115 | } 116 | 117 | let mut actions = vec![]; 118 | 119 | for (item, offset) in self.inner.iter().zip(input_offsets.into_iter()) { 120 | actions.push(item.serialize_action(inputs.len(), offset)?); 121 | } 122 | 123 | let head = pb::Op { 124 | inputs, 125 | op: Some(Op::File(pb::FileOp { actions })), 126 | 127 | ..Default::default() 128 | }; 129 | 130 | let metadata = pb::OpMetadata { 131 | description: self.description.clone(), 132 | caps: self.caps.clone(), 133 | ignore_cache: self.ignore_cache, 134 | 135 | ..Default::default() 136 | }; 137 | 138 | Ok(Node::new(head, metadata)) 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/mod.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | pub mod exec; 4 | pub mod fs; 5 | pub mod source; 6 | pub mod terminal; 7 | 8 | pub use self::exec::Command; 9 | pub use self::fs::FileSystem; 10 | pub use self::source::Source; 11 | pub use self::terminal::Terminal; 12 | 13 | use crate::utils::OperationOutput; 14 | 15 | pub trait MultiBorrowedOutput<'a> { 16 | fn output(&'a self, number: u32) -> OperationOutput<'a>; 17 | } 18 | 19 | pub trait MultiBorrowedLastOutput<'a> { 20 | fn last_output(&'a self) -> Option>; 21 | } 22 | 23 | pub trait MultiOwnedOutput<'a> { 24 | fn output(&self, number: u32) -> OperationOutput<'a>; 25 | } 26 | 27 | pub trait MultiOwnedLastOutput<'a> { 28 | fn last_output(&self) -> Option>; 29 | } 30 | 31 | pub trait SingleBorrowedOutput<'a> { 32 | fn output(&'a self) -> OperationOutput<'a>; 33 | } 34 | 35 | pub trait SingleOwnedOutput<'a> { 36 | fn output(&self) -> OperationOutput<'a>; 37 | } 38 | 39 | /// Common operation methods. 40 | pub trait OperationBuilder<'a> { 41 | /// Sets an operation display name. 42 | fn custom_name(self, name: S) -> Self 43 | where 44 | S: Into; 45 | 46 | /// Sets caching behavior. 47 | fn ignore_cache(self, ignore: bool) -> Self; 48 | 49 | /// Convert the operation into `Arc` so it can be shared when efficient borrowing is not possible. 
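///
/// A hedged sketch of the intended pattern; the image name and the `buildkit_llb::prelude`
/// import are illustrative:
///
/// ```ignore
/// use buildkit_llb::prelude::*;
///
/// // An owned, reference-counted source can feed several commands without
/// // tying their lifetimes to a single borrow.
/// let base = Source::image("library/alpine:latest").ref_counted();
///
/// let first = Command::run("/bin/sh").mount(Mount::ReadOnlyLayer(base.output(), "/"));
/// let second = Command::run("/bin/sh").mount(Mount::ReadOnlyLayer(base.output(), "/"));
/// ```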
50 | fn ref_counted(self) -> Arc 51 | where 52 | Self: Sized + 'a, 53 | { 54 | Arc::new(self) 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/source/git.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::sync::Arc; 3 | 4 | use buildkit_proto::pb::{self, op::Op, OpMetadata, SourceOp}; 5 | 6 | use crate::ops::{OperationBuilder, SingleBorrowedOutput, SingleOwnedOutput}; 7 | use crate::serialization::{Context, Node, Operation, OperationId, Result}; 8 | use crate::utils::{OperationOutput, OutputIdx}; 9 | 10 | #[derive(Default, Debug)] 11 | pub struct GitSource { 12 | id: OperationId, 13 | remote: String, 14 | reference: Option, 15 | description: HashMap, 16 | ignore_cache: bool, 17 | } 18 | 19 | impl GitSource { 20 | pub(crate) fn new(url: S) -> Self 21 | where 22 | S: Into, 23 | { 24 | let mut raw_url = url.into(); 25 | let remote = if raw_url.starts_with("http://") { 26 | raw_url.split_off(7) 27 | } else if raw_url.starts_with("https://") { 28 | raw_url.split_off(8) 29 | } else if raw_url.starts_with("git://") { 30 | raw_url.split_off(6) 31 | } else if raw_url.starts_with("git@") { 32 | raw_url.split_off(4) 33 | } else { 34 | raw_url 35 | }; 36 | 37 | Self { 38 | id: OperationId::default(), 39 | remote, 40 | reference: None, 41 | description: Default::default(), 42 | ignore_cache: false, 43 | } 44 | } 45 | } 46 | 47 | impl GitSource { 48 | pub fn with_reference(mut self, reference: S) -> Self 49 | where 50 | S: Into, 51 | { 52 | self.reference = Some(reference.into()); 53 | self 54 | } 55 | } 56 | 57 | impl<'a> SingleBorrowedOutput<'a> for GitSource { 58 | fn output(&'a self) -> OperationOutput<'a> { 59 | OperationOutput::borrowed(self, OutputIdx(0)) 60 | } 61 | } 62 | 63 | impl<'a> SingleOwnedOutput<'static> for Arc { 64 | fn output(&self) -> OperationOutput<'static> { 65 | OperationOutput::owned(self.clone(), OutputIdx(0)) 66 | } 67 | } 68 | 69 | impl OperationBuilder<'static> for GitSource { 70 | fn custom_name(mut self, name: S) -> Self 71 | where 72 | S: Into, 73 | { 74 | self.description 75 | .insert("llb.customname".into(), name.into()); 76 | 77 | self 78 | } 79 | 80 | fn ignore_cache(mut self, ignore: bool) -> Self { 81 | self.ignore_cache = ignore; 82 | self 83 | } 84 | } 85 | 86 | impl Operation for GitSource { 87 | fn id(&self) -> &OperationId { 88 | &self.id 89 | } 90 | 91 | fn serialize(&self, _: &mut Context) -> Result { 92 | let identifier = if let Some(ref reference) = self.reference { 93 | format!("git://{}#{}", self.remote, reference) 94 | } else { 95 | format!("git://{}", self.remote) 96 | }; 97 | 98 | let head = pb::Op { 99 | op: Some(Op::Source(SourceOp { 100 | identifier, 101 | attrs: Default::default(), 102 | })), 103 | 104 | ..Default::default() 105 | }; 106 | 107 | let metadata = OpMetadata { 108 | description: self.description.clone(), 109 | ignore_cache: self.ignore_cache, 110 | 111 | ..Default::default() 112 | }; 113 | 114 | Ok(Node::new(head, metadata)) 115 | } 116 | } 117 | 118 | #[test] 119 | fn serialization() { 120 | crate::check_op!( 121 | GitSource::new("any.url"), 122 | |digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" }, 123 | |description| { vec![] }, 124 | |caps| { vec![] }, 125 | |cached_tail| { vec![] }, 126 | |inputs| { vec![] }, 127 | |op| { 128 | Op::Source(SourceOp { 129 | identifier: "git://any.url".into(), 130 | attrs: Default::default(), 131 | }) 132 | }, 
133 | ); 134 | 135 | crate::check_op!( 136 | GitSource::new("any.url").custom_name("git custom name"), 137 | |digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" }, 138 | |description| { vec![("llb.customname", "git custom name")] }, 139 | |caps| { vec![] }, 140 | |cached_tail| { vec![] }, 141 | |inputs| { vec![] }, 142 | |op| { 143 | Op::Source(SourceOp { 144 | identifier: "git://any.url".into(), 145 | attrs: Default::default(), 146 | }) 147 | }, 148 | ); 149 | } 150 | 151 | #[test] 152 | fn prefixes() { 153 | crate::check_op!( 154 | GitSource::new("http://any.url"), 155 | |digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" }, 156 | |description| { vec![] }, 157 | |caps| { vec![] }, 158 | |cached_tail| { vec![] }, 159 | |inputs| { vec![] }, 160 | |op| { 161 | Op::Source(SourceOp { 162 | identifier: "git://any.url".into(), 163 | attrs: Default::default(), 164 | }) 165 | }, 166 | ); 167 | 168 | crate::check_op!( 169 | GitSource::new("https://any.url"), 170 | |digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" }, 171 | |description| { vec![] }, 172 | |caps| { vec![] }, 173 | |cached_tail| { vec![] }, 174 | |inputs| { vec![] }, 175 | |op| { 176 | Op::Source(SourceOp { 177 | identifier: "git://any.url".into(), 178 | attrs: Default::default(), 179 | }) 180 | }, 181 | ); 182 | 183 | crate::check_op!( 184 | GitSource::new("git://any.url"), 185 | |digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" }, 186 | |description| { vec![] }, 187 | |caps| { vec![] }, 188 | |cached_tail| { vec![] }, 189 | |inputs| { vec![] }, 190 | |op| { 191 | Op::Source(SourceOp { 192 | identifier: "git://any.url".into(), 193 | attrs: Default::default(), 194 | }) 195 | }, 196 | ); 197 | 198 | crate::check_op!( 199 | GitSource::new("git@any.url"), 200 | |digest| { "sha256:ecde982e19ace932e5474e57b0ca71ba690ed7d28abff2a033e8f969e22bf2d8" }, 201 | |description| { vec![] }, 202 | |caps| { vec![] }, 203 | |cached_tail| { vec![] }, 204 | |inputs| { vec![] }, 205 | |op| { 206 | Op::Source(SourceOp { 207 | identifier: "git://any.url".into(), 208 | attrs: Default::default(), 209 | }) 210 | }, 211 | ); 212 | } 213 | 214 | #[test] 215 | fn with_reference() { 216 | crate::check_op!( 217 | GitSource::new("any.url").with_reference("abcdef"), 218 | |digest| { "sha256:f59aa7f8db62e0b5c2a1da396752ba8a2bb0b5d28ddcfdd1d4f822d26ebfe3cf" }, 219 | |description| { vec![] }, 220 | |caps| { vec![] }, 221 | |cached_tail| { vec![] }, 222 | |inputs| { vec![] }, 223 | |op| { 224 | Op::Source(SourceOp { 225 | identifier: "git://any.url#abcdef".into(), 226 | attrs: Default::default(), 227 | }) 228 | }, 229 | ); 230 | } 231 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/source/http.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::sync::Arc; 3 | 4 | use buildkit_proto::pb::{self, op::Op, OpMetadata, SourceOp}; 5 | 6 | use crate::ops::{OperationBuilder, SingleBorrowedOutput, SingleOwnedOutput}; 7 | use crate::serialization::{Context, Node, Operation, OperationId, Result}; 8 | use crate::utils::{OperationOutput, OutputIdx}; 9 | 10 | #[derive(Default, Debug)] 11 | pub struct HttpSource { 12 | id: OperationId, 13 | url: String, 14 | file_name: Option, 15 | description: HashMap, 16 | ignore_cache: bool, 17 | } 18 | 19 | impl HttpSource { 20 | pub(crate) fn new(url: S) -> Self 21 | where 22 | 
S: Into, 23 | { 24 | Self { 25 | id: OperationId::default(), 26 | url: url.into(), 27 | file_name: None, 28 | description: Default::default(), 29 | ignore_cache: false, 30 | } 31 | } 32 | } 33 | 34 | impl HttpSource { 35 | pub fn with_file_name(mut self, name: S) -> Self 36 | where 37 | S: Into, 38 | { 39 | self.file_name = Some(name.into()); 40 | self 41 | } 42 | } 43 | 44 | impl<'a> SingleBorrowedOutput<'a> for HttpSource { 45 | fn output(&'a self) -> OperationOutput<'a> { 46 | OperationOutput::borrowed(self, OutputIdx(0)) 47 | } 48 | } 49 | 50 | impl<'a> SingleOwnedOutput<'static> for Arc { 51 | fn output(&self) -> OperationOutput<'static> { 52 | OperationOutput::owned(self.clone(), OutputIdx(0)) 53 | } 54 | } 55 | 56 | impl OperationBuilder<'static> for HttpSource { 57 | fn custom_name(mut self, name: S) -> Self 58 | where 59 | S: Into, 60 | { 61 | self.description 62 | .insert("llb.customname".into(), name.into()); 63 | 64 | self 65 | } 66 | 67 | fn ignore_cache(mut self, ignore: bool) -> Self { 68 | self.ignore_cache = ignore; 69 | self 70 | } 71 | } 72 | 73 | impl Operation for HttpSource { 74 | fn id(&self) -> &OperationId { 75 | &self.id 76 | } 77 | 78 | fn serialize(&self, _: &mut Context) -> Result { 79 | let mut attrs = HashMap::default(); 80 | 81 | if let Some(ref file_name) = self.file_name { 82 | attrs.insert("http.filename".into(), file_name.into()); 83 | } 84 | 85 | let head = pb::Op { 86 | op: Some(Op::Source(SourceOp { 87 | identifier: self.url.clone(), 88 | attrs, 89 | })), 90 | 91 | ..Default::default() 92 | }; 93 | 94 | let metadata = OpMetadata { 95 | description: self.description.clone(), 96 | ignore_cache: self.ignore_cache, 97 | 98 | ..Default::default() 99 | }; 100 | 101 | Ok(Node::new(head, metadata)) 102 | } 103 | } 104 | 105 | #[test] 106 | fn serialization() { 107 | crate::check_op!( 108 | HttpSource::new("http://any.url/with/path"), 109 | |digest| { "sha256:22ec64461f39dd3b54680fc240b459248b1ced597f113b5d692abe9695860d12" }, 110 | |description| { vec![] }, 111 | |caps| { vec![] }, 112 | |cached_tail| { vec![] }, 113 | |inputs| { vec![] }, 114 | |op| { 115 | Op::Source(SourceOp { 116 | identifier: "http://any.url/with/path".into(), 117 | attrs: Default::default(), 118 | }) 119 | }, 120 | ); 121 | 122 | crate::check_op!( 123 | HttpSource::new("http://any.url/with/path").custom_name("git custom name"), 124 | |digest| { "sha256:22ec64461f39dd3b54680fc240b459248b1ced597f113b5d692abe9695860d12" }, 125 | |description| { vec![("llb.customname", "git custom name")] }, 126 | |caps| { vec![] }, 127 | |cached_tail| { vec![] }, 128 | |inputs| { vec![] }, 129 | |op| { 130 | Op::Source(SourceOp { 131 | identifier: "http://any.url/with/path".into(), 132 | attrs: Default::default(), 133 | }) 134 | }, 135 | ); 136 | 137 | crate::check_op!( 138 | HttpSource::new("http://any.url/with/path").with_file_name("file.name"), 139 | |digest| { "sha256:e1fe6584287dfa2b065ed29fcf4f77bcf86fb54781832d2f45074fa1671df692" }, 140 | |description| { vec![] }, 141 | |caps| { vec![] }, 142 | |cached_tail| { vec![] }, 143 | |inputs| { vec![] }, 144 | |op| { 145 | Op::Source(SourceOp { 146 | identifier: "http://any.url/with/path".into(), 147 | attrs: vec![("http.filename".to_string(), "file.name".to_string())] 148 | .into_iter() 149 | .collect(), 150 | }) 151 | }, 152 | ); 153 | } 154 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/source/local.rs: -------------------------------------------------------------------------------- 1 | use 
std::collections::HashMap; 2 | use std::sync::Arc; 3 | 4 | use buildkit_proto::pb::{self, op::Op, OpMetadata, SourceOp}; 5 | 6 | use crate::ops::{OperationBuilder, SingleBorrowedOutput, SingleOwnedOutput}; 7 | use crate::serialization::{Context, Node, Operation, OperationId, Result}; 8 | use crate::utils::{OperationOutput, OutputIdx}; 9 | 10 | #[derive(Default, Debug)] 11 | pub struct LocalSource { 12 | id: OperationId, 13 | name: String, 14 | description: HashMap, 15 | ignore_cache: bool, 16 | 17 | exclude: Vec, 18 | include: Vec, 19 | } 20 | 21 | impl LocalSource { 22 | pub(crate) fn new(name: S) -> Self 23 | where 24 | S: Into, 25 | { 26 | Self { 27 | id: OperationId::default(), 28 | name: name.into(), 29 | ignore_cache: false, 30 | 31 | ..Default::default() 32 | } 33 | } 34 | 35 | pub fn add_include_pattern(mut self, include: S) -> Self 36 | where 37 | S: Into, 38 | { 39 | // TODO: add `source.local.includepatterns` capability 40 | self.include.push(include.into()); 41 | self 42 | } 43 | 44 | pub fn add_exclude_pattern(mut self, exclude: S) -> Self 45 | where 46 | S: Into, 47 | { 48 | // TODO: add `source.local.excludepatterns` capability 49 | self.exclude.push(exclude.into()); 50 | self 51 | } 52 | } 53 | 54 | impl<'a> SingleBorrowedOutput<'a> for LocalSource { 55 | fn output(&'a self) -> OperationOutput<'a> { 56 | OperationOutput::borrowed(self, OutputIdx(0)) 57 | } 58 | } 59 | 60 | impl<'a> SingleOwnedOutput<'static> for Arc { 61 | fn output(&self) -> OperationOutput<'static> { 62 | OperationOutput::owned(self.clone(), OutputIdx(0)) 63 | } 64 | } 65 | 66 | impl OperationBuilder<'static> for LocalSource { 67 | fn custom_name(mut self, name: S) -> Self 68 | where 69 | S: Into, 70 | { 71 | self.description 72 | .insert("llb.customname".into(), name.into()); 73 | 74 | self 75 | } 76 | 77 | fn ignore_cache(mut self, ignore: bool) -> Self { 78 | self.ignore_cache = ignore; 79 | self 80 | } 81 | } 82 | 83 | impl Operation for LocalSource { 84 | fn id(&self) -> &OperationId { 85 | &self.id 86 | } 87 | 88 | fn serialize(&self, _: &mut Context) -> Result { 89 | let mut attrs = HashMap::default(); 90 | 91 | if !self.exclude.is_empty() { 92 | attrs.insert( 93 | "local.excludepatterns".into(), 94 | serde_json::to_string(&self.exclude).unwrap(), 95 | ); 96 | } 97 | 98 | if !self.include.is_empty() { 99 | attrs.insert( 100 | "local.includepattern".into(), 101 | serde_json::to_string(&self.include).unwrap(), 102 | ); 103 | } 104 | 105 | let head = pb::Op { 106 | op: Some(Op::Source(SourceOp { 107 | identifier: format!("local://{}", self.name), 108 | attrs, 109 | })), 110 | 111 | ..Default::default() 112 | }; 113 | 114 | let metadata = OpMetadata { 115 | description: self.description.clone(), 116 | ignore_cache: self.ignore_cache, 117 | 118 | ..Default::default() 119 | }; 120 | 121 | Ok(Node::new(head, metadata)) 122 | } 123 | } 124 | 125 | #[test] 126 | fn serialization() { 127 | crate::check_op!( 128 | LocalSource::new("context"), 129 | |digest| { "sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702" }, 130 | |description| { vec![] }, 131 | |caps| { vec![] }, 132 | |cached_tail| { vec![] }, 133 | |inputs| { vec![] }, 134 | |op| { 135 | Op::Source(SourceOp { 136 | identifier: "local://context".into(), 137 | attrs: Default::default(), 138 | }) 139 | }, 140 | ); 141 | 142 | crate::check_op!( 143 | LocalSource::new("context").custom_name("context custom name"), 144 | |digest| { "sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702" }, 145 | |description| { 
vec![("llb.customname", "context custom name")] }, 146 | |caps| { vec![] }, 147 | |cached_tail| { vec![] }, 148 | |inputs| { vec![] }, 149 | |op| { 150 | Op::Source(SourceOp { 151 | identifier: "local://context".into(), 152 | attrs: Default::default(), 153 | }) 154 | }, 155 | ); 156 | 157 | crate::check_op!( 158 | { 159 | LocalSource::new("context") 160 | .custom_name("context custom name") 161 | .add_exclude_pattern("**/target") 162 | .add_exclude_pattern("Dockerfile") 163 | }, 164 | |digest| { "sha256:f6962b8bb1659c63a2c2c3e2a7ccf0326c87530dd70c514343f127e4c20460c4" }, 165 | |description| { vec![("llb.customname", "context custom name")] }, 166 | |caps| { vec![] }, 167 | |cached_tail| { vec![] }, 168 | |inputs| { vec![] }, 169 | |op| { 170 | Op::Source(SourceOp { 171 | identifier: "local://context".into(), 172 | attrs: crate::utils::test::to_map(vec![( 173 | "local.excludepatterns", 174 | r#"["**/target","Dockerfile"]"#, 175 | )]), 176 | }) 177 | }, 178 | ); 179 | 180 | crate::check_op!( 181 | { 182 | LocalSource::new("context") 183 | .custom_name("context custom name") 184 | .add_include_pattern("Cargo.toml") 185 | .add_include_pattern("inner/Cargo.toml") 186 | }, 187 | |digest| { "sha256:a7e628333262b810572f83193bbf8554e688abfb51d44ac30bdad7fa425f3839" }, 188 | |description| { vec![("llb.customname", "context custom name")] }, 189 | |caps| { vec![] }, 190 | |cached_tail| { vec![] }, 191 | |inputs| { vec![] }, 192 | |op| { 193 | Op::Source(SourceOp { 194 | identifier: "local://context".into(), 195 | attrs: crate::utils::test::to_map(vec![( 196 | "local.includepattern", 197 | r#"["Cargo.toml","inner/Cargo.toml"]"#, 198 | )]), 199 | }) 200 | }, 201 | ); 202 | } 203 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/source/mod.rs: -------------------------------------------------------------------------------- 1 | mod git; 2 | mod http; 3 | mod image; 4 | mod local; 5 | 6 | pub use self::git::GitSource; 7 | pub use self::http::HttpSource; 8 | pub use self::image::{ImageSource, ResolveMode}; 9 | pub use self::local::LocalSource; 10 | 11 | /// Provide an input for other operations. For example: `FROM` directive in Dockerfile. 12 | #[derive(Debug)] 13 | pub struct Source; 14 | 15 | impl Source { 16 | pub fn image(name: S) -> ImageSource 17 | where 18 | S: Into, 19 | { 20 | ImageSource::new(name) 21 | } 22 | 23 | pub fn git(url: S) -> GitSource 24 | where 25 | S: Into, 26 | { 27 | GitSource::new(url) 28 | } 29 | 30 | pub fn local(name: S) -> LocalSource 31 | where 32 | S: Into, 33 | { 34 | LocalSource::new(name) 35 | } 36 | 37 | pub fn http(name: S) -> HttpSource 38 | where 39 | S: Into, 40 | { 41 | HttpSource::new(name) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /buildkit-llb/src/ops/terminal.rs: -------------------------------------------------------------------------------- 1 | use std::io::{self, Write}; 2 | use std::iter::once; 3 | 4 | use buildkit_proto::pb::{self, Input}; 5 | use prost::Message; 6 | 7 | use crate::serialization::{Context, Node, Result}; 8 | use crate::utils::OperationOutput; 9 | 10 | /// Final operation in the graph. Responsible for printing the complete LLB definition. 
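///
/// A minimal sketch of emitting a definition; the image name and the `buildkit_llb::prelude`
/// import are illustrative:
///
/// ```ignore
/// use std::io::stdout;
/// use buildkit_llb::prelude::*;
///
/// let image = Source::image("library/alpine:latest");
///
/// // Serializes the graph reachable from `image.output()` and writes the
/// // protobuf-encoded LLB definition to stdout for BuildKit to consume.
/// Terminal::with(image.output())
///     .write_definition(stdout())
///     .unwrap();
/// ```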
11 | #[derive(Debug)] 12 | pub struct Terminal<'a> { 13 | input: OperationOutput<'a>, 14 | } 15 | 16 | impl<'a> Terminal<'a> { 17 | pub fn with(input: OperationOutput<'a>) -> Self { 18 | Self { input } 19 | } 20 | 21 | pub fn into_definition(self) -> pb::Definition { 22 | let mut cx = Context::default(); 23 | let final_node_iter = once(self.serialize(&mut cx).unwrap()); 24 | 25 | let (def, metadata) = { 26 | cx.into_registered_nodes() 27 | .chain(final_node_iter) 28 | .map(|node| (node.bytes, (node.digest, node.metadata))) 29 | .unzip() 30 | }; 31 | 32 | pb::Definition { def, metadata } 33 | } 34 | 35 | pub fn write_definition(self, mut writer: impl Write) -> io::Result<()> { 36 | let mut bytes = Vec::new(); 37 | self.into_definition().encode(&mut bytes).unwrap(); 38 | 39 | writer.write_all(&bytes) 40 | } 41 | 42 | fn serialize(&self, cx: &mut Context) -> Result { 43 | let final_op = pb::Op { 44 | inputs: vec![Input { 45 | digest: cx.register(self.input.operation())?.digest.clone(), 46 | index: self.input.output().into(), 47 | }], 48 | 49 | ..Default::default() 50 | }; 51 | 52 | Ok(Node::new(final_op, Default::default())) 53 | } 54 | } 55 | 56 | #[test] 57 | fn serialization() { 58 | use crate::prelude::*; 59 | 60 | let context = Source::local("context"); 61 | let builder_image = Source::image("rustlang/rust:nightly"); 62 | let final_image = Source::image("library/alpine:latest"); 63 | 64 | let first_command = Command::run("rustc") 65 | .args(&["--crate-name", "crate-1"]) 66 | .mount(Mount::ReadOnlyLayer(builder_image.output(), "/")) 67 | .mount(Mount::ReadOnlyLayer(context.output(), "/context")) 68 | .mount(Mount::Scratch(OutputIdx(0), "/target")); 69 | 70 | let second_command = Command::run("rustc") 71 | .args(&["--crate-name", "crate-2"]) 72 | .mount(Mount::ReadOnlyLayer(builder_image.output(), "/")) 73 | .mount(Mount::ReadOnlyLayer(context.output(), "/context")) 74 | .mount(Mount::Scratch(OutputIdx(0), "/target")); 75 | 76 | let assembly_op = FileSystem::sequence() 77 | .append(FileSystem::mkdir( 78 | OutputIdx(0), 79 | LayerPath::Other(final_image.output(), "/output"), 80 | )) 81 | .append( 82 | FileSystem::copy() 83 | .from(LayerPath::Other(first_command.output(0), "/target/crate-1")) 84 | .to( 85 | OutputIdx(1), 86 | LayerPath::Own(OwnOutputIdx(0), "/output/crate-1"), 87 | ), 88 | ) 89 | .append( 90 | FileSystem::copy() 91 | .from(LayerPath::Other( 92 | second_command.output(0), 93 | "/target/crate-2", 94 | )) 95 | .to( 96 | OutputIdx(2), 97 | LayerPath::Own(OwnOutputIdx(1), "/output/crate-2"), 98 | ), 99 | ); 100 | 101 | let definition = Terminal::with(assembly_op.output(0)).into_definition(); 102 | 103 | assert_eq!( 104 | definition 105 | .def 106 | .iter() 107 | .map(|bytes| Node::get_digest(&bytes)) 108 | .collect::>(), 109 | crate::utils::test::to_vec(vec![ 110 | "sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702", 111 | "sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a", 112 | "sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220", 113 | "sha256:782f343f8f4ee33e4f342ed4209ad1a9eb4582485e45251595a5211ebf2b3cbf", 114 | "sha256:3418ad515958b5e68fd45c9d6fbc8d2ce7d567a956150d22ff529a3fea401aa2", 115 | "sha256:13bb644e4ec0cabe836392649a04551686e69613b1ea9c89a1a8f3bc86181791", 116 | "sha256:d13a773a61236be3c7d539f3ef6d583095c32d2a2a60deda86e71705f2dbc99b", 117 | ]) 118 | ); 119 | 120 | let mut metadata_digests = { 121 | definition 122 | .metadata 123 | .iter() 124 | .map(|(digest, _)| digest.as_str()) 125 | .collect::>() 
126 | }; 127 | 128 | metadata_digests.sort(); 129 | assert_eq!( 130 | metadata_digests, 131 | vec![ 132 | "sha256:0e6b31ceed3e6dc542018f35a53a0e857e6a188453d32a2a5bbe7aa2971c1220", 133 | "sha256:13bb644e4ec0cabe836392649a04551686e69613b1ea9c89a1a8f3bc86181791", 134 | "sha256:3418ad515958b5e68fd45c9d6fbc8d2ce7d567a956150d22ff529a3fea401aa2", 135 | "sha256:782f343f8f4ee33e4f342ed4209ad1a9eb4582485e45251595a5211ebf2b3cbf", 136 | "sha256:a60212791641cbeaa3a49de4f7dff9e40ae50ec19d1be9607232037c1db16702", 137 | "sha256:d13a773a61236be3c7d539f3ef6d583095c32d2a2a60deda86e71705f2dbc99b", 138 | "sha256:dee2a3d7dd482dd8098ba543ff1dcb01efd29fcd16fdb0979ef556f38564543a", 139 | ] 140 | ); 141 | } 142 | -------------------------------------------------------------------------------- /buildkit-llb/src/serialization/id.rs: -------------------------------------------------------------------------------- 1 | use std::ops::Deref; 2 | use std::sync::atomic::{AtomicU64, Ordering}; 3 | 4 | static LAST_ID: AtomicU64 = AtomicU64::new(0); 5 | 6 | #[derive(Debug)] 7 | pub(crate) struct OperationId(u64); 8 | 9 | impl Clone for OperationId { 10 | fn clone(&self) -> Self { 11 | OperationId::default() 12 | } 13 | } 14 | 15 | impl Default for OperationId { 16 | fn default() -> Self { 17 | Self(LAST_ID.fetch_add(1, Ordering::Relaxed)) 18 | } 19 | } 20 | 21 | impl Deref for OperationId { 22 | type Target = u64; 23 | 24 | fn deref(&self) -> &u64 { 25 | &self.0 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /buildkit-llb/src/serialization/mod.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | 3 | mod id; 4 | mod operation; 5 | mod output; 6 | 7 | pub(crate) use self::id::OperationId; 8 | pub(crate) use self::operation::Operation; 9 | pub(crate) use self::output::Node; 10 | 11 | pub(crate) type Result = std::result::Result; 12 | 13 | #[derive(Default)] 14 | pub struct Context { 15 | inner: BTreeMap, 16 | } 17 | 18 | impl Context { 19 | #[allow(clippy::map_entry)] 20 | pub(crate) fn register<'a>(&'a mut self, op: &dyn Operation) -> Result<&'a Node> { 21 | let id = **op.id(); 22 | 23 | if !self.inner.contains_key(&id) { 24 | let node = op.serialize(self)?; 25 | self.inner.insert(id, node); 26 | } 27 | 28 | Ok(self.inner.get(&id).unwrap()) 29 | } 30 | 31 | #[cfg(test)] 32 | pub(crate) fn registered_nodes_iter(&self) -> impl Iterator { 33 | self.inner.iter().map(|pair| pair.1) 34 | } 35 | 36 | pub(crate) fn into_registered_nodes(self) -> impl Iterator { 37 | self.inner.into_iter().map(|pair| pair.1) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /buildkit-llb/src/serialization/operation.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use super::{Context, OperationId}; 4 | use super::{Node, Result}; 5 | 6 | pub(crate) trait Operation: Debug + Send + Sync { 7 | fn id(&self) -> &OperationId; 8 | 9 | fn serialize(&self, cx: &mut Context) -> Result; 10 | } 11 | -------------------------------------------------------------------------------- /buildkit-llb/src/serialization/output.rs: -------------------------------------------------------------------------------- 1 | use buildkit_proto::pb; 2 | use prost::Message; 3 | use sha2::{Digest, Sha256}; 4 | 5 | #[derive(Debug, Default, Clone)] 6 | pub(crate) struct Node { 7 | pub bytes: Vec, 8 | pub digest: String, 9 | pub metadata: pb::OpMetadata, 
10 | } 11 | 12 | impl Node { 13 | pub fn new(message: pb::Op, metadata: pb::OpMetadata) -> Self { 14 | let mut bytes = Vec::new(); 15 | message.encode(&mut bytes).unwrap(); 16 | 17 | Self { 18 | digest: Self::get_digest(&bytes), 19 | bytes, 20 | metadata, 21 | } 22 | } 23 | 24 | pub fn get_digest(bytes: &[u8]) -> String { 25 | let mut hasher = Sha256::new(); 26 | hasher.input(&bytes); 27 | 28 | format!("sha256:{:x}", hasher.result()) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /buildkit-llb/src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use crate::serialization::Operation; 4 | 5 | #[derive(Copy, Clone, Debug)] 6 | pub struct OutputIdx(pub u32); 7 | 8 | #[derive(Copy, Clone, Debug)] 9 | pub struct OwnOutputIdx(pub u32); 10 | 11 | #[derive(Debug, Clone)] 12 | pub struct OperationOutput<'a> { 13 | kind: OperationOutputKind<'a>, 14 | } 15 | 16 | #[derive(Debug, Clone)] 17 | enum OperationOutputKind<'a> { 18 | Owned(Arc, OutputIdx), 19 | Borrowed(&'a dyn Operation, OutputIdx), 20 | } 21 | 22 | impl<'a> OperationOutput<'a> { 23 | pub(crate) fn owned(op: Arc, idx: OutputIdx) -> Self { 24 | Self { 25 | kind: OperationOutputKind::Owned(op, idx), 26 | } 27 | } 28 | 29 | pub(crate) fn borrowed(op: &'a dyn Operation, idx: OutputIdx) -> Self { 30 | Self { 31 | kind: OperationOutputKind::Borrowed(op, idx), 32 | } 33 | } 34 | 35 | pub(crate) fn operation(&self) -> &dyn Operation { 36 | match self.kind { 37 | OperationOutputKind::Owned(ref op, ..) => op.as_ref(), 38 | OperationOutputKind::Borrowed(ref op, ..) => *op, 39 | } 40 | } 41 | 42 | pub(crate) fn output(&self) -> OutputIdx { 43 | match self.kind { 44 | OperationOutputKind::Owned(_, output) | OperationOutputKind::Borrowed(_, output) => { 45 | output 46 | } 47 | } 48 | } 49 | } 50 | 51 | impl Into for OutputIdx { 52 | fn into(self) -> i64 { 53 | self.0.into() 54 | } 55 | } 56 | impl Into for &OutputIdx { 57 | fn into(self) -> i64 { 58 | self.0.into() 59 | } 60 | } 61 | 62 | impl Into for OwnOutputIdx { 63 | fn into(self) -> i64 { 64 | self.0.into() 65 | } 66 | } 67 | impl Into for &OwnOutputIdx { 68 | fn into(self) -> i64 { 69 | self.0.into() 70 | } 71 | } 72 | 73 | impl Into for OutputIdx { 74 | fn into(self) -> i32 { 75 | self.0 as i32 76 | } 77 | } 78 | impl Into for &OutputIdx { 79 | fn into(self) -> i32 { 80 | self.0 as i32 81 | } 82 | } 83 | 84 | impl Into for OwnOutputIdx { 85 | fn into(self) -> i32 { 86 | self.0 as i32 87 | } 88 | } 89 | impl Into for &OwnOutputIdx { 90 | fn into(self) -> i32 { 91 | self.0 as i32 92 | } 93 | } 94 | 95 | #[cfg(test)] 96 | pub mod test { 97 | #[macro_export] 98 | macro_rules! check_op { 99 | ($op:expr, $(|$name:ident| $value:expr,)*) => ($crate::check_op!($op, $(|$name| $value),*)); 100 | ($op:expr, $(|$name:ident| $value:expr),*) => {{ 101 | #[allow(unused_imports)] 102 | use crate::serialization::{Context, Operation}; 103 | 104 | let mut context = Context::default(); 105 | let serialized = $op.serialize(&mut context).unwrap(); 106 | 107 | $(crate::check_op_property!(serialized, context, $name, $value));* 108 | }}; 109 | } 110 | 111 | #[macro_export] 112 | macro_rules! 
check_op_property { 113 | ($serialized:expr, $context:expr, op, $value:expr) => {{ 114 | use std::io::Cursor; 115 | 116 | use buildkit_proto::pb; 117 | use prost::Message; 118 | 119 | assert_eq!( 120 | pb::Op::decode(Cursor::new(&$serialized.bytes)).unwrap().op, 121 | Some($value) 122 | ); 123 | }}; 124 | 125 | ($serialized:expr, $context:expr, inputs, $value:expr) => {{ 126 | use std::io::Cursor; 127 | 128 | use buildkit_proto::pb; 129 | use prost::Message; 130 | 131 | assert_eq!( 132 | pb::Op::decode(Cursor::new(&$serialized.bytes)) 133 | .unwrap() 134 | .inputs 135 | .into_iter() 136 | .map(|input| (input.digest, input.index)) 137 | .collect::>(), 138 | $value 139 | .into_iter() 140 | .map(|input: (&str, i64)| (String::from(input.0), input.1)) 141 | .collect::>() 142 | ); 143 | }}; 144 | 145 | ($serialized:expr, $context:expr, cached_tail, $value:expr) => { 146 | assert_eq!( 147 | $context 148 | .registered_nodes_iter() 149 | .map(|node| node.digest.clone()) 150 | .collect::>(), 151 | crate::utils::test::to_vec($value), 152 | ); 153 | }; 154 | 155 | ($serialized:expr, $context:expr, caps, $value:expr) => {{ 156 | let mut caps = $serialized 157 | .metadata 158 | .caps 159 | .into_iter() 160 | .map(|pair| pair.0) 161 | .collect::>(); 162 | 163 | caps.sort(); 164 | assert_eq!(caps, crate::utils::test::to_vec($value)); 165 | }}; 166 | 167 | ($serialized:expr, $context:expr, description, $value:expr) => { 168 | assert_eq!( 169 | $serialized.metadata.description, 170 | crate::utils::test::to_map($value), 171 | ); 172 | }; 173 | 174 | ($serialized:expr, $context:expr, digest, $value:expr) => { 175 | assert_eq!($serialized.digest, $value); 176 | }; 177 | } 178 | 179 | use std::collections::HashMap; 180 | 181 | pub fn to_map(pairs: Vec<(&str, &str)>) -> HashMap { 182 | pairs 183 | .into_iter() 184 | .map(|(key, value): (&str, &str)| (key.into(), value.into())) 185 | .collect() 186 | } 187 | 188 | pub fn to_vec(items: Vec<&str>) -> Vec { 189 | items.into_iter().map(String::from).collect() 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /buildkit-proto/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [Unreleased] 8 | 9 | ## [0.2.0] - 2020-03-04 10 | ### Changed 11 | - Use `tonic` instead of `tower-grpc` for codegen. 12 | 13 | ## [0.1.0] - 2019-09-24 14 | Initial release. 
15 | -------------------------------------------------------------------------------- /buildkit-proto/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "buildkit-proto" 3 | version = "0.2.0" 4 | authors = ["Denys Zariaiev "] 5 | edition = "2018" 6 | 7 | description = "Protobuf interfaces to BuildKit" 8 | documentation = "https://docs.rs/buildkit-proto" 9 | repository = "https://github.com/denzp/rust-buildkit" 10 | readme = "README.md" 11 | keywords = ["buildkit", "docker", "protobuf", "prost"] 12 | categories = ["development-tools::build-utils", "api-bindings"] 13 | license = "MIT/Apache-2.0" 14 | 15 | [dependencies] 16 | prost = "0.6" 17 | prost-types = "0.6" 18 | tonic = "0.1" 19 | 20 | [build-dependencies.tonic-build] 21 | version = "0.1" 22 | default-features = false 23 | features = ["transport"] 24 | -------------------------------------------------------------------------------- /buildkit-proto/README.md: -------------------------------------------------------------------------------- 1 | `buildkit-proto` - protobuf interfaces to BuildKit 2 | ======= 3 | 4 | [![Actions Status]][Actions Link] 5 | [![buildkit-proto Crates Badge]][buildkit-proto Crates Link] 6 | [![buildkit-proto Docs Badge]][buildkit-proto Docs Link] 7 | 8 | # Usage 9 | 10 | The crate is not intended to be used alone. 11 | An idiomatic high-level API provided by [`buildkit-llb`][buildkit-llb Crates Link] is the preferred way to build LLB graphs. 12 | 13 | # License 14 | 15 | `buildkit-proto` is primarily distributed under the terms of both the MIT license and 16 | the Apache License (Version 2.0), with portions covered by various BSD-like 17 | licenses. 18 | 19 | See LICENSE-APACHE, and LICENSE-MIT for details. 20 | 21 | # Contribution 22 | 23 | Unless you explicitly state otherwise, any contribution intentionally submitted 24 | for inclusion in `buildkit-proto` by you, as defined in the Apache-2.0 license, 25 | shall be dual licensed as above, without any additional terms or conditions. 26 | 27 | [Actions Link]: https://github.com/denzp/rust-buildkit/actions 28 | [Actions Status]: https://github.com/denzp/rust-buildkit/workflows/CI/badge.svg 29 | [buildkit-proto Docs Badge]: https://docs.rs/buildkit-proto/badge.svg 30 | [buildkit-proto Docs Link]: https://docs.rs/buildkit-proto/ 31 | [buildkit-proto Crates Badge]: https://img.shields.io/crates/v/buildkit-proto.svg 32 | [buildkit-proto Crates Link]: https://crates.io/crates/buildkit-proto 33 | [buildkit-llb Crates Link]: https://crates.io/crates/buildkit-llb 34 | -------------------------------------------------------------------------------- /buildkit-proto/build.rs: -------------------------------------------------------------------------------- 1 | const DEFS: &[&str] = &["proto/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto"]; 2 | const PATHS: &[&str] = &["proto"]; 3 | 4 | fn main() -> Result<(), Box<dyn std::error::Error>> { 5 | tonic_build::configure() 6 | .build_client(true) 7 | .build_server(false) 8 | .compile(DEFS, PATHS)?; 9 | 10 | Ok(()) 11 | } 12 | -------------------------------------------------------------------------------- /buildkit-proto/proto/github.com/gogo/googleapis/google/rpc/status.proto: -------------------------------------------------------------------------------- 1 | // Copyright 2017 Google Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | syntax = "proto3"; 16 | 17 | package google.rpc; 18 | 19 | import "google/protobuf/any.proto"; 20 | 21 | option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; 22 | option java_multiple_files = true; 23 | option java_outer_classname = "StatusProto"; 24 | option java_package = "com.google.rpc"; 25 | option objc_class_prefix = "RPC"; 26 | 27 | // The `Status` type defines a logical error model that is suitable for 28 | // different programming environments, including REST APIs and RPC APIs. It is 29 | // used by [gRPC](https://github.com/grpc). The error model is designed to be: 30 | // 31 | // - Simple to use and understand for most users 32 | // - Flexible enough to meet unexpected needs 33 | // 34 | // # Overview 35 | // 36 | // The `Status` message contains three pieces of data: error code, error 37 | // message, and error details. The error code should be an enum value of 38 | // [google.rpc.Code][google.rpc.Code], but it may accept additional error codes 39 | // if needed. The error message should be a developer-facing English message 40 | // that helps developers *understand* and *resolve* the error. If a localized 41 | // user-facing error message is needed, put the localized message in the error 42 | // details or localize it in the client. The optional error details may contain 43 | // arbitrary information about the error. There is a predefined set of error 44 | // detail types in the package `google.rpc` that can be used for common error 45 | // conditions. 46 | // 47 | // # Language mapping 48 | // 49 | // The `Status` message is the logical representation of the error model, but it 50 | // is not necessarily the actual wire format. When the `Status` message is 51 | // exposed in different client libraries and different wire protocols, it can be 52 | // mapped differently. For example, it will likely be mapped to some exceptions 53 | // in Java, but more likely mapped to some error codes in C. 54 | // 55 | // # Other uses 56 | // 57 | // The error model and the `Status` message can be used in a variety of 58 | // environments, either with or without APIs, to provide a 59 | // consistent developer experience across different environments. 60 | // 61 | // Example uses of this error model include: 62 | // 63 | // - Partial errors. If a service needs to return partial errors to the client, 64 | // it may embed the `Status` in the normal response to indicate the partial 65 | // errors. 66 | // 67 | // - Workflow errors. A typical workflow has multiple steps. Each step may 68 | // have a `Status` message for error reporting. 69 | // 70 | // - Batch operations. If a client uses batch request and batch response, the 71 | // `Status` message should be used directly inside batch response, one for 72 | // each error sub-response. 73 | // 74 | // - Asynchronous operations. If an API call embeds asynchronous operation 75 | // results in its response, the status of those operations should be 76 | // represented directly using the `Status` message. 77 | // 78 | // - Logging. 
If some API errors are stored in logs, the message `Status` could 79 | // be used directly after any stripping needed for security/privacy reasons. 80 | message Status { 81 | // The status code, which should be an enum value of 82 | // [google.rpc.Code][google.rpc.Code]. 83 | int32 code = 1; 84 | 85 | // A developer-facing error message, which should be in English. Any 86 | // user-facing error message should be localized and sent in the 87 | // [google.rpc.Status.details][google.rpc.Status.details] field, or localized 88 | // by the client. 89 | string message = 2; 90 | 91 | // A list of messages that carry the error details. There is a common set of 92 | // message types for APIs to use. 93 | repeated google.protobuf.Any details = 3; 94 | } 95 | -------------------------------------------------------------------------------- /buildkit-proto/proto/github.com/gogo/protobuf/gogoproto/gogo.proto: -------------------------------------------------------------------------------- 1 | // Protocol Buffers for Go with Gadgets 2 | // 3 | // Copyright (c) 2013, The GoGo Authors. All rights reserved. 4 | // http://github.com/gogo/protobuf 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are 8 | // met: 9 | // 10 | // * Redistributions of source code must retain the above copyright 11 | // notice, this list of conditions and the following disclaimer. 12 | // * Redistributions in binary form must reproduce the above 13 | // copyright notice, this list of conditions and the following disclaimer 14 | // in the documentation and/or other materials provided with the 15 | // distribution. 16 | // 17 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 | 29 | syntax = "proto2"; 30 | package gogoproto; 31 | 32 | import "google/protobuf/descriptor.proto"; 33 | 34 | option java_package = "com.google.protobuf"; 35 | option java_outer_classname = "GoGoProtos"; 36 | option go_package = "github.com/gogo/protobuf/gogoproto"; 37 | 38 | extend google.protobuf.EnumOptions { 39 | optional bool goproto_enum_prefix = 62001; 40 | optional bool goproto_enum_stringer = 62021; 41 | optional bool enum_stringer = 62022; 42 | optional string enum_customname = 62023; 43 | optional bool enumdecl = 62024; 44 | } 45 | 46 | extend google.protobuf.EnumValueOptions { 47 | optional string enumvalue_customname = 66001; 48 | } 49 | 50 | extend google.protobuf.FileOptions { 51 | optional bool goproto_getters_all = 63001; 52 | optional bool goproto_enum_prefix_all = 63002; 53 | optional bool goproto_stringer_all = 63003; 54 | optional bool verbose_equal_all = 63004; 55 | optional bool face_all = 63005; 56 | optional bool gostring_all = 63006; 57 | optional bool populate_all = 63007; 58 | optional bool stringer_all = 63008; 59 | optional bool onlyone_all = 63009; 60 | 61 | optional bool equal_all = 63013; 62 | optional bool description_all = 63014; 63 | optional bool testgen_all = 63015; 64 | optional bool benchgen_all = 63016; 65 | optional bool marshaler_all = 63017; 66 | optional bool unmarshaler_all = 63018; 67 | optional bool stable_marshaler_all = 63019; 68 | 69 | optional bool sizer_all = 63020; 70 | 71 | optional bool goproto_enum_stringer_all = 63021; 72 | optional bool enum_stringer_all = 63022; 73 | 74 | optional bool unsafe_marshaler_all = 63023; 75 | optional bool unsafe_unmarshaler_all = 63024; 76 | 77 | optional bool goproto_extensions_map_all = 63025; 78 | optional bool goproto_unrecognized_all = 63026; 79 | optional bool gogoproto_import = 63027; 80 | optional bool protosizer_all = 63028; 81 | optional bool compare_all = 63029; 82 | optional bool typedecl_all = 63030; 83 | optional bool enumdecl_all = 63031; 84 | 85 | optional bool goproto_registration = 63032; 86 | optional bool messagename_all = 63033; 87 | 88 | optional bool goproto_sizecache_all = 63034; 89 | optional bool goproto_unkeyed_all = 63035; 90 | } 91 | 92 | extend google.protobuf.MessageOptions { 93 | optional bool goproto_getters = 64001; 94 | optional bool goproto_stringer = 64003; 95 | optional bool verbose_equal = 64004; 96 | optional bool face = 64005; 97 | optional bool gostring = 64006; 98 | optional bool populate = 64007; 99 | optional bool stringer = 67008; 100 | optional bool onlyone = 64009; 101 | 102 | optional bool equal = 64013; 103 | optional bool description = 64014; 104 | optional bool testgen = 64015; 105 | optional bool benchgen = 64016; 106 | optional bool marshaler = 64017; 107 | optional bool unmarshaler = 64018; 108 | optional bool stable_marshaler = 64019; 109 | 110 | optional bool sizer = 64020; 111 | 112 | optional bool unsafe_marshaler = 64023; 113 | optional bool unsafe_unmarshaler = 64024; 114 | 115 | optional bool goproto_extensions_map = 64025; 116 | optional bool goproto_unrecognized = 64026; 117 | 118 | optional bool protosizer = 64028; 119 | optional bool compare = 64029; 120 | 121 | optional bool typedecl = 64030; 122 | 123 | optional bool messagename = 64033; 124 | 125 | optional bool goproto_sizecache = 64034; 126 | optional bool goproto_unkeyed = 64035; 127 | } 128 | 129 | extend google.protobuf.FieldOptions { 130 | optional bool nullable = 65001; 131 | optional bool embed = 65002; 132 | optional string customtype = 65003; 133 | optional string 
customname = 65004; 134 | optional string jsontag = 65005; 135 | optional string moretags = 65006; 136 | optional string casttype = 65007; 137 | optional string castkey = 65008; 138 | optional string castvalue = 65009; 139 | 140 | optional bool stdtime = 65010; 141 | optional bool stdduration = 65011; 142 | optional bool wktpointer = 65012; 143 | 144 | } 145 | -------------------------------------------------------------------------------- /buildkit-proto/proto/github.com/moby/buildkit/api/types/worker.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package moby.buildkit.v1.types; 4 | 5 | import "github.com/gogo/protobuf/gogoproto/gogo.proto"; 6 | import "github.com/moby/buildkit/solver/pb/ops.proto"; 7 | 8 | option (gogoproto.sizer_all) = true; 9 | option (gogoproto.marshaler_all) = true; 10 | option (gogoproto.unmarshaler_all) = true; 11 | 12 | message WorkerRecord { 13 | string ID = 1; 14 | map<string, string> Labels = 2; 15 | repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false]; 16 | repeated GCPolicy GCPolicy = 4; 17 | } 18 | 19 | message GCPolicy { 20 | bool all = 1; 21 | int64 keepDuration = 2; 22 | int64 keepBytes = 3; 23 | repeated string filters = 4; 24 | } 25 | -------------------------------------------------------------------------------- /buildkit-proto/proto/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package moby.buildkit.v1.frontend; 4 | 5 | import "github.com/gogo/protobuf/gogoproto/gogo.proto"; 6 | import "github.com/gogo/googleapis/google/rpc/status.proto"; 7 | import "github.com/moby/buildkit/solver/pb/ops.proto"; 8 | import "github.com/moby/buildkit/api/types/worker.proto"; 9 | import "github.com/moby/buildkit/util/apicaps/pb/caps.proto"; 10 | import "github.com/tonistiigi/fsutil/types/stat.proto"; 11 | 12 | option (gogoproto.sizer_all) = true; 13 | option (gogoproto.marshaler_all) = true; 14 | option (gogoproto.unmarshaler_all) = true; 15 | 16 | service LLBBridge { 17 | // apicaps:CapResolveImage 18 | rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse); 19 | // apicaps:CapSolveBase 20 | rpc Solve(SolveRequest) returns (SolveResponse); 21 | // apicaps:CapReadFile 22 | rpc ReadFile(ReadFileRequest) returns (ReadFileResponse); 23 | // apicaps:CapReadDir 24 | rpc ReadDir(ReadDirRequest) returns (ReadDirResponse); 25 | // apicaps:CapStatFile 26 | rpc StatFile(StatFileRequest) returns (StatFileResponse); 27 | rpc Ping(PingRequest) returns (PongResponse); 28 | rpc Return(ReturnRequest) returns (ReturnResponse); 29 | } 30 | 31 | message Result { 32 | oneof result { 33 | string ref = 1; 34 | RefMap refs = 2; 35 | } 36 | map<string, bytes> metadata = 10; 37 | } 38 | 39 | message RefMap { 40 | map<string, string> refs = 1; 41 | } 42 | 43 | message ReturnRequest { 44 | Result result = 1; 45 | google.rpc.Status error = 2; 46 | } 47 | 48 | message ReturnResponse { 49 | } 50 | 51 | message ResolveImageConfigRequest { 52 | string Ref = 1; 53 | pb.Platform Platform = 2; 54 | string ResolveMode = 3; 55 | string LogName = 4; 56 | } 57 | 58 | message ResolveImageConfigResponse { 59 | string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; 60 | bytes Config = 2; 61 | } 62 | 63 | message SolveRequest { 64 | pb.Definition Definition = 1; 65 | string Frontend = 2; 66 | map<string, string> FrontendOpt = 3; 67 | // ImportCacheRefsDeprecated is
deprecated in favor or the new Imports since BuildKit v0.4.0. 68 | // When ImportCacheRefsDeprecated is set, the solver appends 69 | // {.Type = "registry", .Attrs = {"ref": importCacheRef}} 70 | // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed) 71 | repeated string ImportCacheRefsDeprecated = 4; 72 | bool allowResultReturn = 5; 73 | 74 | // apicaps.CapSolveInlineReturn deprecated 75 | bool Final = 10; 76 | bytes ExporterAttr = 11; 77 | // CacheImports was added in BuildKit v0.4.0. 78 | // apicaps:CapImportCaches 79 | repeated CacheOptionsEntry CacheImports = 12; 80 | } 81 | 82 | // CacheOptionsEntry corresponds to the control.CacheOptionsEntry 83 | message CacheOptionsEntry { 84 | string Type = 1; 85 | map<string, string> Attrs = 2; 86 | } 87 | 88 | message SolveResponse { 89 | // deprecated 90 | string ref = 1; // can be used by readfile request 91 | // deprecated 92 | /* bytes ExporterAttr = 2;*/ 93 | 94 | // these fields are returned when allowMapReturn was set 95 | Result result = 3; 96 | } 97 | 98 | message ReadFileRequest { 99 | string Ref = 1; 100 | string FilePath = 2; 101 | FileRange Range = 3; 102 | } 103 | 104 | message FileRange { 105 | int64 Offset = 1; 106 | int64 Length = 2; 107 | } 108 | 109 | message ReadFileResponse { 110 | bytes Data = 1; 111 | } 112 | 113 | message ReadDirRequest { 114 | string Ref = 1; 115 | string DirPath = 2; 116 | string IncludePattern = 3; 117 | } 118 | 119 | message ReadDirResponse { 120 | repeated fsutil.types.Stat entries = 1; 121 | } 122 | 123 | message StatFileRequest { 124 | string Ref = 1; 125 | string Path = 2; 126 | } 127 | 128 | message StatFileResponse { 129 | fsutil.types.Stat stat = 1; 130 | } 131 | 132 | message PingRequest{ 133 | } 134 | message PongResponse{ 135 | repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false]; 136 | repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false]; 137 | repeated moby.buildkit.v1.types.WorkerRecord Workers = 3; 138 | } 139 | -------------------------------------------------------------------------------- /buildkit-proto/proto/github.com/moby/buildkit/solver/pb/ops.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | // Package pb provides the protobuf definition of LLB: low-level builder instruction. 4 | // LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. 5 | package pb; 6 | 7 | import "github.com/gogo/protobuf/gogoproto/gogo.proto"; 8 | 9 | option (gogoproto.stable_marshaler_all) = true; 10 | 11 | // Op represents a vertex of the LLB DAG. 12 | message Op { 13 | // inputs is a set of input edges. 14 | repeated Input inputs = 1; 15 | oneof op { 16 | ExecOp exec = 2; 17 | SourceOp source = 3; 18 | FileOp file = 4; 19 | BuildOp build = 5; 20 | } 21 | Platform platform = 10; 22 | WorkerConstraints constraints = 11; 23 | } 24 | 25 | // Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform 26 | message Platform { 27 | string Architecture = 1; 28 | string OS = 2; 29 | string Variant = 3; 30 | string OSVersion = 4; // unused 31 | repeated string OSFeatures = 5; // unused 32 | } 33 | 34 | // Input represents an input edge for an Op.
35 | message Input { 36 | // digest of the marshaled input Op 37 | string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; 38 | // output index of the input Op 39 | int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; 40 | } 41 | 42 | // ExecOp executes a command in a container. 43 | message ExecOp { 44 | Meta meta = 1; 45 | repeated Mount mounts = 2; 46 | NetMode network = 3; 47 | SecurityMode security = 4; 48 | } 49 | 50 | // Meta is a set of arguments for ExecOp. 51 | // Meta is unrelated to LLB metadata. 52 | // FIXME: rename (ExecContext? ExecArgs?) 53 | message Meta { 54 | repeated string args = 1; 55 | repeated string env = 2; 56 | string cwd = 3; 57 | string user = 4; 58 | ProxyEnv proxy_env = 5; 59 | repeated HostIP extraHosts = 6; 60 | } 61 | 62 | enum NetMode { 63 | UNSET = 0; // sandbox 64 | HOST = 1; 65 | NONE = 2; 66 | } 67 | 68 | enum SecurityMode { 69 | SANDBOX = 0; 70 | INSECURE = 1; // privileged mode 71 | } 72 | 73 | // Mount specifies how to mount an input Op as a filesystem. 74 | message Mount { 75 | int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; 76 | string selector = 2; 77 | string dest = 3; 78 | int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; 79 | bool readonly = 5; 80 | MountType mountType = 6; 81 | CacheOpt cacheOpt = 20; 82 | SecretOpt secretOpt = 21; 83 | SSHOpt SSHOpt = 22; 84 | } 85 | 86 | // MountType defines a type of a mount from a supported set 87 | enum MountType { 88 | BIND = 0; 89 | SECRET = 1; 90 | SSH = 2; 91 | CACHE = 3; 92 | TMPFS = 4; 93 | } 94 | 95 | // CacheOpt defines options specific to cache mounts 96 | message CacheOpt { 97 | // ID is an optional namespace for the mount 98 | string ID = 1; 99 | // Sharing is the sharing mode for the mount 100 | CacheSharingOpt sharing = 2; 101 | } 102 | 103 | // CacheSharingOpt defines different sharing modes for cache mount 104 | enum CacheSharingOpt { 105 | // SHARED cache mount can be used concurrently by multiple writers 106 | SHARED = 0; 107 | // PRIVATE creates a new mount if there are multiple writers 108 | PRIVATE = 1; 109 | // LOCKED pauses second writer until first one releases the mount 110 | LOCKED = 2; 111 | } 112 | 113 | // SecretOpt defines options describing secret mounts 114 | message SecretOpt { 115 | // ID of secret. Used for quering the value. 116 | string ID = 1; 117 | // UID of secret file 118 | uint32 uid = 2; 119 | // GID of secret file 120 | uint32 gid = 3; 121 | // Mode is the filesystem mode of secret file 122 | uint32 mode = 4; 123 | // Optional defines if secret value is required. Error is produced 124 | // if value is not found and optional is false. 125 | bool optional = 5; 126 | } 127 | 128 | // SSHOpt defines options describing secret mounts 129 | message SSHOpt { 130 | // ID of exposed ssh rule. Used for quering the value. 131 | string ID = 1; 132 | // UID of agent socket 133 | uint32 uid = 2; 134 | // GID of agent socket 135 | uint32 gid = 3; 136 | // Mode is the filesystem mode of agent socket 137 | uint32 mode = 4; 138 | // Optional defines if ssh socket is required. Error is produced 139 | // if client does not expose ssh. 140 | bool optional = 5; 141 | } 142 | 143 | // SourceOp specifies a source such as build contexts and images. 144 | message SourceOp { 145 | // TODO: use source type or any type instead of URL protocol. 146 | // identifier e.g. local://, docker-image://, git://, https://... 
147 | string identifier = 1; 148 | // attrs are defined in attr.go 149 | map<string, string> attrs = 2; 150 | } 151 | 152 | // BuildOp is used for nested build invocation. 153 | // BuildOp is experimental and can break without backwards compatibility 154 | message BuildOp { 155 | int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; 156 | map<string, BuildInput> inputs = 2; 157 | Definition def = 3; 158 | map<string, string> attrs = 4; 159 | // outputs 160 | } 161 | 162 | // BuildInput is used for BuildOp. 163 | message BuildInput { 164 | int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; 165 | } 166 | 167 | // OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time. 168 | message OpMetadata { 169 | // ignore_cache specifies to ignore the cache for this Op. 170 | bool ignore_cache = 1; 171 | // Description can be used for keeping any text fields that builder doesn't parse 172 | map<string, string> description = 2; 173 | // index 3 reserved for WorkerConstraint in previous versions 174 | // WorkerConstraint worker_constraint = 3; 175 | ExportCache export_cache = 4; 176 | 177 | map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false]; 178 | } 179 | 180 | message ExportCache { 181 | bool Value = 1; 182 | } 183 | 184 | message ProxyEnv { 185 | string http_proxy = 1; 186 | string https_proxy = 2; 187 | string ftp_proxy = 3; 188 | string no_proxy = 4; 189 | } 190 | 191 | // WorkerConstraints defines conditions for the worker 192 | message WorkerConstraints { 193 | repeated string filter = 1; // containerd-style filter 194 | } 195 | 196 | // Definition is the LLB definition structure with per-vertex metadata entries 197 | message Definition { 198 | // def is a list of marshaled Op messages 199 | repeated bytes def = 1; 200 | // metadata contains metadata for the each of the Op messages. 201 | // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future.
202 | map<string, OpMetadata> metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; 203 | } 204 | 205 | message HostIP { 206 | string Host = 1; 207 | string IP = 2; 208 | } 209 | 210 | message FileOp { 211 | repeated FileAction actions = 2; 212 | } 213 | 214 | message FileAction { 215 | int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index) 216 | int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//-- 217 | int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; 218 | oneof action { 219 | // FileActionCopy copies files from secondaryInput on top of input 220 | FileActionCopy copy = 4; 221 | // FileActionMkFile creates a new file 222 | FileActionMkFile mkfile = 5; 223 | // FileActionMkDir creates a new directory 224 | FileActionMkDir mkdir = 6; 225 | // FileActionRm removes a file 226 | FileActionRm rm = 7; 227 | } 228 | } 229 | 230 | message FileActionCopy { 231 | // src is the source path 232 | string src = 1; 233 | // dest path 234 | string dest = 2; 235 | // optional owner override 236 | ChownOpt owner = 3; 237 | // optional permission bits override 238 | int32 mode = 4; 239 | // followSymlink resolves symlinks in src 240 | bool followSymlink = 5; 241 | // dirCopyContents only copies contents if src is a directory 242 | bool dirCopyContents = 6; 243 | // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead 244 | bool attemptUnpackDockerCompatibility = 7; 245 | // createDestPath creates dest path directories if needed 246 | bool createDestPath = 8; 247 | // allowWildcard allows filepath.Match wildcards in src path 248 | bool allowWildcard = 9; 249 | // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files 250 | bool allowEmptyWildcard = 10; 251 | // optional created time override 252 | int64 timestamp = 11; 253 | } 254 | 255 | message FileActionMkFile { 256 | // path for the new file 257 | string path = 1; 258 | // permission bits 259 | int32 mode = 2; 260 | // data is the new file contents 261 | bytes data = 3; 262 | // optional owner for the new file 263 | ChownOpt owner = 4; 264 | // optional created time override 265 | int64 timestamp = 5; 266 | } 267 | 268 | message FileActionMkDir { 269 | // path for the new directory 270 | string path = 1; 271 | // permission bits 272 | int32 mode = 2; 273 | // makeParents creates parent directories as well if needed 274 | bool makeParents = 3; 275 | // optional owner for the new directory 276 | ChownOpt owner = 4; 277 | // optional created time override 278 | int64 timestamp = 5; 279 | } 280 | 281 | message FileActionRm { 282 | // path to remove 283 | string path = 1; 284 | // allowNotFound doesn't fail the rm if file is not found 285 | bool allowNotFound = 2; 286 | // allowWildcard allows filepath.Match wildcards in path 287 | bool allowWildcard = 3; 288 | } 289 | 290 | message ChownOpt { 291 | UserOpt user = 1; 292 | UserOpt group = 2; 293 | } 294 | 295 | message UserOpt { 296 | oneof user { 297 | NamedUserOpt byName = 1; 298 | uint32 byID = 2; 299 | } 300 | } 301 | 302 | message NamedUserOpt { 303 | string name = 1; 304 | int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; 305 | } -------------------------------------------------------------------------------- /buildkit-proto/proto/github.com/moby/buildkit/util/apicaps/pb/caps.proto:
-------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package moby.buildkit.v1.apicaps; 4 | 5 | import "github.com/gogo/protobuf/gogoproto/gogo.proto"; 6 | 7 | option (gogoproto.sizer_all) = true; 8 | option (gogoproto.marshaler_all) = true; 9 | option (gogoproto.unmarshaler_all) = true; 10 | 11 | // APICap defines a capability supported by the service 12 | message APICap { 13 | string ID = 1; 14 | bool Enabled = 2; 15 | bool Deprecated = 3; // Unused. May be used for warnings in the future 16 | string DisabledReason = 4; // Reason key for detection code 17 | string DisabledReasonMsg = 5; // Message to the user 18 | string DisabledAlternative = 6; // Identifier that updated client could catch. 19 | } -------------------------------------------------------------------------------- /buildkit-proto/proto/github.com/tonistiigi/fsutil/types/stat.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package fsutil.types; 4 | 5 | option go_package = "types"; 6 | 7 | message Stat { 8 | string path = 1; 9 | uint32 mode = 2; 10 | uint32 uid = 3; 11 | uint32 gid = 4; 12 | int64 size = 5; 13 | int64 modTime = 6; 14 | // int32 typeflag = 7; 15 | string linkname = 7; 16 | int64 devmajor = 8; 17 | int64 devminor = 9; 18 | map<string, bytes> xattrs = 10; 19 | } -------------------------------------------------------------------------------- /buildkit-proto/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[allow(clippy::all)] 2 | 3 | pub mod moby { 4 | pub mod buildkit { 5 | pub mod v1 { 6 | pub mod frontend { 7 | include!(concat!(env!("OUT_DIR"), "/moby.buildkit.v1.frontend.rs")); 8 | } 9 | 10 | pub mod apicaps { 11 | include!(concat!(env!("OUT_DIR"), "/moby.buildkit.v1.apicaps.rs")); 12 | } 13 | 14 | pub mod types { 15 | include!(concat!(env!("OUT_DIR"), "/moby.buildkit.v1.types.rs")); 16 | } 17 | } 18 | } 19 | } 20 | 21 | pub mod google { 22 | pub mod rpc { 23 | include!(concat!(env!("OUT_DIR"), "/google.rpc.rs")); 24 | } 25 | } 26 | 27 | pub mod pb { 28 | include!(concat!(env!("OUT_DIR"), "/pb.rs")); 29 | } 30 | 31 | pub mod fsutil { 32 | pub mod types { 33 | include!(concat!(env!("OUT_DIR"), "/fsutil.types.rs")); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /buildkit-proto/update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | export BUILDKIT_VERSION="v0.5.1" 5 | 6 | curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/api/types/worker.proto" > proto/github.com/moby/buildkit/api/types/worker.proto 7 | curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/frontend/gateway/pb/gateway.proto" > proto/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto 8 | curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/solver/pb/ops.proto" > proto/github.com/moby/buildkit/solver/pb/ops.proto 9 | curl "https://raw.githubusercontent.com/moby/buildkit/$BUILDKIT_VERSION/util/apicaps/pb/caps.proto" > proto/github.com/moby/buildkit/util/apicaps/pb/caps.proto 10 | 11 | curl "https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/status.proto" > proto/github.com/gogo/googleapis/google/rpc/status.proto 12 | curl "https://raw.githubusercontent.com/gogo/protobuf/v1.2.1/gogoproto/gogo.proto" > proto/github.com/gogo/protobuf/gogoproto/gogo.proto 13 | curl
"https://raw.githubusercontent.com/tonistiigi/fsutil/master/types/stat.proto" > proto/github.com/tonistiigi/fsutil/types/stat.proto 14 | --------------------------------------------------------------------------------