├── .gitignore ├── .travis.yml ├── BUILD ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── SPEC.md ├── configs ├── demo.json ├── edonus.json └── standard.json ├── docs.sh ├── examples ├── .gitignore ├── README ├── hello │ ├── .lal │ │ ├── BUILD │ │ └── manifest.json │ ├── Makefile │ └── main.c ├── hello2 │ ├── .lal │ │ ├── BUILD │ │ └── manifest.json │ ├── Makefile │ └── main.c ├── libhello │ ├── .gitignore │ ├── .lal │ │ ├── BUILD │ │ └── manifest.json │ ├── Makefile │ ├── hello.c │ └── hello.h └── libhello2 │ ├── .gitignore │ ├── .lal │ ├── BUILD │ └── manifest.json │ ├── Makefile │ ├── hello.c │ └── hello.h ├── lal.complete.sh ├── manifest.json ├── package.sh ├── release.sh ├── rustfmt.toml ├── src ├── build.rs ├── clean.rs ├── configure.rs ├── core │ ├── config.rs │ ├── ensure.rs │ ├── errors.rs │ ├── input.rs │ ├── lockfile.rs │ ├── manifest.rs │ ├── mod.rs │ ├── output.rs │ └── sticky.rs ├── env.rs ├── export.rs ├── fetch.rs ├── init.rs ├── lib.rs ├── list.rs ├── main.rs ├── propagate.rs ├── publish.rs ├── query.rs ├── remove.rs ├── shell.rs ├── stash.rs ├── status.rs ├── storage │ ├── artifactory.rs │ ├── download.rs │ ├── local.rs │ ├── mod.rs │ ├── progress.rs │ └── traits.rs ├── update.rs ├── upgrade.rs └── verify.rs └── tests ├── .gitignore ├── helloworld ├── BUILD ├── Makefile ├── README.md ├── hello.c └── manifest.json ├── heylib ├── BUILD ├── Makefile ├── README.md ├── hey.c ├── hey.h └── manifest.json ├── prop-base ├── BUILD └── manifest.json ├── prop-leaf ├── BUILD └── manifest.json ├── prop-mid-1 ├── BUILD └── manifest.json ├── prop-mid-2 ├── BUILD └── manifest.json └── testmain.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | *.rs.bk 3 | lal.coreos.tar 4 | lal.musl.tar 5 | OUTPUT 6 | lal.tar 7 | ARTIFACT 8 | testtmp 9 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | sudo: required 2 | services: 3 | - docker 4 | dist: trusty 5 | addons: 6 | apt: 7 | packages: 8 | - libssl-dev 9 | language: rust 10 | rust: 11 | - stable 12 | before_script: 13 | - export RUST_BACKTRACE=full 14 | script: 15 | - cargo clean 16 | - if [ "$TRAVIS_PULL_REQUEST" = "true" ]; then cargo build; fi 17 | - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then docker run --rm -v $PWD:/volume -w /volume -t clux/muslrust cargo build --release; fi 18 | - if [ "$TRAVIS_PULL_REQUEST" = "true" ]; then cargo test; fi 19 | - if [ "$TRAVIS_PULL_REQUEST" = "true" ]; then cargo doc --no-deps; fi 20 | notifications: 21 | email: 22 | on_success: change 23 | on_failure: always 24 | branches: 25 | only: 26 | - master 27 | - /^v.*$/ 28 | before_deploy: 29 | - sudo chown -R $USER:$USER target 30 | - ldd target/x86_64-unknown-linux-musl/release/lal || true 31 | - mkdir ARTIFACT/{bin,share/lal} -p 32 | - mv target/x86_64-unknown-linux-musl/release/lal ARTIFACT/bin/ 33 | - cp lal.complete.sh ARTIFACT/share/lal/ 34 | - tar czf lal.tar.gz --transform=s,^ARTIFACT/,, $(find ARTIFACT/ -type f -o -type l) 35 | deploy: 36 | provider: releases 37 | file: lal.tar.gz 38 | skip_cleanup: true 39 | on: 40 | tags: true 41 | api_key: 42 | secure: 
jo7upaSy/Suepldw8OAjFhIl0y0xdIrd5W0WS7XMNCtHk3QC+iUSGiqlmZ/uQPp0IW5v8cTKVGyQ5bW66eUj8yLbw1QOwGUKdVF33jdZA1kAwY0baxcZCkbeEbWUiDKMRSJ/kr/AqFKgVJVLBcPhqGmHHmfK+m8RuVSI2ioUrW3hYdLUT35xws4wOdgQiEuKOn6EGUsFaulNj6LJWfl9jYmCUFX55UmMU/VIQiwCEMSCBin739BwHXBDNLwJEgHR9VHceSaBWsazOW6roT9gE6FdUqMGkjiK9vOBgTp9LElfK/4RO30TJ7Nz4EYz3/PoQzNrB3TMMBRtD14I8BnWRGeNKlFwUCXoPWnEeVuyurNQLLb0KiV4+JoxYQ1614pwaIwiz1wMf2yGMswc7VT89KqGGV18saVcQJaPOWSjsNyySWjK0FSMIAyCX5qzixFmvgfPu6Jf9/A2Wd5v6oJypo9i4n4x63UjXXuH/dOeg49V/QlM3py0CsztlUNRypb0NyFOBlYGJerAkKuTSW3Fh3fjte3GS+ze3rzC//ADYowG9R1mJdrpM48busoOzwzytgRHXAAXq0CFlWEw5a/Ly+iES2f3yQbGqx0/ZLIb5oZqlXsv3iEEczhokPiK74Psc/7yG7Mn5+Vjndb1AGcZ6gmNfGFRy4WWcaUFZ/b23VA= 43 | after_script: | 44 | if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [[ "$TRAVIS_RUST_VERSION" == stable ]]; then 45 | bash <(curl -sSL https://raw.githubusercontent.com/xd009642/tarpaulin/master/travis-install.sh) 46 | sudo chown -R $USER:$USER target 47 | cargo tarpaulin --ciserver travis-ci --coveralls $TRAVIS_JOB_ID 48 | fi 49 | -------------------------------------------------------------------------------- /BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | main() { 4 | # build in the currently available muslrust container 5 | set -e 6 | if [ ! -d ~/.cargo/registry ]; then 7 | echo "Ensure you have created a cargo-cache docker volume to speed up subsequent builds" 8 | echo "If this is your first build, this is normal" 9 | echo "Otherwise, please 'docker volume create cargo-cache' and ensure it is specified in your lal config" 10 | echo "Continuing from blank cache..." 
11 | fi 12 | if [[ $1 == "lal" ]]; then 13 | mkdir -p OUTPUT/{bin,share/lal/configs} 14 | cp configs/*.json OUTPUT/share/lal/configs/ 15 | cp lal.complete* OUTPUT/share/lal/ 16 | if [[ $2 == "slim" ]]; then 17 | (set -x; cargo build --no-default-features --release --verbose) 18 | cp ./target/x86_64-unknown-linux-musl/release/lal OUTPUT/bin/ 19 | elif [[ $2 == "release" ]]; then 20 | (set -x; cargo build --release --verbose) 21 | cp ./target/x86_64-unknown-linux-musl/release/lal OUTPUT/bin/ 22 | elif [[ $2 == "debug" ]]; then 23 | (set -x; cargo build --verbose) 24 | cp ./target/x86_64-unknown-linux-musl/debug/lal OUTPUT/bin/ 25 | else 26 | echo "No such configuration $2 found" 27 | exit 2 28 | fi 29 | elif [[ $1 == "lal-unit-tests" ]]; then 30 | cargo build --test testmain 31 | cp ./target/x86_64-unknown-linux-musl/debug/testmain-* OUTPUT/ 32 | rm -f OUTPUT/testmain-*.d 33 | echo "Please run the testmain executable in ./OUTPUT/" 34 | else 35 | echo "No such component $1 found" 36 | exit 2 37 | fi 38 | } 39 | 40 | # If we were not sourced as a library, pass arguments onto main 41 | if [ "$0" = "${BASH_SOURCE[0]}" ]; then 42 | main "$@" 43 | else 44 | echo "${BASH_SOURCE[0]} sourced" 45 | fi 46 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lal" 3 | version = "3.8.1" 4 | authors = ["Eirik Albrigtsen "] 5 | description = "A strict, language-agnostic build system and dependency manager" 6 | documentation = "http://lalbuild.github.io/lal" 7 | license = "MIT" 8 | categories = ["command-line-utilities"] 9 | keywords = ["package", "dependency", "build", "docker", "artifactory"] 10 | readme = "README.md" 11 | 12 | [badges] 13 | travis-ci = { repository = "lalbuild/lal", branch = "master" } 14 | coveralls = { repository = "lalbuild/lal", branch = "master" } 15 | 16 | [[bin]] 17 | doc = false 18 | name = "lal" 19 | path = 
"src/main.rs" 20 | 21 | [[test]] 22 | harness = false 23 | name = "testmain" 24 | 25 | [dependencies] 26 | ansi_term = "0.7.2" 27 | chrono = "0.2" 28 | clap = "2.27.1" 29 | filetime = "0.1" 30 | flate2 = "0.2" 31 | hyper = "0.10.9" 32 | hyper-native-tls = "0.2.2" 33 | log = "0.3.5" 34 | loggerv = "0.6.0" 35 | openssl-probe = "0.1.1" 36 | rand = "0.3.14" 37 | regex = "0.1.55" 38 | semver = "0.9.0" 39 | serde = "1.0.24" 40 | serde_derive = "1.0.24" 41 | serde_json = "1.0.8" 42 | sha1 = "0.3.0" 43 | tar = "0.4.10" 44 | walkdir = "1.0.7" 45 | 46 | [dependencies.indicatif] 47 | optional = true 48 | version = "0.3.3" 49 | 50 | [features] 51 | default = ["progress"] 52 | progress = ["indicatif"] 53 | upgrade = [] 54 | 55 | [lib] 56 | name = "lal" 57 | path = "src/lib.rs" 58 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Cisco Systems 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lal 2 | [![build status](https://secure.travis-ci.org/lalbuild/lal.svg)](http://travis-ci.org/lalbuild/lal) 3 | [![coverage status](http://img.shields.io/coveralls/lalbuild/lal.svg)](https://coveralls.io/r/lalbuild/lal) 4 | [![crates status](https://img.shields.io/crates/v/lal.svg)](https://crates.io/crates/lal) 5 | 6 | A strict, language-agnostic build system and dependency manager. 7 | 8 | * **Use existing tools**: `lal build` only shells out to an executable `BUILD` script in a configured docker container. Install what you want in your build environments: cmake, autotools, cargo, go, python. 9 | * **Cache large builds**: publish built libraries for later use down the dependency tree. 10 | * **Strict with environments and versions**: `lal verify` enforces that all your dependencies are built in the same environment and use the same version down the tree (and it runs before your build). 11 | * **Builds on existing package manager ideas**: versions in a manifest, fetch dependencies first, verify them, then build however you want, lal autogenerates lockfiles during build. 12 | * **Transparent use of docker for build environments** with configurable mounts and direct view of the docker run commands used. `lal shell` or `lal script` provides additional easy ways to use the build environments. 13 | 14 | ## Conception 15 | We needed a simple dependency manager built around the idea of a storage backend and a build environment. 
Strict versioning and consistent build environments for our C++ codebases where the most important features needed, and we already had docker and artifactory for the rest, however other storage backends can be implemented in the future. 16 | 17 | The command line [specification](./SPEC.md) contains a detailed overview of what `lal` does. 18 | 19 | ## Showcases 20 | A few short ascii shorts about how lal is typically used internally: 21 | 22 | - [build / fetch](https://asciinema.org/a/3udzvbettco6sx44mbn238x0v) 23 | - [custom dependencies](https://asciinema.org/a/c9v790m4euh190ladaqzfdc43) 24 | - [scripts](https://asciinema.org/a/a3xmki0iz5j0am2vv780p41xa) 25 | 26 | ## Setup 27 | Needs a few pieces to be set up across a team at the moment. Grab a :coffee: 28 | 29 | ### Prerequisites (devs) 30 | You need [docker](https://docs.docker.com/engine/installation/linux/) (minimum version 1.12), logged into the group with access to your docker images in the [relevant config file](./configs). Distros with Linux >= 4.4.0 is the primary target, but Mac is also getting there. 31 | 32 | ### Prerequisites (ops) 33 | A set of docker images as outlined in the [relevant config file](./configs), all built to include a `lal` user and available to docker logged in devs (see below) 34 | 35 | CI setup to build and upload releases of master as outlined further below. 36 | 37 | A configured backend in same config file, distrubuted with lal to your devs. Currently, this only supports artifactory. 
38 | 39 | ## Installation 40 | If you do not want to install rust, get a statically linked version of lal: 41 | 42 | ```sh 43 | curl -sSL https://github.com/lalbuild/lal/releases/download/v3.8.1/lal.tar.gz | sudo tar xz -C /usr/local 44 | echo "source /usr/local/share/lal/lal.complete.sh" > ~/.bash_completion 45 | curl -sSL https://raw.githubusercontent.com/lalbuild/lal/master/configs/demo.json > cfg.json 46 | lal configure cfg.json 47 | ``` 48 | 49 | These are built on [CI](https://travis-ci.org/lalbuild/lal/builds) via [muslrust](https://github.com/clux/muslrust). You can drop `sudo` if you own or `chown` your install prefix. 50 | 51 | ## Building 52 | Clone, install from source with [rust](https://www.rust-lang.org/en-US/install.html), setup autocomplete, and select your site-config: 53 | 54 | ```sh 55 | git clone git@github.com:lalbuild/lal.git && cd lal 56 | cargo install 57 | echo "source $PWD/lal.complete.sh" >> ~/.bash_completion 58 | lal configure configs/demo.json 59 | ``` 60 | 61 | ## Usage 62 | 63 | ### Creating a new component 64 | Create a git repo, lal init it, then update deps and verify it builds. 65 | 66 | ```sh 67 | lal init alpine # create manifest for a alpine component 68 | git add .lal/ 69 | git commit -m "init newcomponent" 70 | # add some dependencies to manifest (if you have a storage backend) 71 | lal update gtest --save-dev 72 | lal update libwebsockets --save 73 | # create source and iterate until `lal build` passes 74 | 75 | # later.. 76 | git commit -a -m "inital working version" 77 | git push -u origin master 78 | ``` 79 | 80 | Note that the first `lal build` will call `lal env update` to make sure you have the build environment. 81 | 82 | ### Creating a new version 83 | Designed to be handled by CI on each push to master (ideally through validated merge). CI should create your numeric tag and upload the build output to artifactory. See the [spec](./SPEC.md) for full info. 
84 | 85 | ## Docker Image 86 | The `build` and `shell` commands will use `docker run` on a configured image. The only condition we require of docker images is that they have a `lal` user added. 87 | 88 | Normally, this is sufficient in a docker image to satisfy constraints: 89 | 90 | ``` 91 | RUN useradd -ms /bin/bash lal -G sudo && \ 92 | echo "%sudo ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers 93 | 94 | VOLUME ["/home/lal/volume"] 95 | ``` 96 | 97 | Note that `sudo` is not necessary, but sometimes convenient. 98 | 99 | We will use this user inside the container to run build scripts. By default this works best if the `id` of the host user is 1000:1000, but if it is not, then lal will create a slightly modified version of the image that matches the user id and group id for your host system. 100 | 101 | This is a one time operation, and it is a more general solution for use than docker usernamespaces (which is currently incompatible with features like host networking). 102 | 103 | ## Developing 104 | Have the [rust documentation for lal](https://cisco.github.io/lal-build-manager) ready. 105 | 106 | To hack on `lal`, follow normal install procedure, but build non-release builds iteratively. 107 | When developing we do not do `--release`. Thus you should for convenience link `lal` via `ln -sf $PWD/target/debug/lal /usr/local/bin/lal`. 108 | 109 | When making changes: 110 | 111 | ```sh 112 | cargo build 113 | lal subcommand ..args # check that your thing is good 114 | cargo test # write tests 115 | ``` 116 | 117 | Good practices before comitting (not mandatory): 118 | 119 | ```sh 120 | cargo fmt # requires `cargo install rustfmt` and $HOME/.cargo/bin on $PATH 121 | rustup run nighthly cargo clippy # requires nightly install of clippy 122 | ``` 123 | 124 | Note that if you have a rust environment set up in your lal config, you can actually `lal build lal` (which will use the provided `manifest.json` and `BUILD` file). 
125 | 126 | ## Build issues 127 | If libraries cannot be built, then upgrade `rustc` by running `rustup update stable`. 128 | 129 | - missing ssl: install distro equivalent of `libssl-dev` then `cargo clean` 130 | - fatal error: 'openssl/hmac.h' file not found If you are on a GNU/Linux distribution (like Ubuntu), please install `libssl-dev`. If you are on OSX, please install openssl and check your OpenSSL configuration: 131 | 132 | ```sh 133 | brew install openssl 134 | export OPENSSL_INCLUDE_DIR=`brew --prefix openssl`/include 135 | export OPENSSL_LIB_DIR=`brew --prefix openssl`/lib 136 | export DEP_OPENSSL_INCLUDE=`brew --prefix openssl`/include 137 | ``` 138 | 139 | ## Runtime issues 140 | ### SSL Certificates 141 | The lookup of SSL certificates to do peer verification can fail if they are missing or in a non-standard location. The search is done via the [openssl-probe crate](https://github.com/alexcrichton/openssl-probe/blob/master/src/lib.rs). 142 | 143 | Although this shouldn't be necessary anymore; you can also override the search yourself by pointing to the certificates explicitly: 144 | 145 | ``` 146 | # OSX 147 | export SSL_CERT_FILE=/usr/local/etc/openssl/cert.pem 148 | # CentOS 149 | export SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt 150 | ``` 151 | 152 | 153 | This should be put in your `~/.bashrc` or `~/.bash_profile` as `lal` reads it on every run. Note that the normal location is `/etc/ssl/certs/ca-certificates.crt` for most modern linux distros. 154 | 155 | ### Docker permission denieds 156 | You need to have performed `docker login`, and your user must have been added to the correct group on dockerhub by someone in charge before you can pull build environments. 
157 | 158 | ## Logging 159 | Configurable via flags before the subcommand: 160 | 161 | ```sh 162 | lal fetch # normal output 163 | lal -v fetch # debug output 164 | lal -vv fetch # all output 165 | ``` 166 | 167 | ### Influences 168 | Main inspirations were [cargo](https://github.com/rust-lang/cargo) and [npm](https://github.com/npm/npm). 169 | A useful reference for the terms used throughout: [so you want to write a package manager](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527#.rlvjqxc4r) (long read). 170 | -------------------------------------------------------------------------------- /configs/demo.json: -------------------------------------------------------------------------------- 1 | { 2 | "backend": { 3 | "local": {} 4 | }, 5 | "environments": { 6 | "alpine": { 7 | "name": "clux/lal-alpine", 8 | "tag": "3.6" 9 | }, 10 | "xenial": { 11 | "name": "clux/lal-xenial", 12 | "tag": "latest" 13 | }, 14 | "rust": { 15 | "name": "clux/lal-muslrust", 16 | "tag": "latest" 17 | } 18 | }, 19 | "mounts": [ 20 | { 21 | "src": "~/.lal/history", 22 | "dest": "/home/lal/.bash_history", 23 | "readonly": false 24 | } 25 | ], 26 | "minimum_lal": "3.7.0" 27 | } 28 | -------------------------------------------------------------------------------- /configs/edonus.json: -------------------------------------------------------------------------------- 1 | { 2 | "backend": { 3 | "artifactory": { 4 | "master": "https://engci-maven-master.cisco.com/artifactory", 5 | "slave": "https://engci-maven.cisco.com/artifactory", 6 | "release": "CME-release", 7 | "vgroup": "CME-group", 8 | "credentials": null 9 | } 10 | }, 11 | "environments": { 12 | "py3": { 13 | "name": "edonusdevelopers/build_python", 14 | "tag": "latest" 15 | }, 16 | "rust": { 17 | "name": "edonusdevelopers/muslrust", 18 | "tag": "latest" 19 | }, 20 | "xenial": { 21 | "name": "edonusdevelopers/build_xenial", 22 | "tag": "latest" 23 | }, 24 | "artful": { 25 | "name": 
"edonusdevelopers/build_artful", 26 | "tag": "latest" 27 | } 28 | }, 29 | "mounts": [ 30 | { 31 | "src": "~/.lal/history", 32 | "dest": "/home/lal/.bash_history", 33 | "readonly": false 34 | }, 35 | { 36 | "src": "/mnt/tools", 37 | "dest": "/tools", 38 | "readonly": true 39 | }, 40 | { 41 | "src": "cargo-cache", 42 | "dest": "/home/lal/.cargo", 43 | "readonly": false 44 | } 45 | ], 46 | "minimum_lal": "3.5.0" 47 | } 48 | -------------------------------------------------------------------------------- /configs/standard.json: -------------------------------------------------------------------------------- 1 | { 2 | "backend": { 3 | "local": {} 4 | }, 5 | "environments": { 6 | "xenial": { 7 | "name": "clux/lal-xenial", 8 | "tag": "latest" 9 | }, 10 | "muslrust": { 11 | "name": "clux/lal-muslrust", 12 | "tag": "latest" 13 | } 14 | }, 15 | "mounts": [ 16 | { 17 | "src": "~/.lal/history", 18 | "dest": "/home/lal/.bash_history", 19 | "readonly": false 20 | } 21 | ], 22 | "minimum_lal": "3.8.0" 23 | } 24 | -------------------------------------------------------------------------------- /docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | cargo doc 5 | echo "" > target/doc/index.html 6 | ghp-import -n target/doc 7 | git push -qf "git@github.com:lalbuild/lal.git" gh-pages 8 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | ARTIFACT 2 | INPUT 3 | OUTPUT 4 | -------------------------------------------------------------------------------- /examples/README: -------------------------------------------------------------------------------- 1 | ## Introduction to the lal build management tool 2 | 3 | Get hold of the `lal` tool. 
4 | 5 | $ git clone https://github.com/lalbuild/lal; cd lal 6 | $ cargo build --release 7 | 8 | Add it to your PATH 9 | 10 | $ cp lal/target/release/lal ~/bin/lal 11 | 12 | Configure lal for local development 13 | 14 | $ lal configure ./configs/standard.json 15 | 16 | ### Running the Hello World example 17 | 18 | The build process for a component will generate files in OUTPUT. These are packed 19 | together and published as an ARTIFACT, available for other components to depend on. 20 | Other components can depend on published artifacts as INPUT for their own build. 21 | 22 | This example shows how to use `lal` as a basic dependency manager. We will create 23 | a C static archive containing a single function, and a binary which depends on it. 24 | 25 | Building any component is as simple as calling `lal build` in it's directory. 26 | 27 | $ cd examples/libhello 28 | 29 | $ lal build 30 | lal::build: Running build script in xenial container 31 | ... 32 | lal::build: Build succeeded with verified dependencies 33 | $ ls OUTPUT 34 | hello.h libhello.a lockfile.json 35 | 36 | To create an artifact, build using the `--release` flag, and publish it. 37 | 38 | $ lal build --release --with-version=1 --with-sha=$(git rev-parse HEAD) 39 | ... 40 | lal::core::output: Taring OUTPUT 41 | $ ls ARTIFACT 42 | libhello.tar.gz lockfile.json 43 | $ lal publish libhello 44 | lal::publish: Publishing libhello=1 to xenial 45 | 46 | This component is now available as a dependency to other builds. 47 | `lal fetch` will fetch all dependencies into our build tree. 48 | 49 | $ cd ../hello 50 | $ lal fetch 51 | lal::fetch: Fetch xenial libhello 1 52 | $ ls INPUT 53 | libhello 54 | 55 | With the dependencies in place, you can now build the executable. 
56 | 57 | $ lal build 58 | lal::verify: Dependencies fully verified 59 | lal::build: Running build script in xenial container 60 | cc -static -o OUTPUT/hello main.c INPUT/libhello/libhello.a 61 | lal::build: Build succeeded with verified dependencies 62 | 63 | Run the executable in a controlled environment 64 | 65 | $ lal shell ./OUTPUT/hello 66 | lal::shell: Entering clux/lal-xenial:latest 67 | Hello World! 68 | 69 | Or execute on the host 70 | 71 | $ ./OUTPUT/hello 72 | Hello World! 73 | 74 | ### Managing versions 75 | 76 | Let's change libhello and version bump it. 77 | 78 | $ cd ../libhello2 79 | $ lal build --release --with-version=2 --with-sha=$(git rev-parse HEAD) 80 | ... 81 | $ lal publish libhello 82 | lal::publish: Publishing libhello=2 to xenial 83 | 84 | Pull in the new version of libhello. 85 | 86 | $ cd ../hello2 87 | $ lal update libhello 88 | lal::update: Fetch xenial libhello 89 | lal::storage::download: Last versions for libhello in xenial env is {1, 2} 90 | lal::update: Fetch xenial libhello=2 91 | 92 | Build with the new dependencies. 93 | 94 | $ lal build 95 | lal::verify: Dependencies fully verified 96 | lal::build: Running build script in xenial container 97 | cc -static -o OUTPUT/hello main.c INPUT/libhello/libhello.a 98 | lal::build: Build succeeded with verified dependencies 99 | 100 | Run it! 101 | 102 | $ ./OUTPUT/hello 103 | Hello World! 104 | $ ./OUTPUT/hello Ben 105 | Hello Ben! 
106 | -------------------------------------------------------------------------------- /examples/hello/.lal/BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | make 3 | -------------------------------------------------------------------------------- /examples/hello/.lal/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hello", 3 | "environment": "xenial", 4 | "supportedEnvironments": [ 5 | "xenial" 6 | ], 7 | "components": { 8 | "hello": { 9 | "defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": { 16 | "libhello": 1 17 | }, 18 | "devDependencies": {} 19 | } 20 | -------------------------------------------------------------------------------- /examples/hello/Makefile: -------------------------------------------------------------------------------- 1 | CFLAGS=-static -IINPUT 2 | LDFLAGS=-LINPUT/libhello 3 | LIBS=-lhello 4 | 5 | all: OUTPUT OUTPUT/hello 6 | OUTPUT/hello: main.c 7 | $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ $(LIBS) 8 | 9 | OUTPUT: 10 | mkdir -p OUTPUT 11 | -------------------------------------------------------------------------------- /examples/hello/main.c: -------------------------------------------------------------------------------- 1 | #include "libhello/hello.h" 2 | 3 | int main() { 4 | hello(); 5 | return 0; 6 | } 7 | -------------------------------------------------------------------------------- /examples/hello2/.lal/BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | make 3 | -------------------------------------------------------------------------------- /examples/hello2/.lal/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hello", 3 | "environment": "xenial", 4 | "supportedEnvironments": [ 5 | "xenial" 6 | ], 7 | "components": { 8 | "hello": { 9 | 
"defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": { 16 | "libhello": 2 17 | }, 18 | "devDependencies": {} 19 | } 20 | -------------------------------------------------------------------------------- /examples/hello2/Makefile: -------------------------------------------------------------------------------- 1 | CFLAGS=-static -IINPUT 2 | LDFLAGS=-LINPUT/libhello 3 | LIBS=-lhello 4 | 5 | all: OUTPUT OUTPUT/hello 6 | OUTPUT/hello: main.c 7 | $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ $(LIBS) 8 | 9 | OUTPUT: 10 | mkdir -p OUTPUT 11 | -------------------------------------------------------------------------------- /examples/hello2/main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "libhello/hello.h" 3 | 4 | int main(int argc, char **argv) { 5 | 6 | if (argc > 1) { 7 | hello(argv[1]); 8 | } else { 9 | hello(NULL); 10 | } 11 | 12 | return 0; 13 | } 14 | -------------------------------------------------------------------------------- /examples/libhello/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | -------------------------------------------------------------------------------- /examples/libhello/.lal/BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | make 3 | -------------------------------------------------------------------------------- /examples/libhello/.lal/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "libhello", 3 | "environment": "xenial", 4 | "supportedEnvironments": [ 5 | "xenial" 6 | ], 7 | "components": { 8 | "libhello": { 9 | "defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": {}, 16 | "devDependencies": {} 17 | } 18 | -------------------------------------------------------------------------------- 
/examples/libhello/Makefile: -------------------------------------------------------------------------------- 1 | all: OUTPUT OUTPUT/libhello.a OUTPUT/hello.h 2 | 3 | OUTPUT: 4 | mkdir -p $@ 5 | 6 | OUTPUT/libhello.a: hello.o 7 | $(AR) rcs $@ $^ 8 | 9 | OUTPUT/hello.h: hello.h 10 | cp -v $^ $@ 11 | 12 | hello.o: hello.c 13 | $(CC) -c -o $@ $^ 14 | 15 | -------------------------------------------------------------------------------- /examples/libhello/hello.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void hello() { 4 | printf("Hello World!\n"); 5 | } 6 | -------------------------------------------------------------------------------- /examples/libhello/hello.h: -------------------------------------------------------------------------------- 1 | void hello(); 2 | -------------------------------------------------------------------------------- /examples/libhello2/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | -------------------------------------------------------------------------------- /examples/libhello2/.lal/BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | make 3 | -------------------------------------------------------------------------------- /examples/libhello2/.lal/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "libhello", 3 | "environment": "xenial", 4 | "supportedEnvironments": [ 5 | "xenial" 6 | ], 7 | "components": { 8 | "libhello": { 9 | "defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": {}, 16 | "devDependencies": {} 17 | } 18 | -------------------------------------------------------------------------------- /examples/libhello2/Makefile: -------------------------------------------------------------------------------- 1 | all: OUTPUT OUTPUT/libhello.a 
OUTPUT/hello.h 2 | 3 | OUTPUT: 4 | mkdir -p $@ 5 | 6 | OUTPUT/libhello.a: hello.o 7 | $(AR) rcs $@ $^ 8 | 9 | OUTPUT/hello.h: hello.h 10 | cp -v $^ $@ 11 | 12 | hello.o: hello.c 13 | $(CC) -c -o $@ $^ 14 | 15 | -------------------------------------------------------------------------------- /examples/libhello2/hello.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | void hello(char *name) { 4 | if (name == NULL) { 5 | printf("Hello World!\n"); 6 | } else { 7 | printf("Hello %s!\n", name); 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /examples/libhello2/hello.h: -------------------------------------------------------------------------------- 1 | void hello(char *); 2 | -------------------------------------------------------------------------------- /lal.complete.sh: -------------------------------------------------------------------------------- 1 | # lal(1) completion 2 | 3 | _lal() 4 | { 5 | local cur prev words cword 6 | _init_completion || return 7 | 8 | local -r subcommands="build clean configure export fetch help init script run ls 9 | query remove rm shell stash save status update upgrade verify 10 | publish env list-components list-supported-environments list-dependencies 11 | list-environments list-configurations propagate" 12 | 13 | local has_sub 14 | for (( i=0; i < ${#words[@]}-1; i++ )); do 15 | if [[ ${words[i]} == @(build|clean|configure|export|script|propagate|fetch|help|init|remove|rm|script|run|query|shell|stash|save|status|ls|update|upgrade|verify|publish|env) ]]; then 16 | has_sub=1 17 | fi 18 | done 19 | 20 | local in_lal_repo="" 21 | if [ -f "$PWD/.lal/manifest.json" ] || [ -f "$PWD/manifest.json" ]; then 22 | in_lal_repo="1" 23 | fi 24 | 25 | # global flags 26 | if [[ $prev = 'lal' && "$cur" == -* ]]; then 27 | COMPREPLY=( $(compgen -W '-v -h -V --version --help' -- "$cur" ) ) 28 | return 0 29 | fi 30 | # first subcommand 31 | if [[ -z 
"$has_sub" ]]; then 32 | COMPREPLY=( $(compgen -W "$subcommands" -- "$cur" ) ) 33 | return 0 34 | fi 35 | 36 | # special subcommand completions 37 | local special i 38 | for (( i=0; i < ${#words[@]}-1; i++ )); do 39 | if [[ ${words[i]} == @(build|remove|rm|propagate|export|init|update|script|run|status|ls|query|shell|publish|env|configure|help) ]]; then 40 | special=${words[i]} 41 | fi 42 | done 43 | 44 | if [[ -n $special ]]; then 45 | case $special in 46 | build) 47 | # lal can get the keys from manifest.components 48 | [[ $in_lal_repo ]] || return 0 49 | local -r components=$(lal list-components) 50 | if [[ $prev = "build" ]]; then 51 | COMPREPLY=($(compgen -W "$components" -- "$cur")) 52 | elif [[ $prev == @(--config|-c) ]]; then 53 | # Identify which component is used (arg after build that's not a flag) 54 | local build_component i 55 | for (( i=2; i < ${#words[@]}-1; i++ )); do 56 | if [[ ${words[i]} != -* ]]; then 57 | build_component=${words[i]} 58 | fi 59 | done 60 | local -r configs="$(lal list-configurations "${build_component}")" 61 | COMPREPLY=($(compgen -W "$configs" -- "$cur")) 62 | else 63 | # suggest flags 64 | local -r build_flags="-r --release -f --force -c --config -h --help --X11 -X -n --net-host --print-only --simple-verify -s --env-var" 65 | COMPREPLY=($(compgen -W "$build_flags" -- "$cur")) 66 | fi 67 | ;; 68 | publish) 69 | # lal can get the keys from manifest.components 70 | [[ $in_lal_repo ]] || return 0 71 | local -r components=$(lal list-components) 72 | if [[ $prev = "publish" ]]; then 73 | COMPREPLY=($(compgen -W "$components" -- "$cur")) 74 | fi 75 | ;; 76 | env) 77 | [[ $in_lal_repo ]] || return 0 78 | local -r env_subs="set reset update help -h --help" 79 | if [[ $prev = "set" ]]; then 80 | local -r envs="$(lal list-supported-environments)" 81 | COMPREPLY=($(compgen -W "$envs" -- "$cur")) 82 | else 83 | COMPREPLY=($(compgen -W "$env_subs" -- "$cur")) 84 | fi 85 | ;; 86 | init) 87 | if [[ $prev = "init" ]]; then 88 | local -r 
envs="$(lal list-environments)" 89 | COMPREPLY=($(compgen -W "$envs" -- "$cur")) 90 | fi 91 | ;; 92 | status|ls) 93 | [[ $in_lal_repo ]] || return 0 94 | local -r ls_flags="-f --full -o --origin -t --time -h --help" 95 | COMPREPLY=($(compgen -W "$ls_flags" -- "$cur")) 96 | ;; 97 | export|query) 98 | components=$(find "$HOME/.lal/cache/environments" -maxdepth 2 -mindepth 2 -type d -printf "%f " 2> /dev/null) 99 | COMPREPLY=($(compgen -W "$components" -- "$cur")) 100 | ;; 101 | update) 102 | [[ $in_lal_repo ]] || return 0 103 | # Looking in local cache for allowed component names 104 | # Means this won't work first time, but will be quick 105 | local components="" 106 | components=$(find "$HOME/.lal/cache/environments/" -maxdepth 2 -mindepth 2 -type d -printf "%f " 2> /dev/null) 107 | # also add stashed components to list 108 | for dr in ~/.lal/cache/stash/**/**; do 109 | if [[ "$dr" != *"**" ]]; then # ignore empty element (ends in **) 110 | components="${components} $(basename "$(dirname "$dr")")=$(basename "$dr")" 111 | fi 112 | done 113 | # can't complete past the equals because = is a new word for some reason 114 | # but at least you have the info in the list - #bash 115 | COMPREPLY=($(compgen -W "$components" -- "$cur")) 116 | ;; 117 | remove|rm) 118 | [[ $in_lal_repo ]] || return 0 119 | # look in INPUT here, nothing else makes sense 120 | local -r installed=$(find "$PWD/INPUT/" -maxdepth 1 -mindepth 1 -type d -printf "%f " 2> /dev/null) 121 | COMPREPLY=($(compgen -W "$installed" -- "$cur")) 122 | ;; 123 | propagate) 124 | [[ $in_lal_repo ]] || return 0 125 | # look in INPUT here, nothing else makes sense 126 | local -r installed=$(find "$PWD/INPUT/" -maxdepth 1 -mindepth 1 -type d -printf "%f " 2> /dev/null) 127 | COMPREPLY=($(compgen -W "$installed" -- "$cur")) 128 | ;; 129 | shell) 130 | [[ $in_lal_repo ]] || return 0 131 | # suggest flags 132 | local -r sh_flags="-p --privileged -h --help --print-only --X11 -X -n --net-host --env-var" 133 | if [[ $prev = 
"shell" ]]; then 134 | COMPREPLY=($(compgen -W "$sh_flags" -- "$cur")) 135 | fi 136 | ;; 137 | configure) 138 | # figure out what type of lal installation we have 139 | # and from that infer where the configs would be 140 | local -r run_pth=$(readlink -f "$(which lal)") 141 | local config_dir; 142 | if [[ $run_pth == *target/debug/lal ]] || [[ $run_pth == *target/release/lal ]]; then 143 | # compiled lal => configs in the source dir (up from the target build dir) 144 | config_dir="${run_pth%/target/*}/configs" 145 | else 146 | # musl release => configs in prefix/share/lal/configs 147 | config_dir="${run_pth%/bin/*}/share/lal/configs" 148 | fi 149 | local -r configs=$(find "$config_dir" -type f) 150 | COMPREPLY=($(compgen -W "$configs" -- "$cur")) 151 | ;; 152 | help) 153 | COMPREPLY=($(compgen -W "$subcommands" -- "$cur")) 154 | ;; 155 | script|run) 156 | [[ $in_lal_repo ]] || return 0 157 | # locate the scripts in .lal/scripts 158 | local -r scripts="$(find "$PWD/.lal/scripts/" -maxdepth 1 -type f -printf "%f " 2> /dev/null)" 159 | local -r second_args="${scripts} -p --privileged --X11 -X -n --net-host --print-only --env-var" 160 | 161 | if [[ $prev == @(script|run) ]] || [[ $prev == -* ]]; then 162 | COMPREPLY=($(compgen -W "$second_args" -- "$cur")) 163 | else 164 | # Identify which script we used (arg after run that's not a flag) 165 | local run_script i 166 | for (( i=2; i < ${#words[@]}-1; i++ )); do 167 | if [[ ${words[i]} != -* ]] && echo "$scripts" | grep -q "${words[i]}"; then 168 | run_script=${words[i]} 169 | fi 170 | done 171 | local -r comps=$(source "$PWD/.lal/scripts/$run_script"; completer) 172 | COMPREPLY=($(compgen -W "$comps" -- "$cur")) 173 | fi 174 | ;; 175 | esac 176 | fi 177 | 178 | return 0 179 | } && 180 | complete -F _lal lal 181 | -------------------------------------------------------------------------------- /manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "lal", 3 | 
"environment": "rust", 4 | "supportedEnvironments": ["rust"], 5 | "components": { 6 | "lal": { 7 | "defaultConfig": "release", 8 | "configurations": [ 9 | "release", 10 | "debug", 11 | "slim" 12 | ] 13 | }, 14 | "lal-unit-tests": { 15 | "defaultConfig": "debug", 16 | "configurations": [ 17 | "debug" 18 | ] 19 | } 20 | }, 21 | "dependencies": {}, 22 | "devDependencies": {} 23 | } 24 | -------------------------------------------------------------------------------- /package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # THIS SCRIPT SHOULD BE INVOKED BY CI INSTEAD OF lal publish 4 | # This is because lal intentionally uses semver. 5 | 6 | 7 | # If you have done `lal build lal --release` 8 | # It will convert the standard release structure of ARTIFACT: 9 | # 10 | # ARTIFACT/ 11 | # ├── lal.tar.gz 12 | # └── lockfile.json 13 | # 14 | # And converts it into this folder structure: 15 | # 16 | # ARTIFACT/ 17 | # ├── 3.3.3 18 | # │   └── lal.tar.gz 19 | # ├── latest 20 | # │   └── lal.tar.gz 21 | # └── lockfile.json 22 | # 23 | # Such that jenkins will upload this verbatim to: 24 | # http://engci-maven.cisco.com/artifactory/CME-release/lal/ 25 | # This is the canonical source of lal AT THE MOMENT 26 | # It is also hardcoded in `lal upgrade` so this works. 27 | # We cannot (and should never) `lal publish lal` because lal uses semver. 28 | # We also would not want it accidentally introduced into the normal dependency tree. 29 | # 30 | # crates.io may become the canonical one in the future if it is open sourced. 
31 | 32 | mutate_artifact_folder() { 33 | local -r lalversion=$(grep version Cargo.toml | awk -F"\"" '{print $2}' | head -n 1) 34 | # Guard on version not existing 35 | buildurl="http://engci-maven.cisco.com/artifactory/api/storage/CME-release/lal" 36 | if curl -s "${buildurl}" | grep -q "$lalversion"; then 37 | echo "lal version already uploaded - stopping" # don't want to overwrite 38 | # don't want to upload anything accidentally - jenkins is dumb 39 | rm -rf ARTIFACT/ 40 | else 41 | echo "Packaging new lal version" 42 | mkdir "ARTIFACT/${lalversion}" -p 43 | mv ARTIFACT/lal.tar.gz "ARTIFACT/${lalversion}/lal.tar.gz" 44 | # Overwrite the latest folder 45 | cp "ARTIFACT/${lalversion}" "ARTIFACT/latest" -R 46 | fi 47 | } 48 | 49 | 50 | main() { 51 | set -e 52 | if [ ! -f ARTIFACT/lal.tar.gz ]; then 53 | echo "No release build of lal found" 54 | rm -rf ARTIFACT # just in case 55 | exit 2 56 | fi 57 | echo "Found release build with:" 58 | tar tvf ARTIFACT/lal.tar.gz 59 | mutate_artifact_folder 60 | } 61 | 62 | main "$@" 63 | -------------------------------------------------------------------------------- /release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | ver=$(grep version Cargo.toml | head -n 1 | awk -F'"' '{print $2}') 5 | 6 | git tag -a "v${ver}" -m "${ver}" 7 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | chain_indent = "block" 2 | fn_call_style = "visual" 3 | closure_block_indent_threshold = 0 4 | fn_single_line = true 5 | array_layout = "block" 6 | 7 | max_width = 100 8 | single_line_if_else_max_width = 90 9 | chain_one_line_max = 90 10 | 11 | newline_style = "unix" 12 | 13 | write_mode = "overwrite" 14 | use_try_shorthand = true 15 | -------------------------------------------------------------------------------- /src/build.rs: 
-------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | use std::fs; 3 | 4 | use shell; 5 | use verify::verify; 6 | use super::{ensure_dir_exists_fresh, output, Lockfile, Manifest, Container, Config, LalResult, 7 | CliError, DockerRunFlags, ShellModes}; 8 | 9 | 10 | fn find_valid_build_script() -> LalResult { 11 | use std::os::unix::fs::PermissionsExt; 12 | 13 | // less intrusive location for BUILD scripts 14 | let bpath_new = Path::new("./.lal/BUILD"); 15 | let bpath_old = Path::new("./BUILD"); // fallback if new version does not exist 16 | let bpath = if bpath_new.exists() { 17 | if bpath_old.exists() { 18 | warn!("BUILD found in both .lal/ and current directory"); 19 | warn!("Using the default: .lal/BUILD"); 20 | } 21 | bpath_new 22 | } else if bpath_old.exists() { 23 | bpath_old 24 | } else { 25 | return Err(CliError::MissingBuildScript); 26 | }; 27 | trace!("Using BUILD script found in {}", bpath.display()); 28 | // Need the string to construct a list of argument for docker run 29 | // lossy convert because paths can somehow contain non-unicode? 30 | let build_string = bpath.to_string_lossy(); 31 | 32 | // presumably we can always get the permissions of a file, right? (inb4 nfs..) 
33 | let mode = bpath.metadata()?.permissions().mode(); 34 | if mode & 0o111 == 0 { 35 | return Err(CliError::BuildScriptNotExecutable(build_string.into())); 36 | } 37 | Ok(build_string.into()) 38 | } 39 | 40 | 41 | /// Configurable build flags for `lal build` 42 | pub struct BuildOptions { 43 | /// Component to build if specified 44 | pub name: Option, 45 | /// Configuration to use for the component if specified 46 | pub configuration: Option, 47 | /// Container to run the `./BUILD` script in 48 | pub container: Container, 49 | /// Create release tarball in `./ARTIFACT` 50 | pub release: bool, 51 | /// An explicit version to put in the lockfile 52 | pub version: Option, 53 | /// An explicit sha changeset id to put in the lockfile 54 | pub sha: Option, 55 | /// Ignore verify failures 56 | pub force: bool, 57 | /// Use the `simple` verify algorithm 58 | pub simple_verify: bool, 59 | } 60 | 61 | 62 | /// Runs the `./BUILD` script in a container and packages artifacts. 63 | /// 64 | /// The function performs basic sanity checks, before shelling out to `docker run` 65 | /// to perform the actual execution of the containerized `./BUILD` script. 
66 | /// 67 | pub fn build( 68 | cfg: &Config, 69 | manifest: &Manifest, 70 | opts: &BuildOptions, 71 | envname: String, 72 | _modes: ShellModes, 73 | ) -> LalResult<()> { 74 | let mut modes = _modes; 75 | 76 | // have a better warning on first file-io operation 77 | // if nfs mounts and stuff cause issues this usually catches it 78 | ensure_dir_exists_fresh("./OUTPUT") 79 | .map_err(|e| { 80 | error!("Failed to clean out OUTPUT dir: {}", e); 81 | e 82 | })?; 83 | 84 | debug!("Version flag is {:?}", opts.version); 85 | 86 | // Verify INPUT 87 | let mut verify_failed = false; 88 | if let Some(e) = verify(manifest, &envname, opts.simple_verify).err() { 89 | if !opts.force { 90 | return Err(e); 91 | } 92 | verify_failed = true; 93 | warn!("Verify failed - build will fail on jenkins, but continuing"); 94 | } 95 | 96 | 97 | let component = opts.name.clone().unwrap_or_else(|| manifest.name.clone()); 98 | debug!("Getting configurations for {}", component); 99 | 100 | // A couple of matchups of configurations and components and sanity checks 101 | // If verify passed then these won't fail, but verify is sometimes ignorable 102 | 103 | // find component details in components.NAME 104 | let component_settings = match manifest.components.get(&component) { 105 | Some(c) => c, 106 | None => return Err(CliError::MissingComponent(component)), 107 | }; 108 | let configuration_name: String = if let Some(c) = opts.configuration.clone() { 109 | c 110 | } else { 111 | component_settings.defaultConfig.clone() 112 | }; 113 | if !component_settings.configurations.contains(&configuration_name) { 114 | let ename = format!("{} not found in configurations list", configuration_name); 115 | return Err(CliError::InvalidBuildConfiguration(ename)); 116 | } 117 | let lockfile = Lockfile::new(&component, 118 | &opts.container, 119 | &envname, 120 | opts.version.clone(), 121 | Some(&configuration_name)) 122 | .set_default_env(manifest.environment.clone()) 123 | .attach_revision_id(opts.sha.clone()) 
124 | .populate_from_input()?; 125 | 126 | let lockpth = Path::new("./OUTPUT/lockfile.json"); 127 | lockfile.write(lockpth)?; // always put a lockfile in OUTPUT at the start of a build 128 | 129 | let bpath = find_valid_build_script()?; 130 | let cmd = vec![bpath, component.clone(), configuration_name]; 131 | 132 | if let Some(v) = opts.version.clone() { 133 | modes.env_vars.push(format!("BUILD_VERSION={}", v)); 134 | } 135 | 136 | debug!("Build script is {:?}", cmd); 137 | if !modes.printonly { 138 | info!("Running build script in {} container", envname); 139 | } 140 | 141 | let run_flags = DockerRunFlags { 142 | interactive: cfg.interactive, 143 | privileged: false, 144 | }; 145 | shell::docker_run(cfg, &opts.container, cmd, &run_flags, &modes)?; 146 | if modes.printonly { 147 | return Ok(()); // nothing else worth doing - warnings are pointless 148 | } 149 | 150 | // Extra info and warnings for people who missed the leading ones (build is spammy) 151 | if verify_failed { 152 | warn!("Build succeeded - but `lal verify` failed"); 153 | warn!("Please make sure you are using correct dependencies before pushing") 154 | } else { 155 | info!("Build succeeded with verified dependencies") 156 | } 157 | // environment is temporarily optional in manifest: 158 | if envname != manifest.environment { 159 | warn!("Build was using non-default {} environment", envname); 160 | } 161 | 162 | if opts.release && !modes.printonly { 163 | trace!("Create ARTIFACT dir"); 164 | ensure_dir_exists_fresh("./ARTIFACT")?; 165 | trace!("Copy lockfile to ARTIFACT dir"); 166 | fs::copy(&lockpth, Path::new("./ARTIFACT/lockfile.json"))?; 167 | 168 | trace!("Tar up OUTPUT into ARTIFACT/component.tar.gz"); 169 | let tarpth = Path::new("./ARTIFACT").join([component, ".tar.gz".into()].concat()); 170 | output::tar(&tarpth)?; 171 | } 172 | Ok(()) 173 | } 174 | -------------------------------------------------------------------------------- /src/clean.rs: 
-------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::path::Path; 3 | 4 | use chrono::{DateTime, UTC, Duration, TimeZone}; 5 | use filetime::FileTime; 6 | use walkdir::WalkDir; 7 | 8 | use super::LalResult; 9 | 10 | // helper for `lal::clean` 11 | fn clean_in_dir(cutoff: DateTime, dirs: WalkDir) -> LalResult<()> { 12 | let drs = dirs.into_iter().filter_map(|e| e.ok()).filter(|e| e.path().is_dir()); 13 | 14 | for d in drs { 15 | let pth = d.path(); 16 | trace!("Checking {}", pth.to_str().unwrap()); 17 | let mtime = FileTime::from_last_modification_time(&d.metadata().unwrap()); 18 | let mtimedate = UTC.ymd(1970, 1, 1).and_hms(0, 0, 0) + 19 | Duration::seconds(mtime.seconds_relative_to_1970() as i64); 20 | 21 | trace!("Found {} with mtime {}", pth.to_str().unwrap(), mtimedate); 22 | if mtimedate < cutoff { 23 | debug!("Cleaning {}", pth.to_str().unwrap()); 24 | fs::remove_dir_all(pth)?; 25 | } 26 | } 27 | Ok(()) 28 | } 29 | 30 | /// Clean old artifacts in cache directory 31 | /// 32 | /// This does the equivalent of find CACHEDIR -mindepth 3 -maxdepth 3 -type d 33 | /// With the correct mtime flags, then -exec deletes these folders. 
34 | pub fn clean(cachedir: &str, days: i64) -> LalResult<()> { 35 | let cutoff = UTC::now() - Duration::days(days); 36 | debug!("Cleaning all artifacts from before {}", cutoff); 37 | 38 | // clean out environment subdirectories 39 | let edir = Path::new(&cachedir).join("environments"); 40 | let edirs = WalkDir::new(&edir).min_depth(3).max_depth(3); 41 | clean_in_dir(cutoff, edirs)?; 42 | 43 | // clean out stash 44 | let dirs = WalkDir::new(&cachedir).min_depth(3).max_depth(3); 45 | clean_in_dir(cutoff, dirs)?; 46 | 47 | Ok(()) 48 | } 49 | -------------------------------------------------------------------------------- /src/configure.rs: -------------------------------------------------------------------------------- 1 | use std::path::{Path, PathBuf}; 2 | use std::fs; 3 | use std::env; 4 | use std::process::Command; 5 | use semver::Version; 6 | 7 | use super::{LalResult, Config, ConfigDefaults, CliError, config_dir}; 8 | 9 | fn executable_on_path(exe: &str) -> LalResult<()> { 10 | trace!("Verifying executable {}", exe); 11 | let s = Command::new("which").arg(exe).output()?; 12 | if !s.status.success() { 13 | debug!("Failed to find {}: {}", 14 | exe, 15 | String::from_utf8_lossy(&s.stderr).trim()); 16 | return Err(CliError::ExecutableMissing(exe.into())); 17 | }; 18 | debug!("Found {} at {}", 19 | exe, 20 | String::from_utf8_lossy(&s.stdout).trim()); 21 | Ok(()) 22 | } 23 | 24 | fn docker_sanity() -> LalResult<()> { 25 | let dinfo_output = Command::new("docker").arg("info").output()?; 26 | let doutstr = String::from_utf8_lossy(&dinfo_output.stdout); 27 | if doutstr.contains("aufs") { 28 | warn!("Your storage driver is AUFS - this is known to have build issues"); 29 | warn!("Please change your storage driver to overlay2 or devicemapper"); 30 | warn!("Consult https://docs.docker.com/engine/userguide/storagedriver/ for info"); 31 | } 32 | // TODO: Can grep for CPUs, RAM if in the config perhaps? 
33 | Ok(()) 34 | } 35 | 36 | fn kernel_sanity() -> LalResult<()> { 37 | use semver::Identifier; 38 | // NB: ubuntu's use of linux kernel is not completely semver 39 | // the pre numbers does not indicate a prerelease, but rather fixes 40 | // thus 4.4.0-93 on ubuntu is semver LESS than semver 4.4.0 41 | // We thus restrict to be > 4.4.0-0-0 instead (>= number of pre-identifiers) 42 | let req = Version { 43 | major: 4, 44 | minor: 4, 45 | patch: 0, 46 | pre: vec![Identifier::Numeric(0), Identifier::Numeric(0)], 47 | build: vec![], 48 | }; 49 | let uname_output = Command::new("uname").arg("-r").output()?; 50 | let uname = String::from_utf8_lossy(&uname_output.stdout); 51 | match uname.trim().parse::() { 52 | Ok(ver) => { 53 | debug!("Found linux kernel version {}", ver); 54 | trace!("found major {} minor {} patch {} - prelen {}", 55 | ver.major, 56 | ver.minor, 57 | ver.patch, 58 | ver.pre.len()); 59 | trace!("req major {} minor {} patch {} - prelen {}", 60 | req.major, 61 | req.minor, 62 | req.patch, 63 | req.pre.len()); 64 | if ver >= req { 65 | debug!("Minimum kernel requirement of {} satisfied ({})", 66 | req.to_string(), 67 | ver.to_string()); 68 | } else { 69 | warn!("Your Linux kernel {} is very old", ver.to_string()); 70 | warn!("A kernel >= {} is highly recommended on Linux systems", 71 | req.to_string()) 72 | } 73 | } 74 | Err(e) => { 75 | // NB: Darwin would enter here.. 
76 | warn!("Failed to parse kernel version from `uname -r`: {}", e); 77 | warn!("Note that a kernel version of 4.4 is expected on linux"); 78 | } 79 | } 80 | Ok(()) // don't block on this atm to not break OSX 81 | } 82 | 83 | fn docker_version_check() -> LalResult<()> { 84 | // docker-ce changes to different version scheme, but still semver >= 1.13 85 | let req = Version { 86 | major: 1, 87 | minor: 12, 88 | patch: 0, 89 | pre: vec![], 90 | build: vec![], 91 | }; 92 | // NB: this is nicer: `docker version -f "{{ .Server.Version }}"` 93 | // but it doesn't work on the old versions we wnat to prevent.. 94 | let dver_output = Command::new("docker").arg("--version").output()?; 95 | let dverstr = String::from_utf8_lossy(&dver_output.stdout); 96 | trace!("docker version string {}", dverstr); 97 | let dverary = dverstr.trim().split(" ").collect::>(); 98 | if dverary.len() < 3 { 99 | warn!("Failed to parse docker version: ({})", dverstr); 100 | return Ok(()); // assume it's a really weird docker 101 | } 102 | let mut dver = dverary[2].to_string(); // third entry is the semver version 103 | dver.pop(); // remove trailing comma (even if it goes, this parses) 104 | match dver.parse::() { 105 | Ok(ver) => { 106 | debug!("Found docker version {}", ver); 107 | if ver < req { 108 | warn!("Your docker version {} is very old", ver.to_string()); 109 | warn!("A docker version >= {} is highly recommended", 110 | req.to_string()) 111 | } else { 112 | debug!("Minimum docker requirement of {} satisfied ({})", 113 | req.to_string(), 114 | ver.to_string()); 115 | } 116 | } 117 | Err(e) => { 118 | warn!("Failed to parse docker version from `docker --version`: {}", 119 | e); 120 | warn!("Note that a docker version >= 1.12 is expected"); 121 | } 122 | } 123 | Ok(()) 124 | } 125 | 126 | fn ssl_cert_sanity() -> LalResult<()> { 127 | // SSL_CERT_FILE are overridden by openssl_probe in main.rs 128 | // BUT this happens AFTER lal configure 129 | // evars are currently empty (unless set manually) - 
so we can provide debug here 130 | let is_overridden = env::var_os("SSL_CERT_FILE").is_some(); 131 | use openssl_probe; 132 | let proberes = openssl_probe::probe(); 133 | if let Some(cert) = proberes.cert_dir { 134 | debug!("Using SSL_CERT_DIR as {}", cert.display()); 135 | } 136 | if let Some(cert) = proberes.cert_file.clone() { 137 | debug!("Using SSL_CERT_FILE as {}", cert.display()); 138 | Ok(()) 139 | } else { 140 | if is_overridden { 141 | warn!("SSL_CERT_FILE overridden by user"); 142 | warn!("This should generally not be necessary any more"); 143 | } 144 | warn!("CA certificates bundle appears to be missing - you will encounter ssl errors"); 145 | warn!("Please ensure you have the standard ca-certificates package"); 146 | warn!("Alternatively you can the SSL_CERT_FILE in you shell to a non-standard location"); 147 | Err(CliError::MissingSslCerts) 148 | } 149 | } 150 | 151 | fn lal_version_check(minlal: &str) -> LalResult<()> { 152 | let current = Version::parse(env!("CARGO_PKG_VERSION")).unwrap(); 153 | let req = Version::parse(minlal).unwrap(); 154 | if current < req { 155 | Err(CliError::OutdatedLal(current.to_string(), req.to_string())) 156 | } else { 157 | debug!("Minimum lal requirement of {} satisfied ({})", 158 | req.to_string(), 159 | current.to_string()); 160 | Ok(()) 161 | } 162 | } 163 | 164 | fn non_root_sanity() -> LalResult<()> { 165 | let uid_output = Command::new("id").arg("-u").output()?; 166 | let uid_str = String::from_utf8_lossy(&uid_output.stdout); 167 | let uid = uid_str.trim().parse::().unwrap(); // trust `id -u` is sane 168 | 169 | if uid == 0 { 170 | warn!("Running lal as root user not allowed"); 171 | warn!("Builds remap your user id to a corresponding one inside a build environment"); 172 | warn!("This is at the moment incompatible with the root user"); 173 | warn!("Try again without sudo, or if you are root, create a proper build user"); 174 | Err(CliError::UnmappableRootUser) 175 | } else { 176 | Ok(()) 177 | } 178 | } 179 | 180 
| fn create_lal_dir() -> LalResult { 181 | let laldir = config_dir(); 182 | if !laldir.is_dir() { 183 | fs::create_dir(&laldir)?; 184 | } 185 | let histfile = Path::new(&laldir).join("history"); 186 | if !histfile.exists() { 187 | fs::File::create(histfile)?; 188 | } 189 | Ok(laldir) 190 | } 191 | 192 | /// Create `~/.lal/config` with defaults 193 | /// 194 | /// A boolean option to discard the output is supplied for tests. 195 | /// A defaults file must be supplied to seed the new config with defined environments 196 | pub fn configure(save: bool, interactive: bool, defaults: &str) -> LalResult { 197 | let _ = create_lal_dir()?; 198 | 199 | for exe in [ 200 | "docker", 201 | "tar", 202 | "touch", 203 | "id", 204 | "find", 205 | "mkdir", 206 | "chmod", 207 | "uname", 208 | ].into_iter() 209 | { 210 | executable_on_path(exe)?; 211 | } 212 | docker_sanity()?; 213 | docker_version_check()?; 214 | kernel_sanity()?; 215 | ssl_cert_sanity()?; 216 | non_root_sanity()?; 217 | 218 | let def = ConfigDefaults::read(defaults)?; 219 | 220 | // Enforce minimum_lal version check here if it's set in the defaults file 221 | if let Some(minlal) = def.minimum_lal.clone() { 222 | lal_version_check(&minlal)?; 223 | } 224 | 225 | let mut cfg = Config::new(def); 226 | cfg.interactive = interactive; // need to override default for tests 227 | if save { 228 | cfg.write(false)?; 229 | } 230 | Ok(cfg) 231 | } 232 | -------------------------------------------------------------------------------- /src/core/config.rs: -------------------------------------------------------------------------------- 1 | use serde_json; 2 | use chrono::UTC; 3 | use std::path::{Path, PathBuf}; 4 | use std::fs; 5 | use std::vec::Vec; 6 | use std::io::prelude::*; 7 | use std::collections::BTreeMap; 8 | use std::env; 9 | 10 | use super::{Container, LalResult, CliError}; 11 | use storage::BackendConfiguration; 12 | 13 | fn find_home_dir() -> PathBuf { 14 | // Either we have LAL_CONFIG_HOME evar, or HOME 15 | if let 
Ok(lh) = env::var("LAL_CONFIG_HOME") { 16 | Path::new(&lh).to_owned() 17 | } else { 18 | env::home_dir().unwrap() 19 | } 20 | } 21 | 22 | /// Master override for where the .lal config lives 23 | pub fn config_dir() -> PathBuf { 24 | let home = find_home_dir(); 25 | Path::new(&home).join(".lal") 26 | } 27 | 28 | /// Docker volume mount representation 29 | #[derive(Serialize, Deserialize, Clone)] 30 | pub struct Mount { 31 | /// File or folder to mount 32 | pub src: String, 33 | /// Location inside the container to mount it at 34 | pub dest: String, 35 | /// Whether or not to write protect the mount inside the container 36 | pub readonly: bool, 37 | } 38 | 39 | /// Representation of `~/.lal/config` 40 | #[allow(non_snake_case)] 41 | #[derive(Serialize, Deserialize, Clone)] 42 | pub struct Config { 43 | /// Configuration settings for the `Backend` 44 | pub backend: BackendConfiguration, 45 | /// Cache directory for global and stashed builds 46 | pub cache: String, 47 | /// Environments shorthands that are allowed and their full meaning 48 | pub environments: BTreeMap, 49 | /// Time of last upgrade 50 | pub lastUpgrade: String, 51 | /// Whether to perform automatic upgrade 52 | pub autoupgrade: bool, 53 | /// Extra volume mounts to be set for the container 54 | pub mounts: Vec, 55 | /// Force inteactive shells 56 | pub interactive: bool, 57 | /// Minimum version restriction of lal enforced by this config 58 | pub minimum_lal: Option, 59 | } 60 | 61 | /// Representation of a configuration defaults file 62 | /// 63 | /// This file is being used to generate the config when using `lal configure` 64 | #[derive(Serialize, Deserialize, Clone, Default)] 65 | pub struct ConfigDefaults { 66 | /// Configuration settings for the `Backend` 67 | pub backend: BackendConfiguration, 68 | /// Environments shorthands that are allowed and their full meaning 69 | pub environments: BTreeMap, 70 | /// Extra volume mounts to be set for the container 71 | pub mounts: Vec, 72 | /// Optional 
minimum version restriction of lal 73 | pub minimum_lal: Option, 74 | } 75 | 76 | impl ConfigDefaults { 77 | /// Open and deserialize a defaults file 78 | pub fn read(file: &str) -> LalResult { 79 | let pth = Path::new(file); 80 | if !pth.exists() { 81 | error!("No such defaults file '{}'", file); // file open will fail below 82 | } 83 | let mut f = fs::File::open(&pth)?; 84 | let mut data = String::new(); 85 | f.read_to_string(&mut data)?; 86 | let defaults: ConfigDefaults = serde_json::from_str(&data)?; 87 | Ok(defaults) 88 | } 89 | } 90 | 91 | fn check_mount(name: &str) -> LalResult { 92 | // See if it's a path first: 93 | let home = find_home_dir(); 94 | let src = name.to_string().replace("~", &home.to_string_lossy()); 95 | let mount_path = Path::new(&src); 96 | if mount_path.exists() { 97 | debug!("Configuring existing mount {}", &src); 98 | return Ok(src.clone()); // pass along the real source 99 | } 100 | 101 | // Otherwise, if it does not contain a slash 102 | if !name.contains("/") { 103 | use std::process::Command; 104 | let volume_output = Command::new("docker").args(vec!["volume", "ls", "-q"]).output()?; 105 | let volstr = String::from_utf8_lossy(&volume_output.stdout); 106 | // If it exists, do nothing: 107 | if volstr.contains(name) { 108 | debug!("Configuring existing volume {}", name); 109 | return Ok(name.into()); 110 | } 111 | // Otherwise warn 112 | warn!("Discarding missing docker volume {}", name); 113 | Err(CliError::MissingMount(name.into())) 114 | } else { 115 | warn!("Discarding missing mount {}", src); 116 | Err(CliError::MissingMount(src.clone())) 117 | } 118 | } 119 | 120 | 121 | impl Config { 122 | /// Initialize a Config with ConfigDefaults 123 | /// 124 | /// This will locate you homedir, and set last update check 2 days in the past. 125 | /// Thus, with a blank default config, you will always trigger an upgrade check. 
126 | pub fn new(defaults: ConfigDefaults) -> Config { 127 | let cachepath = config_dir().join("cache"); 128 | let cachedir = cachepath.as_path().to_str().unwrap(); 129 | 130 | // reset last update time 131 | let time = UTC::now(); 132 | 133 | // scan default mounts 134 | let mut mounts = vec![]; 135 | for mount in defaults.mounts { 136 | // Check src for pathiness or prepare a docker volume 137 | match check_mount(&mount.src) { 138 | Ok(src) => { 139 | let mut mountnew = mount.clone(); 140 | mountnew.src = src; // update potentially mapped source 141 | mounts.push(mountnew); 142 | } 143 | Err(e) => debug!("Ignoring mount check error {}", e), 144 | } 145 | } 146 | 147 | Config { 148 | cache: cachedir.into(), 149 | mounts: mounts, // the filtered defaults 150 | lastUpgrade: time.to_rfc3339(), 151 | autoupgrade: cfg!(feature = "upgrade"), 152 | environments: defaults.environments, 153 | backend: defaults.backend, 154 | minimum_lal: defaults.minimum_lal, 155 | interactive: true, 156 | } 157 | } 158 | 159 | /// Read and deserialize a Config from ~/.lal/config 160 | pub fn read() -> LalResult { 161 | let cfg_path = config_dir().join("config"); 162 | if !cfg_path.exists() { 163 | return Err(CliError::MissingConfig); 164 | } 165 | let mut f = fs::File::open(&cfg_path)?; 166 | let mut cfg_str = String::new(); 167 | f.read_to_string(&mut cfg_str)?; 168 | let res: Config = serde_json::from_str(&cfg_str)?; 169 | Ok(res) 170 | } 171 | 172 | /// Checks if it is time to perform an upgrade check 173 | #[cfg(feature = "upgrade")] 174 | pub fn upgrade_check_time(&self) -> bool { 175 | use chrono::{Duration, DateTime}; 176 | let last = self.lastUpgrade.parse::>().unwrap(); 177 | let cutoff = UTC::now() - Duration::days(1); 178 | last < cutoff 179 | } 180 | /// Update the lastUpgrade time to avoid triggering it for another day 181 | #[cfg(feature = "upgrade")] 182 | pub fn performed_upgrade(&mut self) -> LalResult<()> { 183 | self.lastUpgrade = UTC::now().to_rfc3339(); 184 | 
Ok(self.write(true)?) 185 | } 186 | 187 | /// Overwrite `~/.lal/config` with serialized data from this struct 188 | pub fn write(&self, silent: bool) -> LalResult<()> { 189 | let cfg_path = config_dir().join("config"); 190 | let encoded = serde_json::to_string_pretty(self)?; 191 | 192 | let mut f = fs::File::create(&cfg_path)?; 193 | write!(f, "{}\n", encoded)?; 194 | if !silent { 195 | info!("Wrote config to {}", cfg_path.display()); 196 | } 197 | debug!("Wrote config \n{}", encoded); 198 | Ok(()) 199 | } 200 | 201 | /// Resolve an arbitrary container shorthand 202 | pub fn get_container(&self, env: String) -> LalResult { 203 | if let Some(container) = self.environments.get(&env) { 204 | return Ok(container.clone()); 205 | } 206 | Err(CliError::MissingEnvironment(env)) 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/core/ensure.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | use std::fs; 3 | use std::io; 4 | 5 | 6 | /// Ensure a directory exists and is empty 7 | pub fn ensure_dir_exists_fresh(dir: &str) -> io::Result<()> { 8 | let dir = Path::new(dir); 9 | if dir.is_dir() { 10 | // clean it out first 11 | fs::remove_dir_all(&dir)?; 12 | } 13 | fs::create_dir_all(&dir)?; 14 | Ok(()) 15 | } 16 | -------------------------------------------------------------------------------- /src/core/errors.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::io; 3 | use hyper; 4 | use serde_json; 5 | 6 | /// The one and only error type for the lal library 7 | /// 8 | /// Every command will raise one of these on failure, and these is some reuse between 9 | /// commands for these errors. 
`Result` is effectively the safety net 10 | /// that every single advanced call goes through to avoid `panic!` 11 | #[derive(Debug)] 12 | pub enum CliError { 13 | /// Errors propagated from `std::fs` 14 | Io(io::Error), 15 | /// Errors propagated from `serde_json` 16 | Parse(serde_json::error::Error), 17 | /// Errors propagated from `hyper` 18 | Hype(hyper::Error), 19 | 20 | // main errors 21 | /// Manifest file not found in working directory 22 | MissingManifest, 23 | /// Config not found in ~/.lal 24 | MissingConfig, 25 | /// Component not found in manifest 26 | MissingComponent(String), 27 | /// Value in manifest is not lowercase 28 | InvalidComponentName(String), 29 | /// Manifest cannot be overwritten without forcing 30 | ManifestExists, 31 | /// Executable we shell out to is missing 32 | ExecutableMissing(String), 33 | /// lal version required by config is too old 34 | OutdatedLal(String, String), 35 | /// Missing SSL certificates 36 | MissingSslCerts, 37 | /// Root user encountered 38 | UnmappableRootUser, 39 | /// Missing predefined mount 40 | MissingMount(String), 41 | 42 | // status/verify errors 43 | /// Core dependencies missing in INPUT 44 | MissingDependencies, 45 | /// Cyclical dependency loop found in INPUT 46 | DependencyCycle(String), 47 | /// Dependency present at wrong version 48 | InvalidVersion(String), 49 | /// Extraneous dependencies in INPUT 50 | ExtraneousDependencies(String), 51 | /// No lockfile found for a component in INPUT 52 | MissingLockfile(String), 53 | /// Multiple versions of a component was involved in this build 54 | MultipleVersions(String), 55 | /// Multiple environments was used to build a component 56 | MultipleEnvironments(String), 57 | /// Environment for a component did not match our expected environment 58 | EnvironmentMismatch(String, String), 59 | /// Custom versions are stashed in INPUT which will not fly on Jenkins 60 | NonGlobalDependencies(String), 61 | /// No supported environments in the manifest 62 | 
NoSupportedEnvironments, 63 | /// Environment in manifest is not in the supported environments 64 | UnsupportedEnvironment, 65 | 66 | // env related errors 67 | /// Specified environment is not present in the main config 68 | MissingEnvironment(String), 69 | /// Command now requires an environment specified 70 | EnvironmentUnspecified, 71 | 72 | // build errors 73 | /// Build configurations does not match manifest or user input 74 | InvalidBuildConfiguration(String), 75 | /// BUILD script not executable 76 | BuildScriptNotExecutable(String), 77 | /// BUILD script not found 78 | MissingBuildScript, 79 | 80 | // script errors 81 | /// Script not found in local .lal/scripts/ directory 82 | MissingScript(String), 83 | 84 | // cache errors 85 | /// Failed to find a tarball after fetching from artifactory 86 | MissingTarball, 87 | /// Failed to find build artifacts in OUTPUT after a build or before stashing 88 | MissingBuild, 89 | 90 | // stash errors 91 | /// Invalid integer name used with lal stash 92 | InvalidStashName(u32), 93 | /// Failed to find stashed artifact in the lal cache 94 | MissingStashArtifact(String), 95 | 96 | /// Shell errors from docker subprocess 97 | SubprocessFailure(i32), 98 | /// Docker permission gate 99 | DockerPermissionSafety(String, u32, u32), 100 | /// Docker image not found 101 | DockerImageNotFound(String), 102 | 103 | // fetch/update failures 104 | /// Unspecified install failure 105 | InstallFailure, 106 | /// Fetch failure related to backend 107 | BackendFailure(String), 108 | /// No version found at same version across `supportedEnvironments` 109 | NoIntersectedVersion(String), 110 | 111 | // publish errors 112 | /// Missing release build 113 | MissingReleaseBuild, 114 | /// Config missing backend credentials 115 | MissingBackendCredentials, 116 | /// Failed upload request to the backend 117 | UploadFailure(String), 118 | 119 | // upgrade error 120 | /// Failing to write to our current install prefix 121 | 
MissingPrefixPermissions(String), 122 | /// Failing to validate latest lal version 123 | UpgradeValidationFailure(String), 124 | } 125 | 126 | // Format implementation used when printing an error 127 | impl fmt::Display for CliError { 128 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 129 | match *self { 130 | CliError::Io(ref err) => { 131 | let knd = err.kind(); 132 | if knd == io::ErrorKind::PermissionDenied { 133 | warn!("If you are on norman - ensure you have access to clean ./OUTPUT and \ 134 | ./INPUT"); 135 | } 136 | err.fmt(f) 137 | } 138 | CliError::Parse(ref err) => err.fmt(f), 139 | CliError::Hype(ref err) => err.fmt(f), 140 | CliError::MissingManifest => { 141 | write!(f, 142 | "No manifest.json found - are you at repository toplevel?") 143 | } 144 | CliError::ExecutableMissing(ref s) => { 145 | write!(f, 146 | "Please ensure you have `{}` installed on your system first.", 147 | s) 148 | } 149 | CliError::OutdatedLal(ref o, ref n) => { 150 | write!(f, 151 | "Your version of lal `{}` is too old (<{}). 
Please `lal upgrade`.", 152 | o, 153 | n) 154 | } 155 | CliError::MissingSslCerts => write!(f, "Missing SSL certificates"), 156 | CliError::UnmappableRootUser => write!(f, "Root user is not supported for lal builds"), 157 | CliError::MissingMount(ref s) => write!(f, "Missing mount {}", s), 158 | CliError::MissingConfig => write!(f, "No ~/.lal/config found"), 159 | CliError::MissingComponent(ref s) => { 160 | write!(f, "Component '{}' not found in manifest", s) 161 | } 162 | CliError::InvalidComponentName(ref s) => { 163 | write!(f, "Invalid component name {} - not lowercase", s) 164 | } 165 | CliError::ManifestExists => write!(f, "Manifest already exists (use -f to force)"), 166 | CliError::MissingDependencies => { 167 | write!(f, 168 | "Core dependencies missing in INPUT - try `lal fetch` first") 169 | } 170 | CliError::DependencyCycle(ref s) => { 171 | write!(f, "Cyclical dependencies found for {} in INPUT", s) 172 | } 173 | CliError::InvalidVersion(ref s) => { 174 | write!(f, "Dependency {} using incorrect version", s) 175 | } 176 | CliError::ExtraneousDependencies(ref s) => { 177 | write!(f, "Extraneous dependencies in INPUT ({})", s) 178 | } 179 | CliError::MissingLockfile(ref s) => write!(f, "No lockfile found for {}", s), 180 | CliError::MultipleVersions(ref s) => { 181 | write!(f, "Depending on multiple versions of {}", s) 182 | } 183 | CliError::MultipleEnvironments(ref s) => { 184 | write!(f, "Depending on multiple environments to build {}", s) 185 | } 186 | CliError::EnvironmentMismatch(ref dep, ref env) => { 187 | write!(f, "Environment mismatch for {} - built in {}", dep, env) 188 | } 189 | CliError::NonGlobalDependencies(ref s) => { 190 | write!(f, 191 | "Depending on a custom version of {} (use -s to allow stashed versions)", 192 | s) 193 | } 194 | CliError::NoSupportedEnvironments => { 195 | write!(f, "Need to specify supported environments in the manifest") 196 | } 197 | CliError::UnsupportedEnvironment => { 198 | write!(f, "manifest.environment 
must exist in manifest.supportedEnvironments") 199 | } 200 | CliError::MissingEnvironment(ref s) => { 201 | write!(f, "Environment '{}' not found in ~/.lal/config", s) 202 | } 203 | CliError::EnvironmentUnspecified => { 204 | write!(f, "Environment must be specified for this operation") 205 | } 206 | CliError::InvalidBuildConfiguration(ref s) => { 207 | write!(f, "Invalid build configuration - {}", s) 208 | } 209 | CliError::BuildScriptNotExecutable(ref s) => { 210 | write!(f, "BUILD script at {} is not executable", s) 211 | } 212 | CliError::MissingBuildScript => write!(f, "No `BUILD` script found"), 213 | CliError::MissingScript(ref s) => { 214 | write!(f, "Missing script '{}' in local folder .lal/scripts/", s) 215 | } 216 | CliError::MissingTarball => write!(f, "Tarball missing in PWD"), 217 | CliError::MissingBuild => write!(f, "No build found in OUTPUT"), 218 | CliError::InvalidStashName(n) => { 219 | write!(f, 220 | "Invalid name '{}' to stash under - must not be an integer", 221 | n) 222 | } 223 | CliError::MissingStashArtifact(ref s) => { 224 | write!(f, "No stashed artifact '{}' found in ~/.lal/cache/stash", s) 225 | } 226 | CliError::SubprocessFailure(n) => write!(f, "Process exited with {}", n), 227 | CliError::DockerPermissionSafety(ref s, u, g) => { 228 | write!(f, 229 | "ID mismatch inside and outside docker - {}; UID and GID are {}:{}", 230 | s, 231 | u, 232 | g) 233 | } 234 | CliError::DockerImageNotFound(ref s) => write!(f, "Could not find docker image {}", s), 235 | CliError::InstallFailure => write!(f, "Install failed"), 236 | CliError::BackendFailure(ref s) => write!(f, "Backend - {}", s), 237 | CliError::NoIntersectedVersion(ref s) => { 238 | write!(f, "No version of {} found across all environments", s) 239 | } 240 | CliError::MissingReleaseBuild => write!(f, "Missing release build"), 241 | CliError::MissingBackendCredentials => { 242 | write!(f, "Missing backend credentials in ~/.lal/config") 243 | } 244 | 
CliError::MissingPrefixPermissions(ref s) => { 245 | write!(f, 246 | "No write access in {} - consider chowning: `sudo chown -R $USER {}`", 247 | s, 248 | s) 249 | } 250 | CliError::UpgradeValidationFailure(ref s) => { 251 | write!(f, 252 | "Failed to validate new lal version - rolling back ({})", 253 | s) 254 | } 255 | CliError::UploadFailure(ref up) => write!(f, "Upload failure: {}", up), 256 | } 257 | } 258 | } 259 | 260 | // Allow io and json errors to be converted to `CliError` in a try! without map_err 261 | impl From for CliError { 262 | fn from(err: io::Error) -> CliError { CliError::Io(err) } 263 | } 264 | 265 | impl From for CliError { 266 | fn from(err: hyper::Error) -> CliError { CliError::Hype(err) } 267 | } 268 | 269 | impl From for CliError { 270 | fn from(err: serde_json::error::Error) -> CliError { CliError::Parse(err) } 271 | } 272 | 273 | /// Type alias to stop having to type out `CliError` everywhere. 274 | /// 275 | /// Most functions can simply add the return type `LalResult` for some `T`, 276 | /// and enjoy the benefit of using `try!` or `?` without having to worry about 277 | /// the many different error types that can arise from using curl, json serializers, 278 | /// file IO, user errors, and potential logic bugs. 
279 | pub type LalResult = Result; 280 | -------------------------------------------------------------------------------- /src/core/input.rs: -------------------------------------------------------------------------------- 1 | #![allow(missing_docs)] 2 | 3 | use std::io::prelude::*; 4 | use std::fs::File; 5 | use std::path::Path; 6 | use std::collections::BTreeMap; 7 | use serde_json; 8 | 9 | use walkdir::WalkDir; 10 | 11 | use super::{Manifest, Lockfile, CliError, LalResult}; 12 | 13 | #[derive(Deserialize)] 14 | struct PartialLock { 15 | pub version: String, 16 | } 17 | fn read_partial_lockfile(component: &str) -> LalResult { 18 | let lock_path = Path::new("./INPUT").join(component).join("lockfile.json"); 19 | if !lock_path.exists() { 20 | return Err(CliError::MissingLockfile(component.to_string())); 21 | } 22 | let mut lock_str = String::new(); 23 | trace!("Deserializing lockfile for {}", component); 24 | File::open(&lock_path)?.read_to_string(&mut lock_str)?; 25 | Ok(serde_json::from_str(&lock_str)?) 
26 | } 27 | 28 | pub fn present() -> bool { 29 | Path::new("./INPUT").is_dir() 30 | } 31 | 32 | /// Simple INPUT analyzer for the lockfile generator and `analyze_full` 33 | pub fn analyze() -> LalResult> { 34 | let input = Path::new("./INPUT"); 35 | 36 | let mut deps = BTreeMap::new(); 37 | if !input.is_dir() { 38 | return Ok(deps); 39 | } 40 | let dirs = WalkDir::new("INPUT") 41 | .min_depth(1) 42 | .max_depth(1) 43 | .into_iter() 44 | .filter_map(|e| e.ok()) 45 | .filter(|e| e.path().is_dir()); 46 | 47 | for d in dirs { 48 | let pth = d.path().strip_prefix("INPUT").unwrap(); 49 | let component = pth.to_str().unwrap(); 50 | let lck = read_partial_lockfile(component)?; 51 | deps.insert(component.to_string(), lck.version); 52 | } 53 | Ok(deps) 54 | } 55 | 56 | #[derive(Debug)] 57 | pub struct InputDependency { 58 | pub name: String, 59 | pub missing: bool, 60 | pub extraneous: bool, 61 | pub development: bool, 62 | pub version: String, // on disk 63 | pub requirement: Option, // from manifest 64 | } 65 | 66 | pub type InputMap = BTreeMap; 67 | 68 | /// Helper for `lal::status` 69 | pub fn analyze_full(manifest: &Manifest) -> LalResult { 70 | let input = Path::new("./INPUT"); 71 | 72 | let deps = analyze()?; 73 | let saved_deps = manifest.all_dependencies(); 74 | 75 | let mut depmap = InputMap::new(); 76 | if !input.is_dir() { 77 | return Ok(depmap); 78 | } 79 | 80 | // check manifested deps 81 | // something in manifest 82 | for (d, v) in saved_deps.clone() { 83 | // use manifest ver if not in INPUT 84 | let version: String = match deps.get(&d) { 85 | Some(v) => v.clone(), 86 | None => v.to_string(), 87 | }; 88 | depmap.insert(d.clone(), 89 | InputDependency { 90 | name: d.clone(), 91 | version: version, 92 | requirement: Some(format!("{}", v)), 93 | missing: deps.get(&d).is_none(), 94 | development: manifest.devDependencies.contains_key(&d), 95 | extraneous: false, 96 | }); 97 | } 98 | // check for potentially non-manifested deps 99 | // i.e. 
something in INPUT, but not in manifest 100 | for name in deps.keys() { 101 | let actual_ver = deps[name].clone(); 102 | if !saved_deps.contains_key(name) { 103 | depmap.insert(name.clone(), 104 | InputDependency { 105 | name: name.clone(), 106 | version: actual_ver, 107 | requirement: None, 108 | missing: false, 109 | development: false, 110 | extraneous: true, 111 | }); 112 | } 113 | } 114 | 115 | Ok(depmap) 116 | } 117 | 118 | /// Basic part of input verifier - checks that everything is at least present 119 | pub fn verify_dependencies_present(m: &Manifest) -> LalResult<()> { 120 | let mut error = None; 121 | let mut deps = vec![]; 122 | let dirs = WalkDir::new("INPUT") 123 | .min_depth(1) 124 | .max_depth(1) 125 | .into_iter() 126 | .filter_map(|e| e.ok()) 127 | .filter(|e| e.path().is_dir()); 128 | for entry in dirs { 129 | let pth = entry.path().strip_prefix("INPUT").unwrap(); 130 | debug!("-> {}", pth.display()); 131 | 132 | let component = pth.to_str().unwrap(); 133 | deps.push(component.to_string()); 134 | } 135 | debug!("Found the following deps in INPUT: {:?}", deps); 136 | // NB: deliberately not returning Err early because we want a large warning list 137 | // if INPUT folders are missing at the start of a build (forgot to fetch) 138 | for (d, v) in &m.dependencies { 139 | trace!("Verifying dependency from manifest: {}@{}", d, v); 140 | if !deps.contains(d) { 141 | warn!("Dependency {} not found in INPUT", d); 142 | error = Some(CliError::MissingDependencies); 143 | } 144 | } 145 | if let Some(e) = error { Err(e) } else { Ok(()) } 146 | } 147 | 148 | /// Optional part of input verifier - checks that all versions use correct versions 149 | pub fn verify_global_versions(lf: &Lockfile, m: &Manifest) -> LalResult<()> { 150 | let all_deps = m.all_dependencies(); 151 | for (name, dep) in &lf.dependencies { 152 | let v = dep.version 153 | .parse::() 154 | .map_err(|e| { 155 | debug!("Failed to parse first version of {} as int ({:?})", name, e); 156 | 
CliError::NonGlobalDependencies(name.clone()) 157 | })?; 158 | // also ensure it matches the version in the manifest 159 | let vreq = *all_deps 160 | .get(name) 161 | .ok_or_else(|| { 162 | // This is a first level dependency - it should be in the manifest 163 | CliError::ExtraneousDependencies(name.clone()) 164 | })?; 165 | if v != vreq { 166 | warn!("Dependency {} has version {}, but manifest requires {}", 167 | name, 168 | v, 169 | vreq); 170 | return Err(CliError::InvalidVersion(name.clone())); 171 | } 172 | // Prevent Cycles (enough to stop it at one manifest level) 173 | if &m.name == name { 174 | return Err(CliError::DependencyCycle(name.clone())); 175 | } 176 | } 177 | Ok(()) 178 | } 179 | 180 | /// Strict requirement for verifier - dependency tree must be flat-equivalent 181 | pub fn verify_consistent_dependency_versions(lf: &Lockfile, m: &Manifest) -> LalResult<()> { 182 | for (name, vers) in lf.find_all_dependency_versions() { 183 | debug!("Found version(s) for {} as {:?}", name, vers); 184 | assert!(!vers.is_empty(), "found versions"); 185 | if vers.len() != 1 && m.dependencies.contains_key(&name) { 186 | warn!("Multiple version requirements on {} found in lockfile", 187 | name.clone()); 188 | warn!("If you are trying to propagate {0} into the tree, \ 189 | you need to follow `lal propagate {0}`", 190 | name); 191 | return Err(CliError::MultipleVersions(name.clone())); 192 | } 193 | } 194 | Ok(()) 195 | } 196 | 197 | /// Strict requirement for verifier - all deps must be built in same environment 198 | pub fn verify_environment_consistency(lf: &Lockfile, env: &str) -> LalResult<()> { 199 | for (name, envs) in lf.find_all_environments() { 200 | debug!("Found environment(s) for {} as {:?}", name, envs); 201 | if envs.len() != 1 { 202 | warn!("Multiple environments used to build {}", name.clone()); 203 | return Err(CliError::MultipleEnvironments(name.clone())); 204 | } else { 205 | let used_env = envs.iter().next().unwrap(); 206 | if used_env != env { 207 
| return Err(CliError::EnvironmentMismatch(name.clone(), used_env.clone())); 208 | } 209 | } 210 | } 211 | Ok(()) 212 | } 213 | -------------------------------------------------------------------------------- /src/core/lockfile.rs: -------------------------------------------------------------------------------- 1 | use serde_json; 2 | use chrono::UTC; 3 | use rand; 4 | 5 | use std::path::{Path, PathBuf}; 6 | use std::fs::File; 7 | use std::io::prelude::*; 8 | 9 | use std::collections::{HashMap, BTreeMap}; 10 | use std::collections::BTreeSet; 11 | use std::fmt; 12 | 13 | use super::{CliError, LalResult, input}; 14 | 15 | /// Representation of a docker container image 16 | #[derive(Serialize, Deserialize, Debug, Clone)] 17 | pub struct Container { 18 | /// The fully qualified image name 19 | pub name: String, 20 | /// The tag to use 21 | pub tag: String, 22 | } 23 | 24 | impl Container { 25 | /// Container struct with latest tag 26 | pub fn latest(name: &str) -> Self { 27 | Container { 28 | name: name.into(), 29 | tag: "latest".into(), 30 | } 31 | } 32 | } 33 | 34 | impl fmt::Display for Container { 35 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}:{}", self.name, self.tag) } 36 | } 37 | 38 | /// Convenience default for functions that require Lockfile inspection 39 | /// Intentionally kept distinct from normal build images 40 | impl Default for Container { 41 | fn default() -> Self { 42 | Container { 43 | name: "ubuntu".into(), 44 | tag: "xenial".into(), 45 | } 46 | } 47 | } 48 | 49 | impl Container { 50 | /// Initialize a container struct 51 | /// 52 | /// This will split the container on `:` to actually fetch the tag, and if no tag 53 | /// was present, it will assume tag is latest as per docker conventions. 
54 | pub fn new(container: &str) -> Container { 55 | let split: Vec<&str> = container.split(':').collect(); 56 | let tag = if split.len() == 2 { split[1] } else { "latest" }; 57 | let cname = if split.len() == 2 { split[0] } else { container }; 58 | Container { 59 | name: cname.into(), 60 | tag: tag.into(), 61 | } 62 | } 63 | } 64 | 65 | /// Representation of `lockfile.json` 66 | #[allow(non_snake_case)] 67 | #[derive(Serialize, Deserialize, Debug)] 68 | pub struct Lockfile { 69 | /// Name of the component built 70 | pub name: String, 71 | /// Build configuration used 72 | pub config: String, 73 | /// Container and tag used to build 74 | pub container: Container, 75 | /// Name of the environment for the container at the time 76 | pub environment: String, 77 | /// Name of the default environment set in the manifest 78 | pub defaultEnv: Option, 79 | /// Revision id from version control 80 | pub sha: Option, 81 | /// Version of the component built 82 | pub version: String, 83 | /// Version of the lal tool 84 | pub tool: String, 85 | /// Built timestamp 86 | pub built: Option, 87 | /// Recursive map of dependencies used 88 | pub dependencies: BTreeMap, 89 | } 90 | 91 | /// Generates a temporary empty lockfile for internal analysis 92 | impl Default for Lockfile { 93 | fn default() -> Self { Lockfile::new("templock", &Container::default(), "none", None, None) } 94 | } 95 | 96 | impl Lockfile { 97 | /// Initialize an empty Lockfile with defaults 98 | /// 99 | /// If no version is given, the version is EXPERIMENTAL-{randhex} for Colony. 
100 | pub fn new( 101 | name: &str, 102 | container: &Container, 103 | env: &str, 104 | v: Option, 105 | build_cfg: Option<&str>, 106 | ) -> Self { 107 | let def_version = format!("EXPERIMENTAL-{:x}", rand::random::()); 108 | let time = UTC::now(); 109 | Lockfile { 110 | name: name.to_string(), 111 | version: v.unwrap_or(def_version), 112 | config: build_cfg.unwrap_or("release").to_string(), 113 | container: container.clone(), 114 | tool: env!("CARGO_PKG_VERSION").to_string(), 115 | built: Some(time.format("%Y-%m-%d %H:%M:%S").to_string()), 116 | defaultEnv: Some(env.into()), 117 | environment: env.into(), 118 | dependencies: BTreeMap::new(), 119 | sha: None, 120 | } 121 | } 122 | 123 | /// Opened lockfile at a path 124 | pub fn from_path(lock_path: &PathBuf, name: &str) -> LalResult { 125 | if !lock_path.exists() { 126 | return Err(CliError::MissingLockfile(name.to_string())); 127 | } 128 | let mut lock_str = String::new(); 129 | File::open(lock_path)?.read_to_string(&mut lock_str)?; 130 | Ok(serde_json::from_str(&lock_str)?) 131 | } 132 | 133 | /// A reader from ARTIFACT directory 134 | pub fn release_build() -> LalResult { 135 | let lpath = Path::new("ARTIFACT").join("lockfile.json"); 136 | Ok(Lockfile::from_path(&lpath, "release build")?) 137 | } 138 | 139 | // Helper constructor for input populator below 140 | fn from_input_component(component: &str) -> LalResult { 141 | let lock_path = Path::new("./INPUT").join(component).join("lockfile.json"); 142 | Ok(Lockfile::from_path(&lock_path, component)?) 143 | } 144 | 145 | 146 | /// Read all the lockfiles in INPUT to generate the full lockfile 147 | /// 148 | /// NB: This currently reads all the lockfiles partially in `analyze`, 149 | /// the re-reads them fully in `read_lockfile_from_component` so can be sped up. 
150 | pub fn populate_from_input(mut self) -> LalResult { 151 | debug!("Reading all lockfiles"); 152 | let deps = input::analyze()?; 153 | for name in deps.keys() { 154 | trace!("Populating lockfile with {}", name); 155 | let deplock = Lockfile::from_input_component(name)?; 156 | self.dependencies.insert(name.clone(), deplock); 157 | } 158 | Ok(self) 159 | } 160 | 161 | /// Attach a default environment to the lockfile 162 | pub fn set_default_env(mut self, default: String) -> Self { 163 | self.defaultEnv = Some(default); 164 | self 165 | } 166 | 167 | /// Attach a revision id from source control 168 | pub fn attach_revision_id(mut self, sha: Option) -> Self { 169 | self.sha = sha; 170 | self 171 | } 172 | 173 | /// Attach a name to the lockfile 174 | pub fn set_name(mut self, name: &str) -> Self { 175 | self.name = name.into(); 176 | self 177 | } 178 | 179 | /// Write the current `Lockfile` struct to a Path 180 | pub fn write(&self, pth: &Path) -> LalResult<()> { 181 | let encoded = serde_json::to_string_pretty(self)?; 182 | let mut f = File::create(pth)?; 183 | write!(f, "{}\n", encoded)?; 184 | debug!("Wrote lockfile {}: \n{}", pth.display(), encoded); 185 | Ok(()) 186 | } 187 | } 188 | 189 | 190 | // name of component -> (value1, value2, ..) 
191 | pub type ValueUsage = HashMap>; 192 | 193 | // The hardcore dependency analysis parts 194 | impl Lockfile { 195 | // helper to extract specific keys out of a struct 196 | fn get_value(&self, key: &str) -> String { 197 | if key == "version" { 198 | self.version.clone() 199 | } else if key == "environment" { 200 | self.environment.clone() 201 | } else { 202 | unreachable!("Only using get_value internally"); 203 | } 204 | } 205 | 206 | /// Recursive function to check for multiple version/environment (key) use 207 | fn find_all_values(&self, key: &str) -> ValueUsage { 208 | let mut acc = HashMap::new(); 209 | // for each entry in dependencies 210 | for (main_name, dep) in &self.dependencies { 211 | // Store the dependency 212 | if !acc.contains_key(main_name) { 213 | acc.insert(main_name.clone(), BTreeSet::new()); 214 | } 215 | { 216 | // Only borrow as mutable once - so creating a temporary scope 217 | let first_value_set = acc.get_mut(main_name).unwrap(); 218 | first_value_set.insert(dep.get_value(key)); 219 | } 220 | 221 | // Recurse into its dependencies 222 | trace!("Recursing into deps for {}, acc is {:?}", main_name, acc); 223 | for (name, value_set) in dep.find_all_values(key) { 224 | trace!("Found {} for for {} under {} as {:?}", 225 | key, 226 | name, 227 | main_name, 228 | value_set); 229 | // ensure each entry from above exists in current accumulator 230 | if !acc.contains_key(&name) { 231 | acc.insert(name.clone(), BTreeSet::new()); 232 | } 233 | // union the entry of value for the current name 234 | let full_value_set = acc.get_mut(&name).unwrap(); // know this exists now 235 | for value in value_set { 236 | full_value_set.insert(value); 237 | } 238 | } 239 | } 240 | acc 241 | } 242 | 243 | /// List all used versions used of each dependency 244 | pub fn find_all_dependency_versions(&self) -> ValueUsage { self.find_all_values("version") } 245 | 246 | /// List all used environments used of each dependency 247 | pub fn find_all_environments(&self) -> 
ValueUsage { self.find_all_values("environment") } 248 | 249 | /// List all dependency names used by each dependency (not transitively) 250 | pub fn find_all_dependency_names(&self) -> ValueUsage { 251 | let mut acc = HashMap::new(); 252 | // ensure root node exists 253 | acc.entry(self.name.clone()) 254 | .or_insert_with(|| self.dependencies.keys().cloned().collect()); 255 | for dep in self.dependencies.values() { 256 | // recurse and merge into parent acc: 257 | for (n, d) in dep.find_all_dependency_names() { 258 | acc.entry(n).or_insert(d); 259 | } 260 | } 261 | acc 262 | } 263 | } 264 | 265 | /// Reverse dependency methods 266 | /// 267 | /// Similar to the above ones - requires a populated lockfile to make sense. 268 | impl Lockfile { 269 | /// List all dependees for each dependency 270 | pub fn get_reverse_deps(&self) -> ValueUsage { 271 | let mut acc = HashMap::new(); 272 | // ensure the root node exists (matters for first iteration) 273 | if !acc.contains_key(&self.name) { 274 | // don't expand the tree further outside self 275 | acc.insert(self.name.clone(), BTreeSet::new()); 276 | } 277 | 278 | // for each entry in dependencies 279 | for (main_name, dep) in &self.dependencies { 280 | // ensure each entry from above exists in current accumulator 281 | if !acc.contains_key(&dep.name) { 282 | acc.insert(dep.name.clone(), BTreeSet::new()); 283 | } 284 | { 285 | // Only borrow as mutable once - so creating a temporary scope 286 | let first_value_set = acc.get_mut(&dep.name).unwrap(); 287 | first_value_set.insert(self.name.clone()); 288 | } 289 | 290 | // Recurse into its dependencies 291 | trace!("Recursing into deps for {}, acc is {:?}", main_name, acc); 292 | 293 | // merge results recursively 294 | for (name, value_set) in dep.get_reverse_deps() { 295 | trace!("Found revdeps for {} as {:?}", name, value_set); 296 | // if we don't already have new entries, add them: 297 | if !acc.contains_key(&name) { 298 | acc.insert(name.clone(), BTreeSet::new()); // blank 
first 299 | } 300 | // merge in values from recursion 301 | let full_value_set = acc.get_mut(&name).unwrap(); // know this exists now 302 | // union in values from recursion 303 | for value in value_set { 304 | full_value_set.insert(value); 305 | } 306 | } 307 | } 308 | acc 309 | } 310 | 311 | /// List all dependees for a dependency transitively 312 | pub fn get_reverse_deps_transitively_for(&self, component: String) -> BTreeSet { 313 | let revdeps = self.get_reverse_deps(); 314 | trace!("Got rev deps: {:?}", revdeps); 315 | let mut res = BTreeSet::new(); 316 | 317 | if !revdeps.contains_key(&component) { 318 | warn!("Could not find {} in the dependency tree for {}", 319 | component, 320 | self.name); 321 | return res; 322 | } 323 | 324 | let mut current_cycle = vec![component]; 325 | while !current_cycle.is_empty() { 326 | let mut next_cycle = vec![]; 327 | for name in current_cycle { 328 | // get revdeps for it (must exist by construction) 329 | for dep in &revdeps[&name] { 330 | res.insert(dep.clone()); 331 | next_cycle.push(dep.clone()); 332 | } 333 | } 334 | current_cycle = next_cycle; 335 | } 336 | res 337 | } 338 | } 339 | -------------------------------------------------------------------------------- /src/core/manifest.rs: -------------------------------------------------------------------------------- 1 | use std::io::prelude::*; 2 | use std::fs::{self, File}; 3 | use std::collections::BTreeMap; 4 | use std::vec::Vec; 5 | use serde_json; 6 | use std::path::{Path, PathBuf}; 7 | 8 | use super::{CliError, LalResult}; 9 | 10 | /// A startup helper used in a few places 11 | pub fn create_lal_subdir(pwd: &PathBuf) -> LalResult<()> { 12 | let loc = pwd.join(".lal"); 13 | if !loc.is_dir() { 14 | fs::create_dir(&loc)? 
15 | } 16 | Ok(()) 17 | } 18 | 19 | /// Representation of a value of the manifest.components hash 20 | #[allow(non_snake_case)] 21 | #[derive(Serialize, Deserialize, Clone)] 22 | pub struct ComponentConfiguration { 23 | /// The default config to use if not passed in - default is "release" 24 | pub defaultConfig: String, 25 | /// List of allowed configurations (must contain defaultConfig) 26 | pub configurations: Vec, 27 | } 28 | 29 | impl Default for ComponentConfiguration { 30 | fn default() -> ComponentConfiguration { 31 | ComponentConfiguration { 32 | configurations: vec!["release".to_string()], 33 | defaultConfig: "release".to_string(), 34 | } 35 | } 36 | } 37 | 38 | /// Representation of `manifest.json` 39 | #[allow(non_snake_case)] 40 | #[derive(Serialize, Deserialize, Clone, Default)] 41 | pub struct Manifest { 42 | /// Name of the main component 43 | pub name: String, 44 | /// Default environment to build in 45 | pub environment: String, 46 | /// All the environments dependencies can currently be found in 47 | pub supportedEnvironments: Vec, 48 | /// Components and their available configurations that are buildable 49 | pub components: BTreeMap, 50 | /// Dependencies that are always needed 51 | pub dependencies: BTreeMap, 52 | /// Development dependencies 53 | pub devDependencies: BTreeMap, 54 | 55 | /// Internal path of this manifest 56 | #[serde(skip_serializing, skip_deserializing)] 57 | location: String, 58 | } 59 | 60 | /// An enum to clarify intent 61 | pub enum ManifestLocation { 62 | /// Plain style (old default) 63 | RepoRoot, 64 | /// In the .lal subfolder 65 | LalSubfolder, 66 | } 67 | impl Default for ManifestLocation { 68 | fn default() -> ManifestLocation { ManifestLocation::LalSubfolder } 69 | } 70 | impl ManifestLocation { 71 | /// Generate path for Manifest assuming pwd is the root 72 | pub fn as_path(&self, pwd: &PathBuf) -> PathBuf { 73 | match *self { 74 | ManifestLocation::RepoRoot => pwd.join("manifest.json"), 75 | 
ManifestLocation::LalSubfolder => pwd.join(".lal/manifest.json"), 76 | } 77 | } 78 | 79 | /// Find the manifest file 80 | /// 81 | /// Looks first in `./.lal/manifest.json` and falls back to `./manifest.json` 82 | pub fn identify(pwd: &PathBuf) -> LalResult { 83 | if ManifestLocation::LalSubfolder.as_path(pwd).exists() { 84 | // Show a warning if we have two manifests - we only use the new one then 85 | // This could happen on other codebases - some javascript repos use manifest.json 86 | // if both are for lal though, then this is user error, make it explicit: 87 | if ManifestLocation::RepoRoot.as_path(pwd).exists() { 88 | warn!("manifest.json found in both .lal/ and current directory"); 89 | warn!("Using the default: .lal/manifest.json"); 90 | } 91 | Ok(ManifestLocation::LalSubfolder) 92 | } else if ManifestLocation::RepoRoot.as_path(pwd).exists() { 93 | Ok(ManifestLocation::RepoRoot) // allow people to migrate for a while 94 | } else { 95 | Err(CliError::MissingManifest) 96 | } 97 | } 98 | } 99 | 100 | 101 | impl Manifest { 102 | /// Initialize a manifest struct based on a name 103 | /// 104 | /// The name is assumed to be the default component and will create a 105 | /// component configuration for it with its default values. 
106 | pub fn new(name: &str, env: &str, location: PathBuf) -> Manifest { 107 | let mut comps = BTreeMap::new(); 108 | comps.insert(name.into(), ComponentConfiguration::default()); 109 | Manifest { 110 | name: name.into(), 111 | components: comps, 112 | environment: env.into(), 113 | supportedEnvironments: vec![env.into()], 114 | location: location.to_string_lossy().into(), 115 | ..Default::default() 116 | } 117 | } 118 | /// Merge dependencies and devDependencies into one convenience map 119 | pub fn all_dependencies(&self) -> BTreeMap { 120 | let mut deps = self.dependencies.clone(); 121 | for (k, v) in &self.devDependencies { 122 | deps.insert(k.clone(), *v); 123 | } 124 | deps 125 | } 126 | /// Read a manifest file in PWD 127 | pub fn read() -> LalResult { Ok(Manifest::read_from(&Path::new(".").to_path_buf())?) } 128 | 129 | /// Read a manifest file in an arbitrary path 130 | pub fn read_from(pwd: &PathBuf) -> LalResult { 131 | let mpath = ManifestLocation::identify(pwd)?.as_path(pwd); 132 | trace!("Using manifest in {}", mpath.display()); 133 | let mut f = File::open(&mpath)?; 134 | let mut data = String::new(); 135 | f.read_to_string(&mut data)?; 136 | let mut res: Manifest = serde_json::from_str(&data)?; 137 | // store the location internally (not serialized to disk) 138 | res.location = mpath.to_string_lossy().into(); 139 | Ok(res) 140 | } 141 | 142 | /// Update the manifest file in the current folder 143 | pub fn write(&self) -> LalResult<()> { 144 | let encoded = serde_json::to_string_pretty(self)?; 145 | trace!("Writing manifest in {}", self.location); 146 | let mut f = File::create(&self.location)?; 147 | write!(f, "{}\n", encoded)?; 148 | debug!("Wrote manifest in {}: \n{}", self.location, encoded); 149 | Ok(()) 150 | } 151 | 152 | /// Verify assumptions about configurations 153 | pub fn verify(&self) -> LalResult<()> { 154 | for (name, conf) in &self.components { 155 | if &name.to_lowercase() != name { 156 | return 
Err(CliError::InvalidComponentName(name.clone())); 157 | } 158 | // Verify ComponentSettings (manifest.components[x]) 159 | debug!("Verifying component {}", name); 160 | if !conf.configurations.contains(&conf.defaultConfig) { 161 | let ename = format!("default configuration '{}' not found in configurations list", 162 | conf.defaultConfig); 163 | return Err(CliError::InvalidBuildConfiguration(ename)); 164 | } 165 | } 166 | for (name, _) in &self.dependencies { 167 | if &name.to_lowercase() != name { 168 | return Err(CliError::InvalidComponentName(name.clone())); 169 | } 170 | } 171 | for (name, _) in &self.devDependencies { 172 | if &name.to_lowercase() != name { 173 | return Err(CliError::InvalidComponentName(name.clone())); 174 | } 175 | } 176 | if self.supportedEnvironments.is_empty() { 177 | return Err(CliError::NoSupportedEnvironments); 178 | } 179 | if !self.supportedEnvironments.iter().any(|x| x == &self.environment) { 180 | return Err(CliError::UnsupportedEnvironment); 181 | } 182 | Ok(()) 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /src/core/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::errors::{CliError, LalResult}; 2 | pub use self::manifest::{Manifest, ComponentConfiguration, ManifestLocation}; 3 | pub use self::lockfile::{Lockfile, Container}; 4 | pub use self::config::{Config, ConfigDefaults, Mount, config_dir}; 5 | pub use self::sticky::StickyOptions; 6 | pub use self::ensure::ensure_dir_exists_fresh; 7 | 8 | mod config; 9 | mod errors; 10 | mod lockfile; 11 | mod sticky; 12 | mod ensure; 13 | 14 | /// Manifest module can be used directly 15 | pub mod manifest; 16 | 17 | /// Simple INPUT folder analyzer module can be used directly 18 | pub mod input; 19 | 20 | /// Simple OUTPUT folder helper module 21 | pub mod output; 22 | -------------------------------------------------------------------------------- /src/core/output.rs: 
-------------------------------------------------------------------------------- 1 | use std::process::Command; 2 | use std::path::Path; 3 | 4 | use super::{CliError, LalResult}; 5 | 6 | /// Helper for stash and build 7 | pub fn tar(tarball: &Path) -> LalResult<()> { 8 | info!("Taring OUTPUT"); 9 | let mut args: Vec = vec![ 10 | "czf".into(), 11 | tarball.to_str().unwrap().into(), // path created internally - always valid unicode 12 | "--transform=s,^OUTPUT/,,".into(), // remove leading OUTPUT 13 | ]; 14 | 15 | // Avoid depending on wildcards (which would also hide hidden files) 16 | // All links, hidden files, and regular files should go into the tarball. 17 | let findargs = vec!["OUTPUT/", "-type", "f", "-o", "-type", "l"]; 18 | debug!("find {}", findargs.join(" ")); 19 | let find_output = Command::new("find").args(&findargs).output()?; 20 | let find_str = String::from_utf8_lossy(&find_output.stdout); 21 | 22 | // append each file as an arg to the main tar process 23 | for f in find_str.trim().split('\n') { 24 | args.push(f.into()) 25 | } 26 | 27 | // basically `tar czf component.tar.gz --transform.. $(find OUTPUT -type f -o -type l)`: 28 | debug!("tar {}", args.join(" ")); 29 | let s = Command::new("tar").args(&args).status()?; 30 | 31 | if !s.success() { 32 | return Err(CliError::SubprocessFailure(s.code().unwrap_or(1001))); 33 | } 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /src/core/sticky.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::env; 3 | use std::io::prelude::{Read, Write}; 4 | use std::path::Path; 5 | use serde_json; 6 | 7 | use super::LalResult; 8 | use manifest::create_lal_subdir; 9 | 10 | /// Representation of .lal/opts 11 | /// 12 | /// This contains the currently supported, directory-wide, sticky options. 
13 | #[derive(Serialize, Deserialize, Clone, Default)] 14 | pub struct StickyOptions { 15 | /// Environment to be used implicitally instead of the default 16 | pub env: Option, 17 | } 18 | 19 | impl StickyOptions { 20 | /// Initialize a StickyOptions with defaults 21 | pub fn new() -> StickyOptions { Default::default() } 22 | /// Read and deserialize a StickyOptions from `.lal/opts` 23 | pub fn read() -> LalResult { 24 | let opts_path = Path::new(".lal/opts"); 25 | if !opts_path.exists() { 26 | return Ok(StickyOptions::default()); // everything off 27 | } 28 | let mut opts_data = String::new(); 29 | fs::File::open(&opts_path)?.read_to_string(&mut opts_data)?; 30 | let res = serde_json::from_str(&opts_data)?; 31 | Ok(res) 32 | } 33 | 34 | /// Overwrite `.lal/opts` with current settings 35 | pub fn write(&self) -> LalResult<()> { 36 | let pwd = env::current_dir()?; 37 | create_lal_subdir(&pwd)?; // create the `.lal` subdir if it's not there already 38 | let opts_path = Path::new(".lal/opts"); 39 | let encoded = serde_json::to_string_pretty(self)?; 40 | 41 | let mut f = fs::File::create(&opts_path)?; 42 | write!(f, "{}\n", encoded)?; 43 | debug!("Wrote {}: \n{}", opts_path.display(), encoded); 44 | Ok(()) 45 | } 46 | /// Delete local `.lal/opts` 47 | pub fn delete_local() -> LalResult<()> { 48 | let opts_path = Path::new(".lal/opts"); 49 | Ok(fs::remove_file(&opts_path)?) 
50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/env.rs: -------------------------------------------------------------------------------- 1 | use std::process::Command; 2 | use std::vec::Vec; 3 | 4 | use super::{StickyOptions, LalResult, CliError, Container, Config}; 5 | 6 | /// Pull the current environment from docker 7 | pub fn update(container: &Container, env: &str) -> LalResult<()> { 8 | info!("Updating {} container", env); 9 | let args: Vec = vec!["pull".into(), format!("{}", container)]; 10 | trace!("Docker pull {}", container); 11 | let s = Command::new("docker").args(&args).status()?; 12 | trace!("Exited docker"); 13 | if !s.success() { 14 | return Err(CliError::SubprocessFailure(s.code().unwrap_or(1001))); 15 | } 16 | Ok(()) 17 | } 18 | 19 | /// Creates and sets the environment in the local .lal/opts file 20 | pub fn set(opts_: &StickyOptions, cfg: &Config, env: &str) -> LalResult<()> { 21 | if !cfg.environments.contains_key(env) { 22 | return Err(CliError::MissingEnvironment(env.into())); 23 | } 24 | // mutate a temporary copy - lal binary is done after this function anyway 25 | let mut opts = opts_.clone(); 26 | opts.env = Some(env.into()); 27 | opts.write()?; 28 | Ok(()) 29 | } 30 | 31 | /// Clears the local .lal/opts file 32 | pub fn clear() -> LalResult<()> { 33 | let _ = StickyOptions::delete_local(); 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /src/export.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::path::Path; 3 | 4 | use storage::CachedBackend; 5 | use super::{LalResult, CliError}; 6 | 7 | /// Export a specific component from the storage backend 8 | pub fn export( 9 | backend: &T, 10 | comp: &str, 11 | output: Option<&str>, 12 | _env: Option<&str>, 13 | ) -> LalResult<()> { 14 | let env = match _env { 15 | None => { 16 | error!("export is no longer allowed 
/// Wipe `./INPUT` entirely if it exists.
///
/// Used to avoid leaving users with a half-fetched, corrupt INPUT tree.
/// Panics if the directory exists but cannot be removed.
fn clean_input() {
    let input_dir = Path::new("./INPUT");
    if input_dir.is_dir() {
        fs::remove_dir_all(input_dir).unwrap();
    }
}
18 | pub fn fetch( 19 | manifest: &Manifest, 20 | backend: &T, 21 | core: bool, 22 | env: &str, 23 | ) -> LalResult<()> { 24 | // first ensure manifest is sane: 25 | manifest.verify()?; 26 | 27 | debug!("Installing dependencies{}", 28 | if !core { " and devDependencies" } else { "" }); 29 | 30 | // create the joined hashmap of dependencies and possibly devdependencies 31 | let mut deps = manifest.dependencies.clone(); 32 | if !core { 33 | for (k, v) in &manifest.devDependencies { 34 | deps.insert(k.clone(), *v); 35 | } 36 | } 37 | let mut extraneous = vec![]; // stuff we should remove 38 | 39 | // figure out what we have already 40 | let lf = Lockfile::default() 41 | .populate_from_input() 42 | .map_err(|e| { 43 | // Guide users a bit if they did something dumb - see #77 44 | warn!("Populating INPUT data failed - your INPUT may be corrupt"); 45 | warn!("This can happen if you CTRL-C during `lal fetch`"); 46 | warn!("Try to `rm -rf INPUT` and `lal fetch` again."); 47 | e 48 | })?; 49 | // filter out what we already have (being careful to examine env) 50 | for (name, d) in lf.dependencies { 51 | // if d.name at d.version in d.environment matches something in deps 52 | if let Some(&cand) = deps.get(&name) { 53 | // version found in manifest 54 | // ignore non-integer versions (stashed things must be overwritten) 55 | if let Ok(n) = d.version.parse::() { 56 | if n == cand && d.environment == env { 57 | info!("Reuse {} {} {}", env, name, n); 58 | deps.remove(&name); 59 | } 60 | } 61 | } else { 62 | extraneous.push(name.clone()); 63 | } 64 | } 65 | 66 | let mut err = None; 67 | for (k, v) in deps { 68 | info!("Fetch {} {} {}", env, k, v); 69 | 70 | // first kill the folders we actually need to fetch: 71 | let cmponent_dir = Path::new("./INPUT").join(&k); 72 | if cmponent_dir.is_dir() { 73 | // Don't think this can fail, but we are dealing with NFS 74 | fs::remove_dir_all(&cmponent_dir) 75 | .map_err(|e| { 76 | warn!("Failed to remove INPUT/{} - {}", k, e); 77 | 
warn!("Please clean out your INPUT folder yourself to avoid corruption"); 78 | e 79 | })?; 80 | } 81 | 82 | let _ = backend.unpack_published_component(&k, Some(v), env).map_err(|e| { 83 | warn!("Failed to completely install {} ({})", k, e); 84 | // likely symlinks inside tarball that are being dodgy 85 | // this is why we clean_input 86 | err = Some(e); 87 | }); 88 | } 89 | 90 | // remove extraneous deps 91 | for name in extraneous { 92 | info!("Remove {}", name); 93 | let pth = Path::new("./INPUT").join(&name); 94 | if pth.is_dir() { 95 | fs::remove_dir_all(&pth)?; 96 | } 97 | } 98 | 99 | if err.is_some() { 100 | warn!("Cleaning potentially broken INPUT"); 101 | clean_input(); // don't want to risk having users in corrupted states 102 | return Err(CliError::InstallFailure); 103 | } 104 | Ok(()) 105 | } 106 | -------------------------------------------------------------------------------- /src/init.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use super::{Config, CliError, LalResult}; 4 | use core::manifest::*; 5 | 6 | 7 | /// Generates a blank manifest in the current directory 8 | /// 9 | /// This will use the directory name as the assumed default component name 10 | /// Then fill in the blanks as best as possible. 11 | /// 12 | /// The function will not overwrite an existing `manifest.json`, 13 | /// unless the `force` bool is set. 
14 | pub fn init(cfg: &Config, force: bool, env: &str) -> LalResult<()> { 15 | cfg.get_container(env.into())?; 16 | 17 | let pwd = env::current_dir()?; 18 | let last_comp = pwd.components().last().unwrap(); // std::path::Component 19 | let dirname = last_comp.as_os_str().to_str().unwrap(); 20 | 21 | let mpath = ManifestLocation::identify(&pwd); 22 | if !force && mpath.is_ok() { 23 | return Err(CliError::ManifestExists); 24 | } 25 | 26 | // we are allowed to overwrite or write a new manifest if we are here 27 | // always create new manifests in new default location 28 | create_lal_subdir(&pwd)?; // create the `.lal` subdir if it's not there already 29 | Manifest::new(dirname, env, ManifestLocation::default().as_path(&pwd)).write()?; 30 | 31 | // if the manifest already existed, warn about this now being placed elsewhere 32 | if let Ok(ManifestLocation::RepoRoot) = mpath { 33 | warn!("Created manifest in new location under .lal"); 34 | warn!("Please delete the old manifest - it will not be read anymore"); 35 | } 36 | 37 | Ok(()) 38 | } 39 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![warn(missing_docs)] 2 | 3 | //! This is the rust doc for the `lal` *library* - what the `lal` *binary* 4 | //! depends on to do all the work. This documentation is likely only of use to you 5 | //! if you need to know the internals of `lal` for figuring out how to modify it. 6 | //! 7 | //! ## Testing 8 | //! The library contains all the logic because the binary is only an argument parser, 9 | //! and elaborate decision making engine to log, call one of the libraries functions, 10 | //! then simply `process::exit`. 11 | //! Tests do not cover the binary part, because these failures would be trivially 12 | //! detectable, and also require a subprocess type of testing. Tests instead 13 | //! cover a couple of common use flows through the library. 
14 | //! 15 | //! 16 | //! ## Dependencies 17 | //! This tool depends on the rust ecosystem and their crates. Dependencies referenced 18 | //! explicitly or implicitly is listed on the left of this page. 19 | 20 | #[macro_use] 21 | extern crate hyper; 22 | extern crate hyper_native_tls; 23 | extern crate openssl_probe; 24 | #[macro_use] 25 | extern crate serde_derive; 26 | extern crate serde_json; 27 | extern crate regex; 28 | extern crate tar; 29 | extern crate flate2; 30 | extern crate ansi_term; 31 | extern crate sha1; 32 | #[macro_use] 33 | extern crate log; 34 | extern crate walkdir; 35 | extern crate chrono; 36 | extern crate filetime; 37 | extern crate rand; 38 | extern crate semver; 39 | #[cfg(feature = "progress")] 40 | extern crate indicatif; 41 | 42 | // re-exports 43 | mod core; 44 | pub use core::*; 45 | 46 | mod storage; 47 | pub use storage::*; 48 | 49 | /// Env module for env subcommand (which has further subcommands) 50 | pub mod env; 51 | /// List module for all the list-* subcommands 52 | pub mod list; 53 | /// Propagation module with all structs describing the steps 54 | pub mod propagate; 55 | 56 | 57 | // lift most other pub functions into our libraries main scope 58 | // this avoids having to type lal::build::build in tests and main.rs 59 | pub use build::{build, BuildOptions}; 60 | pub use configure::configure; 61 | pub use init::init; 62 | pub use shell::{shell, docker_run, script, DockerRunFlags, ShellModes}; 63 | pub use fetch::fetch; 64 | pub use update::{update, update_all}; 65 | pub use remove::remove; 66 | pub use export::export; 67 | pub use status::status; 68 | pub use verify::verify; 69 | pub use stash::stash; 70 | pub use clean::clean; 71 | pub use query::query; 72 | pub use publish::publish; 73 | 74 | mod configure; 75 | mod init; 76 | mod shell; 77 | mod build; 78 | mod query; 79 | mod update; 80 | mod fetch; 81 | mod remove; 82 | mod export; 83 | mod clean; 84 | mod verify; 85 | mod stash; 86 | mod status; 87 | mod publish; 88 
| 89 | #[cfg(feature = "upgrade")] 90 | pub use upgrade::upgrade; 91 | #[cfg(feature = "upgrade")] 92 | mod upgrade; 93 | -------------------------------------------------------------------------------- /src/list.rs: -------------------------------------------------------------------------------- 1 | /// This file contains all the hidden `lal list-*` subcommands 2 | /// If you are looking for `lal ls` go to status.rs 3 | 4 | use super::{Manifest, Config, LalResult}; 5 | 6 | /// Print the buildable components from the `Manifest` 7 | pub fn buildables(manifest: &Manifest) -> LalResult<()> { 8 | for k in manifest.components.keys() { 9 | println!("{}", k); 10 | } 11 | Ok(()) 12 | } 13 | 14 | /// Print the supported environments from the `Manifest` 15 | pub fn supported_environments(manifest: &Manifest) -> LalResult<()> { 16 | for env in &manifest.supportedEnvironments { 17 | println!("{}", env); 18 | } 19 | Ok(()) 20 | } 21 | 22 | /// Print the available configurations for a buildable Component 23 | pub fn configurations(component: &str, manifest: &Manifest) -> LalResult<()> { 24 | let component_settings = match manifest.components.get(component) { 25 | Some(c) => c, 26 | None => return Ok(()), // invalid component - but this is for completion 27 | }; 28 | for c in &component_settings.configurations { 29 | println!("{}", c); 30 | } 31 | Ok(()) 32 | } 33 | 34 | /// Print the configured environments from the config 35 | pub fn environments(cfg: &Config) -> LalResult<()> { 36 | for k in cfg.environments.keys() { 37 | println!("{}", k); 38 | } 39 | Ok(()) 40 | } 41 | 42 | /// Print the dependencies from the manifest 43 | pub fn dependencies(mf: &Manifest, core: bool) -> LalResult<()> { 44 | let deps = if core { mf.dependencies.clone() } else { mf.all_dependencies() }; 45 | for k in deps.keys() { 46 | println!("{}", k); 47 | } 48 | Ok(()) 49 | } 50 | -------------------------------------------------------------------------------- /src/propagate.rs: 
-------------------------------------------------------------------------------- 1 | use serde_json; 2 | use std::collections::BTreeSet; 3 | use super::{LalResult, Manifest, Lockfile}; 4 | 5 | 6 | /// A single update of of a propagation 7 | #[derive(Serialize)] 8 | pub struct SingleUpdate { 9 | /// Where to update dependencies 10 | pub repo: String, 11 | /// Dependencies to update 12 | pub dependencies: Vec, 13 | } 14 | 15 | /// A parallelizable update stage of a propagation 16 | #[derive(Serialize, Default)] 17 | pub struct UpdateStage { 18 | /// Updates to perform at this stage 19 | pub updates: Vec, 20 | } 21 | 22 | /// A set of sequential update steps that describe a propagation 23 | #[derive(Serialize, Default)] 24 | pub struct UpdateSequence { 25 | /// Update stages needed 26 | pub stages: Vec, 27 | } 28 | 29 | /// Compute the update sequence for a propagation 30 | pub fn compute(lf: &Lockfile, component: &str) -> LalResult { 31 | // 1. collect the list of everything we want to build in between root and component 32 | let all_required = lf.get_reverse_deps_transitively_for(component.into()); 33 | let dependencies = lf.find_all_dependency_names(); // map String -> Set(names) 34 | 35 | debug!("Needs updating: {:?}", all_required); 36 | debug!("Dependency table: {:?}", dependencies); 37 | 38 | // initialize mutables 39 | let mut result = UpdateSequence::default(); 40 | let mut remaining = all_required.clone(); 41 | // assume we already updated the component itself 42 | let mut handled = vec![component.to_string()].into_iter().collect(); 43 | 44 | // create update stages while there is something left to update 45 | while !remaining.is_empty() { 46 | let mut stage = UpdateStage::default(); 47 | debug!("Remaining set: {:?}", remaining); 48 | 49 | for dep in remaining.clone() { 50 | debug!("Processing {}", dep); 51 | // Consider transitive deps for dep, and check they are not in remaining 52 | let deps_for_name = dependencies[&dep].clone(); 53 | debug!("Deps for {} 
is {:?}", dep, deps_for_name); 54 | let intersection = deps_for_name.intersection(&remaining).collect::>(); 55 | debug!("Intersection: {:?}", intersection); 56 | if intersection.is_empty() { 57 | // what to update is `handled` intersected with deps for this repo 58 | stage.updates.push(SingleUpdate { 59 | repo: dep, 60 | dependencies: deps_for_name 61 | .intersection(&handled) 62 | .cloned() 63 | .collect(), 64 | }); 65 | } 66 | } 67 | 68 | // remove what we are doing in this stage from remaining 69 | for dep in &stage.updates { 70 | remaining.remove(&dep.repo); 71 | handled.insert(dep.repo.clone()); 72 | } 73 | result.stages.push(stage); 74 | } 75 | Ok(result) 76 | } 77 | 78 | 79 | /// Outputs the update path to the current manifest for a specific component 80 | /// 81 | /// Given a component to propagate to the current one in your working directory, 82 | /// work out how to propagate it through the dependency tree fully. 83 | /// 84 | /// This will produce a set of sequential steps, each set itself being parallelizable. 85 | /// The resulting update steps can be performed in order to ensure `lal verify` is happy. 86 | pub fn print(manifest: &Manifest, component: &str, json_output: bool) -> LalResult<()> { 87 | debug!("Calculating update path for {}", component); 88 | 89 | // TODO: allow taking a custom lockfile to be used outside a repo. 
90 | let lf = Lockfile::default().set_name(&manifest.name).populate_from_input()?; 91 | 92 | let result = compute(&lf, component)?; 93 | 94 | if json_output { 95 | let encoded = serde_json::to_string_pretty(&result)?; 96 | println!("{}", encoded); 97 | } else { 98 | println!("Assuming {} has been updated:", component); 99 | let mut i = 1; 100 | for stage in result.stages { 101 | println!("Stage {}:", i); 102 | for update in stage.updates { 103 | println!("- update [{}] in {}", 104 | update.dependencies.join(", "), 105 | update.repo); 106 | } 107 | i += 1; 108 | } 109 | } 110 | 111 | Ok(()) 112 | } 113 | -------------------------------------------------------------------------------- /src/publish.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | // Need both the struct and the trait 4 | use storage::Backend; 5 | use super::{LalResult, CliError, Lockfile}; 6 | 7 | /// Publish a release build to the storage backend 8 | /// 9 | /// Meant to be done after a `lal build -r ` 10 | /// and requires publish credentials in the local `Config`. 
11 | pub fn publish(name: &str, backend: &T) -> LalResult<()> { 12 | let artdir = Path::new("./ARTIFACT"); 13 | let tarball = artdir.join(format!("{}.tar.gz", name)); 14 | if !artdir.is_dir() || !tarball.exists() { 15 | warn!("Missing: {}", tarball.display()); 16 | return Err(CliError::MissingReleaseBuild); 17 | } 18 | 19 | let lock = Lockfile::release_build()?; 20 | 21 | let version = lock.version 22 | .parse::() 23 | .map_err(|e| { 24 | error!("Release build not done --with-version=$BUILD_VERSION"); 25 | debug!("Error: {}", e); 26 | CliError::MissingReleaseBuild 27 | })?; 28 | 29 | if lock.sha.is_none() { 30 | warn!("Release build not done --with-sha=$(git rev-parse HEAD)"); 31 | } 32 | 33 | // always publish to the environment in the lockfile 34 | let env = lock.environment; 35 | 36 | info!("Publishing {}={} to {}", name, version, env); 37 | backend.publish_artifact(name, version, &env)?; 38 | 39 | Ok(()) 40 | } 41 | -------------------------------------------------------------------------------- /src/query.rs: -------------------------------------------------------------------------------- 1 | use std::io::{self, Write}; 2 | 3 | use storage::Backend; 4 | use super::{LalResult, CliError}; 5 | 6 | /// Prints a list of versions associated with a component 7 | pub fn query(backend: &Backend, _env: Option<&str>, component: &str, last: bool) -> LalResult<()> { 8 | if component.to_lowercase() != component { 9 | return Err(CliError::InvalidComponentName(component.into())); 10 | } 11 | let env = match _env { 12 | None => { 13 | error!("query is no longer allowed without an explicit environment"); 14 | return Err(CliError::EnvironmentUnspecified) 15 | }, 16 | Some(e) => e 17 | }; 18 | 19 | if last { 20 | let ver = backend.get_latest_version(component, env)?; 21 | println!("{}", ver); 22 | } else { 23 | let vers = backend.get_versions(component, env)?; 24 | for v in vers { 25 | println!("{}", v); 26 | // needed because sigpipe handling is broken for stdout atm 27 | // see 
#36 - can probably be taken out in rust 1.16 or 1.17 28 | // if `lal query media-engine | head` does not crash 29 | if io::stdout().flush().is_err() { 30 | return Ok(()); 31 | } 32 | } 33 | } 34 | Ok(()) 35 | } 36 | -------------------------------------------------------------------------------- /src/remove.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::path::Path; 3 | 4 | use super::{CliError, LalResult, Manifest}; 5 | 6 | /// Remove specific components from `./INPUT` and the manifest. 7 | /// 8 | /// This takes multiple components strings (without versions), and if the component 9 | /// is found in `./INPUT` it is deleted. 10 | /// 11 | /// If one of `save` or `savedev` was set, `manifest.json` is also updated to remove 12 | /// the specified components from the corresponding dictionary. 13 | pub fn remove(manifest: &Manifest, xs: Vec, save: bool, savedev: bool) -> LalResult<()> { 14 | debug!("Removing dependencies {:?}", xs); 15 | 16 | // remove entries in xs from manifest. 17 | if save || savedev { 18 | let mut mf = manifest.clone(); 19 | let mut hmap = if save { mf.dependencies.clone() } else { mf.devDependencies.clone() }; 20 | for component in xs.clone() { 21 | // We could perhaps allow people to just specify ANY dependency 22 | // and have a generic save flag, which we could infer from 23 | // thus we could modify both maps if listing many components 24 | 25 | // This could work, but it's not currently what install does, so not doing it. 
26 | // => all components uninstalled from either dependencies, or all from devDependencies 27 | // if doing multiple components from different maps, do multiple calls 28 | if !hmap.contains_key(&component) { 29 | return Err(CliError::MissingComponent(component.to_string())); 30 | } 31 | debug!("Removing {} from manifest", component); 32 | hmap.remove(&component); 33 | } 34 | if save { 35 | mf.dependencies = hmap; 36 | } else { 37 | mf.devDependencies = hmap; 38 | } 39 | info!("Updating manifest with removed dependencies"); 40 | mf.write()?; 41 | } 42 | 43 | // delete the folder (ignore if the folder does not exist) 44 | let input = Path::new("./INPUT"); 45 | if !input.is_dir() { 46 | return Ok(()); 47 | } 48 | for component in xs { 49 | let pth = Path::new(&input).join(&component); 50 | if pth.is_dir() { 51 | debug!("Deleting INPUT/{}", component); 52 | fs::remove_dir_all(&pth)?; 53 | } 54 | } 55 | Ok(()) 56 | } 57 | -------------------------------------------------------------------------------- /src/shell.rs: -------------------------------------------------------------------------------- 1 | use std::process::Command; 2 | use std::env; 3 | use std::path::Path; 4 | use std::vec::Vec; 5 | 6 | use super::{Config, Container, CliError, LalResult}; 7 | 8 | /// Verifies that `id -u` and `id -g` are both 1000 9 | /// 10 | /// Docker user namespaces are not properly supported by our setup, 11 | /// so for builds to work with the default containers, user ids and group ids 12 | /// should match a defined linux setup of 1000:1000. 
13 | fn permission_sanity_check() -> LalResult<()> { 14 | let uid_output = Command::new("id").arg("-u").output()?; 15 | let uid_str = String::from_utf8_lossy(&uid_output.stdout); 16 | let uid = uid_str.trim().parse::().unwrap(); // trust `id -u` is sane 17 | 18 | let gid_output = Command::new("id").arg("-g").output()?; 19 | let gid_str = String::from_utf8_lossy(&gid_output.stdout); 20 | let gid = gid_str.trim().parse::().unwrap(); // trust `id -g` is sane 21 | 22 | if uid != 1000 || gid != 1000 { 23 | return Err(CliError::DockerPermissionSafety(format!("UID and GID are not 1000:1000"), 24 | uid, 25 | gid)); 26 | } 27 | 28 | Ok(()) 29 | } 30 | 31 | /// Gets the ID of a docker container 32 | /// 33 | /// Uses the `docker images` command to find the image ID of the specified 34 | /// container. 35 | /// Will return a trimmed String containing the image ID requested, wrapped in 36 | /// a Result::Ok, or CliError::DockerImageNotFound wrapped in a Result::Err if 37 | /// docker images returns no output. 38 | fn get_docker_image_id(container: &Container) -> LalResult { 39 | trace!("Using docker images to find ID of container {}", container); 40 | let image_id_output = 41 | Command::new("docker").arg("images").arg("-q").arg(container.to_string()).output()?; 42 | let image_id_str: String = String::from_utf8_lossy(&image_id_output.stdout).trim().into(); 43 | match image_id_str.len() { 44 | 0 => { 45 | trace!("Could not find ID"); 46 | Err(CliError::DockerImageNotFound(container.to_string())) 47 | } 48 | _ => { 49 | trace!("Found ID {}", image_id_str); 50 | Ok(image_id_str.into()) 51 | } 52 | } 53 | } 54 | 55 | /// Pulls a docker container 56 | /// 57 | /// Uses `docker pull` to pull the specified container from the docker repository. 58 | /// Returns Ok(()) if the command is successful, Err(CliError::SubprocessFailure) 59 | /// if `docker pull` fails or is interrupted by a signal, Err(CliError::Io) if the 60 | /// command status() call fails for a different reason. 
61 | fn pull_docker_image(container: &Container) -> LalResult<()> { 62 | trace!("Pulling container {}", container); 63 | let s = Command::new("docker").arg("pull").arg(container.to_string()).status()?; 64 | if !s.success() { 65 | trace!("Pull failed"); 66 | return Err(CliError::SubprocessFailure(s.code().unwrap_or(1001))); 67 | }; 68 | trace!("Pull succeeded"); 69 | Ok(()) 70 | } 71 | 72 | /// Builds a docker container 73 | /// 74 | /// Uses `docker build` to build a docker container with the specified 75 | /// instructions. It uses the --tag option to tag it with the given information. 76 | /// Returns Ok(()) if the command is successful, Err(CliError::SubprocessFailure) 77 | /// if `bash -c` fails or is interrupted by a signal, Err(CliError::Io) if the 78 | /// command status() call fails for a different reason. 79 | fn build_docker_image(container: &Container, instructions: Vec) -> LalResult<()> { 80 | trace!("Building docker image for {}", container); 81 | let instruction_strings = instructions.join("\\n"); 82 | trace!("Build instructions: \n{}", instruction_strings); 83 | // More safety 84 | let instruction_strings = instruction_strings.replace("'", "'\\''"); 85 | let s = Command::new("bash") 86 | .arg("-c") 87 | .arg(format!("echo -e '{}' | docker build --tag {} -", 88 | instruction_strings, 89 | container)) 90 | .status()?; 91 | if !s.success() { 92 | trace!("Build failed"); 93 | return Err(CliError::SubprocessFailure(s.code().unwrap_or(1001))); 94 | }; 95 | trace!("Build succeeded"); 96 | Ok(()) 97 | } 98 | 99 | /// Flags for docker run that vary for different use cases 100 | /// 101 | /// `interactive` should be on by default, but machine accounts should turn this off 102 | /// `privileged` is needed on some setups for `gdb` and other low level tools to work 103 | /// 104 | /// NB: The derived default should only be used by tests (all false/zero) 105 | #[derive(Default)] 106 | pub struct DockerRunFlags { 107 | /// Pass --interactive (allows ctrl-c on 
builds/scripts/shell commands) 108 | pub interactive: bool, 109 | /// Pass --privileged (situational) 110 | pub privileged: bool, 111 | } 112 | 113 | /// Fixes up docker container for use with given uid and gid 114 | /// 115 | /// Returns a container derived from the one passed as an argument, with the `lal` 116 | /// user having its uid and gid modified to match the ones passed. 117 | /// The container is built if necessary (e.g. new base container from upstream) 118 | fn fixup_docker_container(container: &Container, u: u32, g: u32) -> LalResult { 119 | info!("Using appropriate container for user {}:{}", u, g); 120 | // Find image id of regular docker container 121 | // We might have to pull it 122 | let image_id = get_docker_image_id(container) 123 | .or_else(|_| { 124 | pull_docker_image(container)?; 125 | get_docker_image_id(container) 126 | })?; 127 | 128 | // Produce name and tag of modified container 129 | let modified_container = Container { 130 | name: format!("{}-u{}_g{}", container.name, u, g), 131 | tag: format!("from_{}", image_id), 132 | }; 133 | 134 | info!("Using container {}", modified_container); 135 | 136 | // Try to find image id of modified container 137 | // If we fail we need to build it 138 | match get_docker_image_id(&modified_container) { 139 | Ok(id) => { 140 | info!("Found container {}, image id is {}", modified_container, id); 141 | } 142 | Err(_) => { 143 | let instructions: Vec = 144 | vec![ 145 | format!("FROM {}", container), 146 | "USER root".into(), 147 | format!("RUN groupmod -g {} lal && usermod -u {} lal", g, u), 148 | "USER lal".into(), 149 | ]; 150 | info!("Attempting to build container {}...", modified_container); 151 | build_docker_image(&modified_container, instructions)?; 152 | } 153 | }; 154 | trace!("Fixup for user {}:{} succeeded", u, g); 155 | Ok(modified_container) 156 | } 157 | 158 | /// Runs an arbitrary command in the configured docker environment 159 | /// 160 | /// This will mount the current directory as 
`~/volume` as well as a few conveniences, 161 | /// and absorb the `Stdio` supplied by this `Command`. 162 | /// 163 | /// This is the most general function, used by both `lal build` and `lal shell`. 164 | pub fn docker_run( 165 | cfg: &Config, 166 | container: &Container, 167 | command: Vec, 168 | flags: &DockerRunFlags, 169 | modes: &ShellModes, 170 | ) -> LalResult<()> { 171 | 172 | let mut modified_container_option: Option = None; 173 | 174 | trace!("Performing docker permission sanity check"); 175 | if let Err(e) = permission_sanity_check() { 176 | match e { 177 | CliError::DockerPermissionSafety(_, u, g) => { 178 | if u == 0 { 179 | // Do not run as root 180 | return Err(CliError::DockerPermissionSafety("Cannot run container as root user" 181 | .into(), 182 | u, 183 | g)); 184 | } 185 | modified_container_option = Some(fixup_docker_container(container, u, g)?); 186 | } 187 | x => { 188 | return Err(x); 189 | } 190 | } 191 | }; 192 | 193 | // Shadow container here 194 | let container = modified_container_option.as_ref().unwrap_or(container); 195 | 196 | trace!("Finding home and cwd"); 197 | let home = env::home_dir().unwrap(); // crash if no $HOME 198 | let pwd = env::current_dir().unwrap(); 199 | 200 | // construct arguments vector 201 | let mut args: Vec = vec!["run".into(), "--rm".into()]; 202 | for mount in cfg.mounts.clone() { 203 | trace!(" - mounting {}", mount.src); 204 | args.push("-v".into()); 205 | let mnt = format!("{}:{}{}", 206 | mount.src, 207 | mount.dest, 208 | if mount.readonly { ":ro" } else { "" }); 209 | args.push(mnt); 210 | } 211 | trace!(" - mounting {}", pwd.display()); 212 | args.push("-v".into()); 213 | args.push(format!("{}:/home/lal/volume", pwd.display())); 214 | 215 | // X11 forwarding 216 | if modes.x11_forwarding { 217 | // requires calling `xhost local:docker` first 218 | args.push("-v".into()); 219 | args.push("/tmp/.X11-unix:/tmp/.X11-unix:ro".into()); 220 | args.push("--env=DISPLAY".into()); 221 | args.push("-v".into()); 
222 | // xauth also needed for `ssh -X` through `lal -X` 223 | args.push(format!("{}/.Xauthority:/home/lal/.Xauthority:ro", home.display())); 224 | // QT compat 225 | args.push("--env=QT_X11_NO_MITSHM=1".into()); 226 | } 227 | if modes.host_networking { 228 | // also needed for for `ssh -X` into `lal -X` 229 | args.push("--net=host".into()); 230 | } 231 | for var in modes.env_vars.clone() { 232 | args.push(format!("--env={}", var)); 233 | } 234 | 235 | if flags.privileged { 236 | args.push("--privileged".into()) 237 | } 238 | 239 | args.push("-w".into()); 240 | args.push("/home/lal/volume".into()); 241 | args.push("--user".into()); 242 | args.push("lal".into()); 243 | 244 | // If no command, then override entrypoint to /bin/bash 245 | // This happens when we use `lal shell` without args 246 | if command.is_empty() { 247 | args.push("--entrypoint".into()); 248 | args.push("/bin/bash".into()); 249 | } 250 | args.push((if flags.interactive { "-it" } else { "-t" }).into()); 251 | 252 | args.push(format!("{}:{}", container.name, container.tag)); 253 | for c in command { 254 | args.push(c); 255 | } 256 | 257 | // run or print docker command 258 | if modes.printonly { 259 | print!("docker"); 260 | for arg in args { 261 | if arg.contains(' ') { 262 | // leave quoted args quoted 263 | print!(" \"{}\"", arg); 264 | } else { 265 | print!(" {}", arg); 266 | } 267 | } 268 | println!(""); 269 | } else { 270 | trace!("Entering docker"); 271 | let s = Command::new("docker").args(&args).status()?; 272 | trace!("Exited docker"); 273 | if !s.success() { 274 | return Err(CliError::SubprocessFailure(s.code().unwrap_or(1001))); 275 | } 276 | } 277 | Ok(()) 278 | } 279 | 280 | /// Various ways to invoke `docker_run` 281 | #[derive(Default, Clone)] 282 | pub struct ShellModes { 283 | /// Just print the command used rather than do it 284 | pub printonly: bool, 285 | /// Attempt to forward the X11 socket and all it needs 286 | pub x11_forwarding: bool, 287 | /// Host networking 288 | pub 
host_networking: bool, 289 | /// Environment variables 290 | pub env_vars: Vec, 291 | } 292 | 293 | 294 | 295 | /// Mounts and enters `.` in an interactive bash shell using the configured container. 296 | /// 297 | /// If a command vector is given, this is called non-interactively instead of /bin/bash 298 | /// You can thus do `lal shell ./BUILD target` or ``lal shell bash -c "cmd1; cmd2"` 299 | pub fn shell( 300 | cfg: &Config, 301 | container: &Container, 302 | modes: &ShellModes, 303 | cmd: Option>, 304 | privileged: bool, 305 | ) -> LalResult<()> { 306 | if !modes.printonly { 307 | info!("Entering {}", container); 308 | } 309 | 310 | let flags = DockerRunFlags { 311 | interactive: cmd.is_none() || cfg.interactive, 312 | privileged: privileged, 313 | }; 314 | let mut bash = vec![]; 315 | if let Some(cmdu) = cmd { 316 | for c in cmdu { 317 | bash.push(c.to_string()) 318 | } 319 | } 320 | docker_run(cfg, container, bash, &flags, modes) 321 | } 322 | 323 | /// Runs a script in `.lal/scripts/` with supplied arguments in a docker shell 324 | /// 325 | /// This is a convenience helper for running things that aren't builds. 326 | /// E.g. `lal run my-large-test RUNONLY=foo` 327 | pub fn script( 328 | cfg: &Config, 329 | container: &Container, 330 | name: &str, 331 | args: Vec<&str>, 332 | modes: &ShellModes, 333 | privileged: bool, 334 | ) -> LalResult<()> { 335 | let pth = Path::new(".").join(".lal").join("scripts").join(&name); 336 | if !pth.exists() { 337 | return Err(CliError::MissingScript(name.into())); 338 | } 339 | 340 | let flags = DockerRunFlags { 341 | interactive: cfg.interactive, 342 | privileged: privileged, 343 | }; 344 | 345 | // Simply run the script by adding on the arguments 346 | let cmd = vec![ 347 | "bash".into(), 348 | "-c".into(), 349 | format!("source {}; main {}", pth.display(), args.join(" ")), 350 | ]; 351 | Ok(docker_run(cfg, container, cmd, &flags, modes)?) 
352 | } 353 | -------------------------------------------------------------------------------- /src/stash.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use storage::CachedBackend; 4 | use super::{CliError, LalResult, Manifest, Lockfile}; 5 | 6 | 7 | /// Saves current build `./OUTPUT` to the local cache under a specific name 8 | /// 9 | /// This tars up `/OUTPUT` similar to how `build` is generating a tarball, 10 | /// then copies this to `~/.lal/cache/stash/${name}/`. 11 | /// 12 | /// This file can then be installed via `update` using a component=${name} argument. 13 | pub fn stash(backend: &T, mf: &Manifest, name: &str) -> LalResult<()> { 14 | info!("Stashing OUTPUT into cache under {}/{}", mf.name, name); 15 | // sanity: verify name does NOT parse as a u32 16 | if let Ok(n) = name.parse::() { 17 | return Err(CliError::InvalidStashName(n)); 18 | } 19 | 20 | let outputdir = Path::new("./OUTPUT"); 21 | if !outputdir.is_dir() { 22 | return Err(CliError::MissingBuild); 23 | } 24 | 25 | // convenience edit for lal status here: 26 | // we edit the lockfile's version key to be "${stashname}" 27 | // rather than the ugly colony default of "EXPERIMENTAL-${hex}" 28 | // stashed builds are only used locally so this allows easier inspection 29 | // full version list is available in `lal ls -f` 30 | let lf_path = Path::new("OUTPUT").join("lockfile.json"); 31 | let mut lf = Lockfile::from_path(&lf_path, &mf.name)?; 32 | lf.version = name.to_string(); 33 | lf.write(&lf_path)?; 34 | 35 | // main operation: 36 | backend.stash_output(&mf.name, name)?; 37 | 38 | Ok(()) 39 | } 40 | -------------------------------------------------------------------------------- /src/status.rs: -------------------------------------------------------------------------------- 1 | use ansi_term::{Colour, ANSIString}; 2 | use core::input; 3 | use super::{Lockfile, CliError, LalResult, Manifest}; 4 | 5 | fn version_string(lf: 
Option<&Lockfile>, show_ver: bool, show_time: bool) -> ANSIString<'static> { 6 | if let Some(lock) = lf { 7 | let ver_color = if lock.version.parse::().is_ok() { 12 } else { 11 }; 8 | let verstr = Colour::Fixed(ver_color) 9 | .paint(format!("({}-{})", lock.version, lock.environment.clone())); 10 | let timestr = if let Some(ref time) = lock.built { 11 | Colour::Fixed(14).paint(format!("({})", time)) 12 | } else { 13 | ANSIString::from("") 14 | }; 15 | if !show_ver && !show_time { 16 | ANSIString::from("") 17 | } else if show_ver && !show_time { 18 | verstr 19 | } else if !show_ver && show_time { 20 | timestr 21 | } else { 22 | ANSIString::from(format!("{} {}", verstr, timestr)) 23 | } 24 | } else { 25 | ANSIString::from("") 26 | } 27 | } 28 | 29 | fn status_recurse( 30 | dep: &str, 31 | lf: &Lockfile, 32 | n: usize, 33 | parent_indent: Vec, 34 | show_ver: bool, 35 | show_time: bool, 36 | ) { 37 | assert_eq!(dep, &lf.name); 38 | let len = lf.dependencies.len(); 39 | for (i, (k, sublock)) in lf.dependencies.iter().enumerate() { 40 | let has_children = !sublock.dependencies.is_empty(); 41 | let fork_char = if has_children { "┬" } else { "─" }; 42 | let is_last = i == len - 1; 43 | let turn_char = if is_last { "└" } else { "├" }; 44 | 45 | let ws: String = parent_indent.iter().fold(String::new(), |res, &ws_only| { 46 | res + (if ws_only { " " } else { "│ " }) 47 | }); 48 | 49 | println!("│ {}{}─{} {} {}", 50 | ws, 51 | turn_char, 52 | fork_char, 53 | k, 54 | version_string(Some(sublock), show_ver, show_time)); 55 | 56 | let mut next_indent = parent_indent.clone(); 57 | next_indent.push(is_last); 58 | 59 | status_recurse(k, sublock, n + 1, next_indent, show_ver, show_time); 60 | } 61 | } 62 | 63 | /// Prints a fancy dependency tree of `./INPUT` to stdout. 64 | /// 65 | /// This is the quick version information of what you currently have in `./INPUT`. 
66 | /// It prints the tree and highlights versions, as well as both missing and extraneous 67 | /// dependencies in `./INPUT`. 68 | /// 69 | /// If the full flag is given, then the full dependency tree is also spliced in 70 | /// from lockfile data. 71 | /// 72 | /// It is not intended as a verifier, but will nevertheless produce a summary at the end. 73 | pub fn status(manifest: &Manifest, full: bool, show_ver: bool, show_time: bool) -> LalResult<()> { 74 | let mut error = None; 75 | 76 | let lf = Lockfile::default().populate_from_input()?; 77 | 78 | println!("{}", manifest.name); 79 | let deps = input::analyze_full(manifest)?; 80 | let len = deps.len(); 81 | for (i, (d, dep)) in deps.iter().enumerate() { 82 | let notes = if dep.missing && !dep.development { 83 | error = Some(CliError::MissingDependencies); 84 | Colour::Red.paint("(missing)").to_string() 85 | } else if dep.missing { 86 | Colour::Yellow.paint("(missing)").to_string() 87 | } else if dep.development { 88 | "(dev)".to_string() 89 | } else if dep.extraneous { 90 | error = Some(CliError::ExtraneousDependencies(dep.name.clone())); 91 | Colour::Green.paint("(extraneous)").to_string() 92 | } else { 93 | "".to_string() 94 | }; 95 | // list children in --full mode 96 | // NB: missing deps will not be populatable 97 | let has_children = full && !dep.missing && 98 | !&lf.dependencies[&dep.name].dependencies.is_empty(); 99 | let fork_char = if has_children { "┬" } else { "─" }; 100 | let is_last = i == len - 1; 101 | let turn_char = if is_last { "└" } else { "├" }; 102 | 103 | // first level deps are formatted with more metadata 104 | let level1 = format!("{} {}", d, notes); 105 | let ver_str = version_string(lf.dependencies.get(&dep.name), show_ver, show_time); 106 | println!("{}─{} {} {}", turn_char, fork_char, level1, ver_str); 107 | 108 | if has_children { 109 | trace!("Attempting to get {} out of lockfile deps {:?}", 110 | dep.name, 111 | lf.dependencies); 112 | // dep unwrap relies on populate_from_input 
try! reading all lockfiles earlier 113 | let sub_lock = &lf.dependencies[&dep.name]; 114 | status_recurse(&dep.name, sub_lock, 1, vec![], show_ver, show_time); 115 | } 116 | } 117 | 118 | // Return one of the errors as the main one (no need to vectorize these..) 119 | if let Some(e) = error { 120 | return Err(e); 121 | } 122 | Ok(()) 123 | } 124 | -------------------------------------------------------------------------------- /src/storage/artifactory.rs: -------------------------------------------------------------------------------- 1 | #![allow(missing_docs)] 2 | 3 | use std::vec::Vec; 4 | use std::io::{Read, Write}; 5 | use std::fs::File; 6 | use std::path::{Path, PathBuf}; 7 | 8 | #[cfg(feature = "upgrade")] 9 | use semver::Version; 10 | 11 | use serde_json; 12 | use sha1; 13 | use hyper::{self, Client}; 14 | use hyper::net::HttpsConnector; 15 | use hyper::header::{Authorization, Basic}; 16 | use hyper::status::StatusCode; 17 | use hyper_native_tls::NativeTlsClient; 18 | 19 | use core::{CliError, LalResult}; 20 | 21 | 22 | /// Artifactory credentials 23 | #[derive(Serialize, Deserialize, Clone)] 24 | pub struct Credentials { 25 | /// Upload username 26 | pub username: String, 27 | /// Upload password 28 | pub password: String, 29 | } 30 | 31 | /// Static Artifactory locations 32 | #[derive(Serialize, Deserialize, Clone, Default)] 33 | pub struct ArtifactoryConfig { 34 | /// Location of artifactory API master (for API queries) 35 | pub master: String, 36 | /// Location of artifactory slave (for fetching artifacts) 37 | pub slave: String, 38 | /// Release group name (for API queries) 39 | pub release: String, 40 | /// Virtual group (for downloads) 41 | pub vgroup: String, 42 | /// Optional publish credentials 43 | pub credentials: Option, 44 | } 45 | 46 | 47 | // Need these to query for stored artifacts: 48 | // This query has tons of info, but we only care about the version 49 | // And the version is encoded in children.uri with leading slash 50 | 
#[derive(Deserialize)] 51 | struct ArtifactoryVersion { 52 | uri: String, // folder: bool, 53 | } 54 | #[derive(Deserialize)] 55 | struct ArtifactoryStorageResponse { 56 | children: Vec, 57 | } 58 | 59 | // simple request body fetcher 60 | fn hyper_req(url: &str) -> LalResult { 61 | let client = Client::with_connector(HttpsConnector::new(NativeTlsClient::new().unwrap())); 62 | let mut res = client.get(url).send()?; 63 | if res.status != hyper::Ok { 64 | return Err(CliError::BackendFailure(format!("GET request with {}", res.status))); 65 | } 66 | let mut body = String::new(); 67 | res.read_to_string(&mut body)?; 68 | Ok(body) 69 | } 70 | 71 | // simple request downloader 72 | pub fn http_download_to_path(url: &str, save: &PathBuf) -> LalResult<()> { 73 | debug!("GET {}", url); 74 | let client = Client::with_connector(HttpsConnector::new(NativeTlsClient::new().unwrap())); 75 | let mut res = client.get(url).send()?; 76 | if res.status != hyper::Ok { 77 | return Err(CliError::BackendFailure(format!("GET request with {}", res.status))); 78 | } 79 | 80 | if cfg!(feature = "progress") { 81 | #[cfg(feature = "progress")] 82 | { 83 | use indicatif::{ProgressBar, ProgressStyle}; 84 | let total_size = res.headers.get::().unwrap().0; 85 | let mut downloaded = 0; 86 | let mut buffer = [0; 1024 * 64]; 87 | let mut f = File::create(save)?; 88 | let pb = ProgressBar::new(total_size); 89 | pb.set_style(ProgressStyle::default_bar() 90 | .template("{bar:40.yellow/black} {bytes}/{total_bytes} ({eta})")); 91 | 92 | while downloaded < total_size { 93 | let read = res.read(&mut buffer)?; 94 | f.write_all(&buffer[0..read])?; 95 | downloaded += read as u64; 96 | pb.set_position(downloaded); 97 | } 98 | f.flush()?; 99 | } 100 | } else { 101 | let mut buffer: Vec = Vec::new(); 102 | res.read_to_end(&mut buffer)?; 103 | let mut f = File::create(save)?; 104 | f.write_all(&buffer)?; 105 | } 106 | Ok(()) 107 | } 108 | 109 | 110 | /// Query the Artifactory storage api 111 | /// 112 | /// This 
will get, then parse all results as u32s, and return this list. 113 | /// This assumes versoning is done via a single integer. 114 | fn get_storage_versions(uri: &str) -> LalResult> { 115 | debug!("GET {}", uri); 116 | 117 | let resp = hyper_req(uri) 118 | .map_err(|e| { 119 | warn!("Failed to GET {}: {}", uri, e); 120 | CliError::BackendFailure("No version information found on API".into()) 121 | })?; 122 | 123 | trace!("Got body {}", resp); 124 | 125 | let res: ArtifactoryStorageResponse = serde_json::from_str(&resp)?; 126 | let mut builds: Vec = res.children 127 | .iter() 128 | .map(|r| r.uri.as_str()) 129 | .map(|r| r.trim_matches('/')) 130 | .filter_map(|b| b.parse().ok()) 131 | .collect(); 132 | builds.sort_by(|a, b| b.cmp(a)); // sort by version number descending 133 | Ok(builds) 134 | } 135 | 136 | // artifactory extra headers 137 | header! {(XCheckSumDeploy, "X-Checksum-Deploy") => [String]} 138 | header! {(XCheckSumSha1, "X-Checksum-Sha1") => [String]} 139 | 140 | /// Upload a tarball to artifactory 141 | /// 142 | /// This is using a http basic auth PUT to artifactory using config credentials. 
143 | fn upload_artifact(arti: &ArtifactoryConfig, uri: &str, f: &mut File) -> LalResult<()> { 144 | if let Some(creds) = arti.credentials.clone() { 145 | let client = Client::new(); 146 | 147 | let mut buffer: Vec = Vec::new(); 148 | f.read_to_end(&mut buffer)?; 149 | 150 | let full_uri = format!("{}/{}/{}", arti.slave, arti.release, uri); 151 | 152 | let mut sha = sha1::Sha1::new(); 153 | sha.update(&buffer); 154 | 155 | let auth = Authorization(Basic { 156 | username: creds.username, 157 | password: Some(creds.password), 158 | }); 159 | 160 | // upload the artifact 161 | info!("PUT {}", full_uri); 162 | let resp = client.put(&full_uri[..]).header(auth.clone()).body(&buffer[..]).send()?; 163 | debug!("resp={:?}", resp); 164 | let respstr = format!("{} from PUT {}", resp.status, full_uri); 165 | if resp.status != StatusCode::Created { 166 | return Err(CliError::UploadFailure(respstr)); 167 | } 168 | debug!("{}", respstr); 169 | 170 | // do another request to get the hash on artifactory 171 | // jfrog api does not allow do do both at once - and this also creates the md5 (somehow) 172 | // this creates ${full_uri}.sha1 and ${full_uri}.md5 (although we just gave it the sha..) 173 | // This `respsha` can fail if engci-maven becomes inconsistent. NotFound has been seen. 174 | // And that makes no sense because the above must have returned Created to get here.. 
175 | info!("PUT {} (X-Checksum-Sha1)", full_uri); 176 | let respsha = client 177 | .put(&full_uri[..]) 178 | .header(XCheckSumDeploy("true".into())) 179 | .header(XCheckSumSha1(sha.digest().to_string())) 180 | .header(auth) 181 | .send()?; 182 | debug!("respsha={:?}", respsha); 183 | let respshastr = format!("{} from PUT {} (X-Checksum-Sha1)", respsha.status, full_uri); 184 | if respsha.status != StatusCode::Created { 185 | return Err(CliError::UploadFailure(respshastr)); 186 | } 187 | debug!("{}", respshastr); 188 | 189 | Ok(()) 190 | } else { 191 | Err(CliError::MissingBackendCredentials) 192 | } 193 | } 194 | 195 | /// Get the maximal version number from the storage api 196 | fn get_storage_as_u32(uri: &str) -> LalResult { 197 | if let Some(&latest) = get_storage_versions(uri)?.iter().max() { 198 | Ok(latest) 199 | } else { 200 | Err(CliError::BackendFailure("No version information found on API".into())) 201 | } 202 | } 203 | 204 | // The URL for a component tarball under the one of the environment trees 205 | fn get_dependency_env_url( 206 | art_cfg: &ArtifactoryConfig, 207 | name: &str, 208 | version: u32, 209 | env: &str, 210 | ) -> String { 211 | let tar_url = format!("{}/{}/env/{}/{}/{}/{}.tar.gz", 212 | art_cfg.slave, 213 | art_cfg.vgroup, 214 | env, 215 | name, 216 | version.to_string(), 217 | name); 218 | 219 | trace!("Inferring tarball location as {}", tar_url); 220 | tar_url 221 | } 222 | 223 | fn get_dependency_url_latest( 224 | art_cfg: &ArtifactoryConfig, 225 | name: &str, 226 | env: &str, 227 | ) -> LalResult { 228 | let url = format!("{}/api/storage/{}/{}/{}/{}", 229 | art_cfg.master, 230 | art_cfg.release, 231 | "env", 232 | env, 233 | name); 234 | let v = get_storage_as_u32(&url)?; 235 | 236 | debug!("Found latest version as {}", v); 237 | Ok(Component { 238 | location: get_dependency_env_url(art_cfg, name, v, env), 239 | version: v, 240 | name: name.into(), 241 | }) 242 | } 243 | 244 | // This queries the API for the default location 245 | // 
if a default exists, then all our current multi-builds must exist 246 | fn get_latest_versions(art_cfg: &ArtifactoryConfig, name: &str, env: &str) -> LalResult> { 247 | let url = format!("{}/api/storage/{}/{}/{}/{}", 248 | art_cfg.master, 249 | art_cfg.release, 250 | "env", 251 | env, 252 | name); 253 | 254 | get_storage_versions(&url) 255 | } 256 | 257 | /// Main entry point for install 258 | fn get_tarball_uri( 259 | art_cfg: &ArtifactoryConfig, 260 | name: &str, 261 | version: Option, 262 | env: &str, 263 | ) -> LalResult { 264 | if let Some(v) = version { 265 | Ok(Component { 266 | location: get_dependency_env_url(art_cfg, name, v, env), 267 | version: v, 268 | name: name.into(), 269 | }) 270 | } else { 271 | get_dependency_url_latest(art_cfg, name, env) 272 | } 273 | } 274 | 275 | /// Latest lal version - as seen on artifactory 276 | #[cfg(feature = "upgrade")] 277 | pub struct LatestLal { 278 | /// URL of the latest tarball 279 | pub url: String, 280 | /// Semver::Version of the latest tarball 281 | pub version: Version, 282 | } 283 | 284 | /// Entry point for `lal::upgrade` 285 | /// 286 | /// This mostly duplicates the behaviour in `get_storage_as_u32`, however, 287 | /// it is parsing the version as a `semver::Version` struct rather than a u32. 288 | /// This is used regardless of your used backend because we want people to use our 289 | /// main release of lal on CME-release on cisco artifactory at the moment. 
290 | #[cfg(feature = "upgrade")] 291 | pub fn get_latest_lal_version() -> LalResult { 292 | // canonical latest url 293 | let uri = "https://engci-maven-master.cisco.com/artifactory/api/storage/CME-release/lal"; 294 | debug!("GET {}", uri); 295 | let resp = hyper_req(uri) 296 | .map_err(|e| { 297 | warn!("Failed to GET {}: {}", uri, e); 298 | CliError::BackendFailure("No version information found on API".into()) 299 | })?; 300 | trace!("Got body {}", resp); 301 | 302 | let res: ArtifactoryStorageResponse = serde_json::from_str(&resp)?; 303 | let latest: Option = res.children 304 | .iter() 305 | .map(|r| r.uri.trim_matches('/').to_string()) 306 | .inspect(|v| trace!("Found lal version {}", v)) 307 | .filter_map(|v| Version::parse(&v).ok()) 308 | .max(); // Semver::Version implements an order 309 | 310 | if let Some(l) = latest { 311 | Ok(LatestLal { 312 | version: l.clone(), 313 | url: format!("https://engci-maven.cisco.com/artifactory/CME-group/lal/{}/lal.tar.gz", 314 | l), 315 | }) 316 | } else { 317 | warn!("Failed to parse version information from artifactory storage api for lal"); 318 | Err(CliError::BackendFailure("No version information found on API".into())) 319 | } 320 | } 321 | 322 | use super::{Backend, Component}; 323 | 324 | /// Everything we need for Artifactory to implement the Backend trait 325 | pub struct ArtifactoryBackend { 326 | /// Artifactory config and credentials 327 | pub config: ArtifactoryConfig, 328 | /// Cache directory 329 | pub cache: String, 330 | } 331 | 332 | impl ArtifactoryBackend { 333 | pub fn new(cfg: &ArtifactoryConfig, cache: &str) -> Self { 334 | // TODO: create hyper clients in here rather than once per download 335 | ArtifactoryBackend { 336 | config: cfg.clone(), 337 | cache: cache.into(), 338 | } 339 | } 340 | } 341 | 342 | /// Artifact backend trait for `ArtifactoryBackend` 343 | /// 344 | /// This is intended to be used by the caching trait `CachedBackend`, but for 345 | /// specific low-level use cases, these 
methods can be used directly. 346 | impl Backend for ArtifactoryBackend { 347 | fn get_versions(&self, name: &str, loc: &str) -> LalResult> { 348 | get_latest_versions(&self.config, name, loc) 349 | } 350 | 351 | fn get_latest_version(&self, name: &str, loc: &str) -> LalResult { 352 | let latest = get_dependency_url_latest(&self.config, name, loc)?; 353 | Ok(latest.version) 354 | } 355 | 356 | fn get_component_info( 357 | &self, 358 | name: &str, 359 | version: Option, 360 | loc: &str, 361 | ) -> LalResult { 362 | get_tarball_uri(&self.config, name, version, loc) 363 | } 364 | 365 | fn publish_artifact(&self, name: &str, version: u32, env: &str) -> LalResult<()> { 366 | // this fn basically assumes all the sanity checks have been performed 367 | // files must exist and lockfile must be sensible 368 | let artdir = Path::new("./ARTIFACT"); 369 | let tarball = artdir.join(format!("{}.tar.gz", name)); 370 | let lockfile = artdir.join("lockfile.json"); 371 | 372 | // uri prefix if specific env upload 373 | let prefix = format!("env/{}/", env); 374 | 375 | let tar_uri = format!("{}{}/{}/{}.tar.gz", prefix, name, version, name); 376 | let mut tarf = File::open(tarball)?; 377 | upload_artifact(&self.config, &tar_uri, &mut tarf)?; 378 | 379 | let mut lockf = File::open(lockfile)?; 380 | let lf_uri = format!("{}{}/{}/lockfile.json", prefix, name, version); 381 | upload_artifact(&self.config, &lf_uri, &mut lockf)?; 382 | Ok(()) 383 | } 384 | 385 | fn get_cache_dir(&self) -> String { self.cache.clone() } 386 | 387 | fn raw_fetch(&self, url: &str, dest: &PathBuf) -> LalResult<()> { 388 | http_download_to_path(url, dest) 389 | } 390 | } 391 | -------------------------------------------------------------------------------- /src/storage/download.rs: -------------------------------------------------------------------------------- 1 | use std::fs; 2 | use std::path::{Path, PathBuf}; 3 | 4 | use storage::{Backend, CachedBackend, Component}; 5 | use core::{CliError, LalResult, 
output}; 6 | 7 | fn is_cached(backend: &T, name: &str, version: u32, env: &str) -> bool { 8 | get_cache_dir(backend, name, version, env).is_dir() 9 | } 10 | 11 | fn get_cache_dir(backend: &T, name: &str, version: u32, env: &str) -> PathBuf { 12 | let cache = backend.get_cache_dir(); 13 | Path::new(&cache).join("environments").join(env).join(name).join(version.to_string()) 14 | } 15 | 16 | fn store_tarball( 17 | backend: &T, 18 | name: &str, 19 | version: u32, 20 | env: &str, 21 | ) -> Result<(), CliError> { 22 | // 1. mkdir -p cacheDir/$name/$version 23 | let destdir = get_cache_dir(backend, name, version, env); 24 | if !destdir.is_dir() { 25 | fs::create_dir_all(&destdir)?; 26 | } 27 | // 2. stuff $PWD/$name.tar.gz in there 28 | let tarname = [name, ".tar.gz"].concat(); 29 | let dest = Path::new(&destdir).join(&tarname); 30 | let src = Path::new(".").join(&tarname); 31 | if !src.is_file() { 32 | return Err(CliError::MissingTarball); 33 | } 34 | debug!("Move {:?} -> {:?}", src, dest); 35 | fs::copy(&src, &dest)?; 36 | fs::remove_file(&src)?; 37 | 38 | Ok(()) 39 | } 40 | 41 | // helper for the unpack_ functions 42 | fn extract_tarball_to_input(tarname: PathBuf, component: &str) -> LalResult<()> { 43 | use tar::Archive; 44 | use flate2::read::GzDecoder; 45 | 46 | let extract_path = Path::new("./INPUT").join(component); 47 | let _ = fs::remove_dir_all(&extract_path); // remove current dir if exists 48 | fs::create_dir_all(&extract_path)?; 49 | 50 | // Open file, conditionally wrap a progress bar around the file reading 51 | if cfg!(feature = "progress") { 52 | #[cfg(feature = "progress")] 53 | { 54 | use super::progress::ProgressReader; 55 | let data = fs::File::open(tarname)?; 56 | let progdata = ProgressReader::new(data)?; 57 | let decompressed = GzDecoder::new(progdata)?; // decoder reads data (proxied) 58 | let mut archive = Archive::new(decompressed); // Archive reads decoded 59 | archive.unpack(&extract_path)?; 60 | } 61 | } else { 62 | let data = 
fs::File::open(tarname)?; 63 | let decompressed = GzDecoder::new(data)?; // decoder reads data 64 | let mut archive = Archive::new(decompressed); // Archive reads decoded 65 | archive.unpack(&extract_path)?; 66 | }; 67 | 68 | Ok(()) 69 | } 70 | 71 | /// Cacheable trait implemented for all Backends. 72 | /// 73 | /// As long as we have the Backend trait implemented, we can add a caching layer 74 | /// around this, which implements the basic compression ops and file gymnastics. 75 | /// 76 | /// Most subcommands should be OK with just using this trait rather than using 77 | /// `Backend` directly as this does the stuff you normally would want done. 78 | impl CachedBackend for T 79 | where 80 | T: Backend, 81 | { 82 | /// Get the latest versions of a component across all supported environments 83 | /// 84 | /// Because the versions have to be available in all environments, these numbers may 85 | /// not contain the highest numbers available on specific environments. 86 | fn get_latest_supported_versions( 87 | &self, 88 | name: &str, 89 | environments: Vec, 90 | ) -> LalResult> { 91 | use std::collections::BTreeSet; 92 | let mut result = BTreeSet::new(); 93 | let mut first_pass = true; 94 | for e in environments { 95 | let eres: BTreeSet<_> = self.get_versions(name, &e)?.into_iter().take(100).collect(); 96 | info!("Last versions for {} in {} env is {:?}", name, e, eres); 97 | if first_pass { 98 | // if first pass, can't take intersection with something empty, start with first result 99 | result = eres; 100 | first_pass = false; 101 | } else { 102 | result = result.clone().intersection(&eres).cloned().collect(); 103 | } 104 | } 105 | debug!("Intersection of allowed versions {:?}", result); 106 | Ok(result.into_iter().collect()) 107 | } 108 | 109 | /// Locate a proper component, downloading it and caching if necessary 110 | fn retrieve_published_component( 111 | &self, 112 | name: &str, 113 | version: Option, 114 | env: &str, 115 | ) -> LalResult<(PathBuf, Component)> { 
    /// Locate a proper component, downloading it and caching if necessary
    ///
    /// Resolves `name` (at `version`, or latest when `None`) via the backend,
    /// downloads the tarball into the cache on a miss, and returns the cached
    /// tarball path together with the resolved `Component` metadata.
    fn retrieve_published_component(
        &self,
        name: &str,
        version: Option<u32>,
        env: &str,
    ) -> LalResult<(PathBuf, Component)> {
        trace!("Locate component {}", name);

        // Resolve the concrete version number and download location first
        let component = self.get_component_info(name, version, env)?;

        if !is_cached(self, &component.name, component.version, env) {
            // download to PWD then move it to stash immediately
            let local_tarball = Path::new(".").join(format!("{}.tar.gz", name));
            self.raw_fetch(&component.location, &local_tarball)?;
            store_tarball(self, name, component.version, env)?;
        }
        // store_tarball above must have populated the cache directory
        assert!(is_cached(self, &component.name, component.version, env),
                "cached component");

        trace!("Fetching {} from cache", name);
        let tarname = get_cache_dir(self, &component.name, component.version, env)
            .join(format!("{}.tar.gz", name));
        Ok((tarname, component))
    }
-> LalResult<()> { 175 | let destdir = Path::new(&self.get_cache_dir()).join("stash").join(name).join(code); 176 | debug!("Creating {:?}", destdir); 177 | fs::create_dir_all(&destdir)?; 178 | 179 | // Tar it straight into destination 180 | output::tar(&destdir.join(format!("{}.tar.gz", name)))?; 181 | 182 | // Copy the lockfile there for users inspecting the stashed folder 183 | // NB: this is not really needed, as it's included in the tarball anyway 184 | fs::copy("./OUTPUT/lockfile.json", destdir.join("lockfile.json"))?; 185 | Ok(()) 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /src/storage/local.rs: -------------------------------------------------------------------------------- 1 | #![allow(missing_docs)] 2 | 3 | use std::fs; 4 | use std::str::FromStr; 5 | use std::vec::Vec; 6 | use std::path::{Path, PathBuf}; 7 | 8 | use core::{CliError, LalResult, config_dir, ensure_dir_exists_fresh}; 9 | 10 | 11 | /// LocalBackend configuration options (currently none) 12 | #[derive(Serialize, Deserialize, Clone, Default)] 13 | pub struct LocalConfig {} 14 | 15 | use super::{Backend, Component}; 16 | 17 | /// Artifact storage on the local machine 18 | pub struct LocalBackend { 19 | /// Local config 20 | pub config: LocalConfig, 21 | /// Cache directory 22 | pub cache: String, 23 | } 24 | 25 | impl LocalBackend { 26 | pub fn new(cfg: &LocalConfig, cache: &str) -> Self { 27 | LocalBackend { 28 | config: cfg.clone(), 29 | cache: cache.into(), 30 | } 31 | } 32 | } 33 | 34 | /// Artifact backend trait for `LocalBackend` 35 | /// 36 | /// This is intended to be used by the caching trait `CachedBackend`, but for 37 | /// specific low-level use cases, these methods can be used directly. 
38 | impl Backend for LocalBackend { 39 | fn get_versions(&self, name: &str, loc: &str) -> LalResult> { 40 | let tar_dir = format!("{}/environments/{}/{}/", self.cache, loc, name); 41 | let dentries = fs::read_dir(config_dir().join(tar_dir)); 42 | let mut versions = vec![]; 43 | for entry in dentries? { 44 | let path = entry?; 45 | if let Some(filename) = path.file_name().to_str() { 46 | if let Ok(version) = u32::from_str(filename) { 47 | versions.push(version); 48 | } 49 | } 50 | } 51 | Ok(versions) 52 | } 53 | 54 | fn get_latest_version(&self, name: &str, loc: &str) -> LalResult { 55 | if let Some(&last) = self.get_versions(name, loc)?.last() { 56 | return Ok(last); 57 | } 58 | Err(CliError::BackendFailure("No versions found on local storage".into())) 59 | } 60 | 61 | fn get_component_info( 62 | &self, 63 | name: &str, 64 | version: Option, 65 | loc: &str, 66 | ) -> LalResult { 67 | info!("get_component_info: {} {:?} {}", name, version, loc); 68 | 69 | let v = if let Some(ver) = version { 70 | ver 71 | } else { 72 | self.get_latest_version(name, loc)? 
73 | }; 74 | let loc = format!("{}/environments/{}/{}/{}/{}.tar.gz", self.cache, loc, name, v, name); 75 | Ok(Component { 76 | name: name.into(), 77 | version: v, 78 | location: loc, 79 | }) 80 | } 81 | 82 | fn publish_artifact(&self, name: &str, version: u32, env: &str) -> LalResult<()> { 83 | // this fn basically assumes all the sanity checks have been performed 84 | // files must exist and lockfile must be sensible 85 | let artifactdir = Path::new("./ARTIFACT"); 86 | let tarball = artifactdir.join(format!("{}.tar.gz", name)); 87 | let lockfile = artifactdir.join("lockfile.json"); 88 | 89 | // prefix with environment 90 | let tar_dir = format!("{}/environments/{}/{}/{}/", self.cache, env, name, version); 91 | let tar_path = format!("{}/environments/{}/{}/{}/{}.tar.gz", self.cache, env, name, version, name); 92 | let lock_path = format!("{}/environments/{}/{}/{}/lockfile.json", self.cache, env, name, version); 93 | 94 | if let Some(full_tar_dir) = config_dir().join(tar_dir).to_str() { 95 | ensure_dir_exists_fresh(full_tar_dir)?; 96 | } 97 | 98 | fs::copy(tarball, config_dir().join(tar_path))?; 99 | fs::copy(lockfile, config_dir().join(lock_path))?; 100 | 101 | Ok(()) 102 | } 103 | 104 | fn get_cache_dir(&self) -> String { self.cache.clone() } 105 | 106 | fn raw_fetch(&self, src: &str, dest: &PathBuf) -> LalResult<()> { 107 | debug!("raw fetch {} -> {}", src, dest.display()); 108 | fs::copy(src, dest)?; 109 | Ok(()) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /src/storage/mod.rs: -------------------------------------------------------------------------------- 1 | pub use self::traits::{BackendConfiguration, Backend, CachedBackend, Component}; 2 | 3 | pub use self::artifactory::{ArtifactoryConfig, Credentials, ArtifactoryBackend}; 4 | pub use self::local::{LocalConfig, LocalBackend}; 5 | 6 | // Some special exports for lal upgrade - canonical releases are on artifactory atm 7 | #[cfg(feature = "upgrade")] 8 
| pub use self::artifactory::{LatestLal, get_latest_lal_version, http_download_to_path}; 9 | 10 | mod traits; 11 | mod artifactory; 12 | mod local; 13 | mod download; 14 | 15 | #[cfg(feature = "progress")] 16 | mod progress; 17 | -------------------------------------------------------------------------------- /src/storage/progress.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::io::{Read, Seek, SeekFrom}; 3 | //use std::io::{Write}; 4 | use indicatif::{ProgressBar, ProgressStyle}; 5 | 6 | /// Wrapper around a `Read` that reports the progress made. 7 | /// 8 | /// Used to monitor slow IO readers 9 | /// Unfortunately cannot use this with http client yet as it does not implement seek 10 | pub struct ProgressReader { 11 | rdr: R, 12 | pb: ProgressBar, 13 | } 14 | 15 | /*pub fn copy_with_progress(progress: &ProgressBar, 16 | reader: &mut R, writer: &mut W) 17 | -> io::Result 18 | where R: Read, W: Write 19 | { 20 | let mut buf = [0; 16384]; 21 | let mut written = 0; 22 | loop { 23 | let len = match reader.read(&mut buf) { 24 | Ok(0) => return Ok(written), 25 | Ok(len) => len, 26 | Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue, 27 | Err(e) => return Err(e), 28 | }; 29 | writer.write_all(&buf[..len])?; 30 | written += len as u64; 31 | progress.inc(len as u64); 32 | } 33 | } 34 | */ 35 | impl ProgressReader { 36 | pub fn new(mut rdr: R) -> io::Result> { 37 | let len = rdr.seek(SeekFrom::End(0))?; 38 | rdr.seek(SeekFrom::Start(0))?; 39 | let pb = ProgressBar::new(len); 40 | pb.set_style(ProgressStyle::default_bar() 41 | .template("{bar:40.green/black} {bytes}/{total_bytes} ({eta})")); 42 | Ok(ProgressReader { rdr, pb }) 43 | } 44 | } 45 | 46 | /*impl ProgressReader { 47 | pub fn progress(&self) -> &ProgressBar { 48 | &self.pb 49 | } 50 | }*/ 51 | 52 | impl Read for ProgressReader { 53 | fn read(&mut self, buf: &mut [u8]) -> io::Result { 54 | let rv = self.rdr.read(buf)?; 55 | 
self.pb.inc(rv as u64); 56 | Ok(rv) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /src/storage/traits.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use core::LalResult; 4 | use super::{ArtifactoryConfig, LocalConfig}; 5 | 6 | /// An enum struct for the currently configured `Backend` 7 | /// 8 | /// Any other implementations should be imported and listed here. 9 | /// Currently only artifactory is supported. 10 | #[derive(Serialize, Deserialize, Clone)] 11 | pub enum BackendConfiguration { 12 | /// Config for the `ArtifactoryBackend` 13 | #[serde(rename = "artifactory")] 14 | Artifactory(ArtifactoryConfig), 15 | 16 | /// Config for the `LocalBackend` 17 | #[serde(rename = "local")] 18 | Local(LocalConfig), 19 | } 20 | 21 | /// Artifactory is the default backend 22 | impl Default for BackendConfiguration { 23 | fn default() -> Self { BackendConfiguration::Artifactory(ArtifactoryConfig::default()) } 24 | } 25 | 26 | 27 | /// The basic definition of a component as it exists online 28 | /// 29 | /// A component may have many build artifacts from many environments. 30 | pub struct Component { 31 | /// Name of the component 32 | pub name: String, 33 | /// Version number 34 | pub version: u32, 35 | /// The raw location of the component at the specified version number 36 | /// 37 | /// No restriction on how this information is encoded, but it must work with `raw_fetch` 38 | pub location: String, 39 | } 40 | 41 | /// Properties a storage backend of artifacts should have 42 | /// 43 | /// We are not really relying on Artifactory specific quirks in our default usage 44 | /// so that in case it fails it can be switched over. 45 | /// We do rely on there being a basic API that can implement this trait though. 
46 | pub trait Backend { 47 | /// Get a list of versions for a component in descending order 48 | fn get_versions(&self, name: &str, loc: &str) -> LalResult>; 49 | /// Get the latest version of a component 50 | fn get_latest_version(&self, name: &str, loc: &str) -> LalResult; 51 | 52 | /// Get the version and location information of a component 53 | /// 54 | /// If no version is given, figure out what latest is 55 | fn get_component_info(&self, name: &str, ver: Option, loc: &str) -> LalResult; 56 | 57 | /// Publish a release build's ARTIFACT to a specific location 58 | /// 59 | /// This will publish everything inside the ARTIFACT dir created by `lal build -r` 60 | fn publish_artifact(&self, name: &str, version: u32, env: &str) -> LalResult<()>; 61 | 62 | /// Raw fetch of location to a destination 63 | /// 64 | /// location can be a HTTPS url / a system path / etc (depending on the backend) 65 | fn raw_fetch(&self, location: &str, dest: &PathBuf) -> LalResult<()>; 66 | 67 | /// Return the base directory to be used to dump cached downloads 68 | /// 69 | /// This has to be in here for `CachedBackend` to have a straight dependency 70 | fn get_cache_dir(&self) -> String; 71 | } 72 | 73 | /// A secondary trait that builds upon the Backend trait 74 | /// 75 | /// This wraps the common fetch commands in a caching layer on the cache dir. 
76 | pub trait CachedBackend { 77 | /// Get the latest version of a component across all supported environments 78 | fn get_latest_supported_versions( 79 | &self, 80 | name: &str, 81 | environments: Vec, 82 | ) -> LalResult>; 83 | 84 | /// Retrieve the location to a cached published component (downloading if necessary) 85 | fn retrieve_published_component( 86 | &self, 87 | name: &str, 88 | version: Option, 89 | env: &str, 90 | ) -> LalResult<(PathBuf, Component)>; 91 | 92 | /// Retrieve the location to a stashed component 93 | fn retrieve_stashed_component(&self, name: &str, code: &str) -> LalResult; 94 | 95 | /// Retrieve and unpack a cached component in INPUT 96 | fn unpack_published_component( 97 | &self, 98 | name: &str, 99 | version: Option, 100 | env: &str, 101 | ) -> LalResult; 102 | 103 | /// Retrieve and unpack a stashed component to INPUT 104 | fn unpack_stashed_component(&self, name: &str, code: &str) -> LalResult<()>; 105 | 106 | /// Add a stashed component from a folder 107 | fn stash_output(&self, name: &str, code: &str) -> LalResult<()>; 108 | } 109 | -------------------------------------------------------------------------------- /src/update.rs: -------------------------------------------------------------------------------- 1 | use storage::CachedBackend; 2 | use super::{LalResult, Manifest, CliError}; 3 | 4 | /// Update specific dependencies outside the manifest 5 | /// 6 | /// Multiple "components=version" strings can be supplied, where the version is optional. 7 | /// If no version is supplied, latest is fetched. 8 | /// 9 | /// If installation was successful, the fetched tarballs are unpacked into `./INPUT`. 10 | /// If one `save` or `savedev` was set, the fetched versions are also updated in the 11 | /// manifest. This provides an easy way to not have to deal with strict JSON manually. 
12 | pub fn update( 13 | manifest: &Manifest, 14 | backend: &T, 15 | components: Vec, 16 | save: bool, 17 | savedev: bool, 18 | env: &str, 19 | ) -> LalResult<()> { 20 | debug!("Update specific deps: {:?}", components); 21 | 22 | let mut error = None; 23 | let mut updated = Vec::with_capacity(components.len()); 24 | for comp in &components { 25 | info!("Fetch {} {}", env, comp); 26 | if comp.contains('=') { 27 | let pair: Vec<&str> = comp.split('=').collect(); 28 | if let Ok(n) = pair[1].parse::() { 29 | if pair[0].to_lowercase() != pair[0] { 30 | return Err(CliError::InvalidComponentName(pair[0].into())); 31 | } 32 | // standard fetch with an integer version 33 | match backend.unpack_published_component(pair[0], Some(n), env) { 34 | Ok(c) => updated.push(c), 35 | Err(e) => { 36 | warn!("Failed to update {} ({})", pair[0], e); 37 | error = Some(e); 38 | } 39 | } 40 | } else { 41 | // fetch from stash - this does not go into `updated` it it succeeds 42 | // because we wont and cannot save stashed versions in the manifest 43 | let _ = backend.unpack_stashed_component(pair[0], pair[1]).map_err(|e| { 44 | warn!("Failed to update {} from stash ({})", pair[0], e); 45 | error = Some(e); 46 | }); 47 | } 48 | } else { 49 | if &comp.to_lowercase() != comp { 50 | return Err(CliError::InvalidComponentName(comp.clone())); 51 | } 52 | // fetch without a specific version (latest) 53 | 54 | // First, since this potentially goes in the manifest 55 | // make sure the version is found for all supported environments: 56 | let ver = backend 57 | .get_latest_supported_versions(comp, manifest.supportedEnvironments.clone())? 
58 | .into_iter() 59 | .max() 60 | .ok_or(CliError::NoIntersectedVersion(comp.clone()))?; 61 | info!("Fetch {} {}={}", env, comp, ver); 62 | 63 | match backend.unpack_published_component(comp, Some(ver), env) { 64 | Ok(c) => updated.push(c), 65 | Err(e) => { 66 | warn!("Failed to update {} ({})", &comp, e); 67 | error = Some(e); 68 | } 69 | } 70 | } 71 | } 72 | if let Some(e) = error { 73 | return Err(e); 74 | } 75 | 76 | // Update manifest if saving in any way 77 | if save || savedev { 78 | let mut mf = manifest.clone(); 79 | // find reference to correct list 80 | let mut hmap = if save { mf.dependencies.clone() } else { mf.devDependencies.clone() }; 81 | for c in &updated { 82 | debug!("Successfully updated {} at version {}", &c.name, c.version); 83 | if hmap.contains_key(&c.name) { 84 | let val = hmap.get_mut(&c.name).unwrap(); 85 | if c.version < *val { 86 | warn!("Downgrading {} from {} to {}", c.name, *val, c.version); 87 | } else if c.version > *val { 88 | info!("Upgrading {} from {} to {}", c.name, *val, c.version); 89 | } else { 90 | info!("Maintaining {} at version {}", c.name, c.version); 91 | } 92 | *val = c.version; 93 | } else { 94 | hmap.insert(c.name.clone(), c.version); 95 | } 96 | } 97 | if save { 98 | mf.dependencies = hmap; 99 | } else { 100 | mf.devDependencies = hmap; 101 | } 102 | mf.write()?; 103 | } 104 | Ok(()) 105 | } 106 | 107 | /// Wrapper around update that updates all components 108 | /// 109 | /// This will pass all dependencies or devDependencies to update. 110 | /// If the save flag is set, then the manifest will be updated correctly. 111 | /// I.e. dev updates will update only the dev portions of the manifest. 
112 | pub fn update_all( 113 | manifest: &Manifest, 114 | backend: &T, 115 | save: bool, 116 | dev: bool, 117 | env: &str, 118 | ) -> LalResult<()> { 119 | let deps: Vec = if dev { 120 | manifest.devDependencies.keys().cloned().collect() 121 | } else { 122 | manifest.dependencies.keys().cloned().collect() 123 | }; 124 | update(manifest, backend, deps, save && !dev, save && dev, env) 125 | } 126 | -------------------------------------------------------------------------------- /src/upgrade.rs: -------------------------------------------------------------------------------- 1 | //! This file controls the automatic upgrade procedure in lal for musl builds. 2 | //! 3 | //! It will, if a new version is available in the `Backend`, download it 4 | //! and overwrite the running executable using a file renaming dance. 5 | //! 6 | //! Be very careful about updating these functions without also testing the musl 7 | //! build on a variety of Cargo.toml.version strings. 8 | //! 9 | //! People should not have to be told to `curl lal.tar.gz | tar xz -C prefix` again. 
10 | 11 | use semver::Version; 12 | use std::env; 13 | use std::path::{Path, PathBuf}; 14 | use std::fs; 15 | use std::process::Command; 16 | 17 | use super::{LalResult, CliError}; 18 | use super::{http_download_to_path, get_latest_lal_version, LatestLal}; 19 | 20 | struct ExeInfo { 21 | /// Whether ldd things its a dynamic executable 22 | dynamic: bool, 23 | /// Whether this is a debug build (only for dynamic executables) 24 | debug: bool, 25 | /// Path to current_exe 26 | path: String, 27 | /// Best guess at install prefix based on path (only for static executables) 28 | prefix: Option, 29 | /// Parsed semver version 30 | version: Version, 31 | } 32 | 33 | fn identify_exe() -> LalResult { 34 | let pth = env::current_exe()?; 35 | trace!("lal at {}", pth.display()); 36 | let ldd_output = Command::new("ldd").arg(&pth).output()?; 37 | let ldd_str = String::from_utf8_lossy(&ldd_output.stdout); 38 | let is_dynamic = !ldd_str.contains("not a dynamic executable"); 39 | let pthstr: String = pth.to_str().unwrap().into(); 40 | let prefix = if pthstr.contains("/bin/") { 41 | let v: Vec<&str> = pthstr.split("/bin/").collect(); 42 | if v.len() == 2 { Some(Path::new(v[0]).to_owned()) } else { None } 43 | } else { 44 | None 45 | }; 46 | Ok(ExeInfo { 47 | dynamic: is_dynamic, 48 | debug: pthstr.contains("debug"), // cheap check for compiled versions 49 | path: pthstr, 50 | prefix: prefix, 51 | version: Version::parse(env!("CARGO_PKG_VERSION")).unwrap(), 52 | }) 53 | } 54 | 55 | // basic tarball extractor 56 | // smaller than the INPUT extractor uses because it doesn't clear out anything 57 | fn extract_tarball(input: PathBuf, output: &PathBuf) -> LalResult<()> { 58 | use tar::Archive; 59 | use flate2::read::GzDecoder; 60 | 61 | let data = fs::File::open(input)?; 62 | let decompressed = GzDecoder::new(data)?; // decoder reads data 63 | let mut archive = Archive::new(decompressed); // Archive reads decoded 64 | 65 | archive.unpack(&output)?; 66 | Ok(()) 67 | } 68 | 69 | fn 
verify_permissions(exe: &ExeInfo) -> LalResult<()> { 70 | // this is sufficient unless the user copied it over manually with sudo 71 | // and then chowned it, but for all normal installs, touching the main file 72 | // would sufficiently check that we have write permissions 73 | let s = Command::new("touch").arg(&exe.path).status()?; 74 | if !s.success() { 75 | return Err(CliError::SubprocessFailure(s.code().unwrap_or(1001))); 76 | } 77 | Ok(()) 78 | } 79 | 80 | fn overwrite_exe(latest: &LatestLal, exe: &ExeInfo) -> LalResult<()> { 81 | let prefix = exe.prefix.clone().unwrap(); 82 | extract_tarball(prefix.join("lal.tar.gz"), &prefix)?; 83 | validate_exe(latest, exe)?; 84 | Ok(()) 85 | } 86 | 87 | fn validate_exe(latest: &LatestLal, exe: &ExeInfo) -> LalResult<()> { 88 | let lal_output = Command::new(&exe.path).arg("-V").output()?; 89 | let lal_str = String::from_utf8_lossy(&lal_output.stdout); 90 | debug!("Output from lal -V: {}", lal_str.trim()); 91 | debug!("Expecting to find: {}", latest.version.to_string()); 92 | if !lal_str.contains(&latest.version.to_string()) { 93 | let estr = format!("lal -V yielded {}", lal_str.trim()); 94 | return Err(CliError::UpgradeValidationFailure(estr)); 95 | } 96 | debug!("New version validated"); 97 | Ok(()) 98 | } 99 | 100 | fn upgrade_exe(latest: &LatestLal, exe: &ExeInfo) -> LalResult<()> { 101 | let prefix = exe.prefix.clone().unwrap(); 102 | // 0. sanity - could we actually upgrade if we tried? 103 | verify_permissions(exe) 104 | .map_err(|_| CliError::MissingPrefixPermissions(prefix.to_string_lossy().into()))?; 105 | debug!("Have permissions to write in {}", prefix.display()); 106 | 107 | // 1. rename current running executable to the same except _old suffix 108 | let old_file = prefix.join("bin").join("_lal_old"); 109 | if old_file.is_file() { 110 | // remove previous upgrade backup 111 | fs::remove_file(&old_file)?; 112 | } 113 | // 2. 
make sure we can download the tarball before starting 114 | let tar_dest = prefix.join("lal.tar.gz"); 115 | info!("Downloading tarball to {}", tar_dest.display()); 116 | http_download_to_path(&latest.url, &tar_dest)?; 117 | info!("Backing up {} to {}", exe.path, old_file.display()); 118 | fs::rename(&exe.path, &old_file)?; // need to undo this if we fail 119 | // NB: DO NOT INSERT CALLS THAT CAN FAIL HERE BEFORE THE OVERWRITE 120 | // 3. force dump lal tarball into exe.prefix - rollback if it failed 121 | info!("Unpacking new version of lal into {}", prefix.display()); 122 | match overwrite_exe(latest, exe) { 123 | // NB: This call takes a small amount of time - and can be aborted :/ 124 | // it is not an atomic operation, so recovery can unfortunately fail :| 125 | Ok(_) => trace!("overwrite successful"), 126 | Err(e) => { 127 | // tarball could potentially fail to extract here 128 | warn!("lal upgrade failed - rolling back"); 129 | warn!("Error: {}", e); 130 | fs::rename(&old_file, &exe.path)?; // better hope this works.. 131 | return Err(e); 132 | } 133 | } 134 | 135 | Ok(()) // we did it! 136 | } 137 | 138 | 139 | /// Check for and possibly upgrade lal when using musl releases 140 | /// 141 | /// This will query for the latest version, and upgrade in the one possible case. 142 | /// If a newer version found (> in semver), and it's a static executable, 143 | /// then an executable upgrade is attempted from the new release url. 
144 | pub fn upgrade(silent: bool) -> LalResult { 145 | let latest = get_latest_lal_version()?; 146 | let exe = identify_exe()?; 147 | 148 | if latest.version > exe.version { 149 | // New version found - always full output now 150 | info!("A new version of lal is available: {}", latest.version); 151 | info!("You are running {} at {}", exe.version, exe.path); 152 | println!(""); 153 | 154 | if exe.dynamic { 155 | info!("Your version is built from source - please run (in source checkout):"); 156 | let build_flag = if exe.debug { "" } else { "--release" }; 157 | info!("rustup update stable && git pull && cargo build {}", 158 | build_flag); 159 | } else if exe.prefix.is_some() { 160 | // install lal in the prefix it's normally in 161 | info!("Upgrading..."); 162 | upgrade_exe(&latest, &exe)?; 163 | info!("lal upgraded successfully to {} at {}", 164 | latest.version, 165 | exe.path); 166 | println!(""); 167 | } else { 168 | // static, but no good guess of where to install - let user decide: 169 | info!("Your version is prebuilt but installed weirdly - please run:"); 170 | info!("curl {} | tar xz -C /usr/local", latest.url); 171 | } 172 | } else if silent { 173 | debug!("You are running the latest version of lal"); 174 | } else { 175 | info!("You are running the latest version of lal"); 176 | } 177 | Ok(latest.version > exe.version) 178 | } 179 | -------------------------------------------------------------------------------- /src/verify.rs: -------------------------------------------------------------------------------- 1 | use super::{Lockfile, Manifest, LalResult}; 2 | use input; 3 | 4 | /// Verifies that `./INPUT` satisfies all strictness conditions. 5 | /// 6 | /// This first verifies that there are no key mismatches between `defaultConfig` and 7 | /// `configurations` in the manifest. 8 | /// 9 | /// Once this is done, `INPUT` is analysed thoroughly via each components lockfiles. 
/// Verifies that `./INPUT` satisfies all strictness conditions.
///
/// This first verifies that there are no key mismatches between `defaultConfig` and
/// `configurations` in the manifest.
///
/// Once this is done, `INPUT` is analysed thoroughly via each component's lockfiles.
/// Missing dependencies, or multiple versions depended on implicitly are both
/// considered errors for verify, as are having custom versions in `./INPUT`.
///
/// This function is meant to be a helper for when we want official builds, but also
/// a way to tell developers that they are using things that differ from what jenkins
/// would use.
///
/// A simple verify was added to aid the workflow of stashed components.
/// Users can use `lal verify --simple` or `lal build -s` aka. `--simple-verify`,
/// instead of having to use `lal build --force` when just using stashed components.
/// This avoids problems with different environments going undetected.
pub fn verify(m: &Manifest, env: &str, simple: bool) -> LalResult<()> {
    // 1. Verify that the manifest is sane
    m.verify()?;

    // 2. dependencies in `INPUT` match `manifest.json`.
    if m.dependencies.is_empty() && !input::present() {
        // special case where lal fetch is not required and so INPUT may not exist
        // nothing needs to be verified in this case, so allow missing INPUT
        return Ok(());
    }
    input::verify_dependencies_present(m)?;

    // get data for big verify steps
    let lf = Lockfile::default().populate_from_input()?;

    // 3. verify the root level dependencies match the manifest
    if !simple {
        input::verify_global_versions(&lf, m)?;
    }

    // 4. the dependency tree is flat, and deps use only global deps
    if !simple {
        input::verify_consistent_dependency_versions(&lf, m)?;
    }

    // 5. verify all components are built in the same environment
    input::verify_environment_consistency(&lf, env)?;

    info!("Dependencies fully verified");
    Ok(())
}
4 | -------------------------------------------------------------------------------- /tests/helloworld/hello.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include "hey.h" 3 | 4 | int main() { 5 | printf("%s World!\n", greeting()); 6 | return 0; 7 | } 8 | -------------------------------------------------------------------------------- /tests/helloworld/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "hello", 3 | "environment": "alpine", 4 | "supportedEnvironments": [ 5 | "alpine" 6 | ], 7 | "components": { 8 | "hello": { 9 | "defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": { 16 | "heylib": 1 17 | }, 18 | "devDependencies": {} 19 | } 20 | -------------------------------------------------------------------------------- /tests/heylib/BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | make 3 | -------------------------------------------------------------------------------- /tests/heylib/Makefile: -------------------------------------------------------------------------------- 1 | all: output 2 | 3 | hey.o: hey.c hey.h 4 | $(CC) -O -c hey.c 5 | 6 | libhey.a: hey.o 7 | ar rcs libhey.a hey.o 8 | 9 | output: libhey.a hey.h 10 | cp libhey.a OUTPUT/ 11 | cp hey.h OUTPUT/ 12 | 13 | clean: 14 | rm hey.o libhey.a 15 | -------------------------------------------------------------------------------- /tests/heylib/README.md: -------------------------------------------------------------------------------- 1 | # heylib 2 | 3 | Test component manipulated during tests. 4 | This is fed into the helloworld component. 
/* Returns the constant greeting prefix; dependent test components print it
 * before " World!". The pointed-to string has static storage duration. */
const char *greeting() {
    static const char *const kMessage = "Hello there";
    return kMessage;
}
-------------------------------------------------------------------------------- /tests/prop-leaf/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "prop-leaf", 3 | "environment": "alpine", 4 | "supportedEnvironments": [ 5 | "alpine" 6 | ], 7 | "components": { 8 | "prop-leaf": { 9 | "defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": {}, 16 | "devDependencies": {} 17 | } 18 | -------------------------------------------------------------------------------- /tests/prop-mid-1/BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "pretend build" 3 | -------------------------------------------------------------------------------- /tests/prop-mid-1/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "prop-mid-1", 3 | "environment": "alpine", 4 | "supportedEnvironments": [ 5 | "alpine" 6 | ], 7 | "components": { 8 | "prop-mid-1": { 9 | "defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": { 16 | "prop-leaf": 1 17 | }, 18 | "devDependencies": {} 19 | } 20 | -------------------------------------------------------------------------------- /tests/prop-mid-2/BUILD: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "pretend build" 3 | -------------------------------------------------------------------------------- /tests/prop-mid-2/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "prop-mid-2", 3 | "environment": "alpine", 4 | "supportedEnvironments": [ 5 | "alpine" 6 | ], 7 | "components": { 8 | "prop-mid-2": { 9 | "defaultConfig": "release", 10 | "configurations": [ 11 | "release" 12 | ] 13 | } 14 | }, 15 | "dependencies": { 16 | "prop-leaf": 1 17 | }, 18 | 
"devDependencies": {} 19 | } 20 | --------------------------------------------------------------------------------