├── .gitignore ├── .travis.yml ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── Vagrantfile ├── bin └── lithos_mkdev ├── bulk-check.yaml ├── bulk.yaml ├── description-pak ├── docs ├── Makefile ├── changelog.rst ├── conf.py ├── config.rst ├── container_config.rst ├── faq.rst ├── index.rst ├── lithosdomain.py ├── master_config.rst ├── metrics.rst ├── process_config.rst ├── requirements.txt ├── sandbox_config.rst ├── tips │ ├── index.rst │ ├── secrets.rst │ ├── tcp-ports.rst │ └── vagga.rst └── volumes.rst ├── example_configs.sh ├── examples ├── multi_level │ ├── code │ │ ├── py.yaml │ │ ├── sock.py │ │ ├── socket.py │ │ └── socket.yaml │ └── configs │ │ ├── master.yaml │ │ ├── processes │ │ └── socket.yaml │ │ └── sandboxes │ │ └── socket.yaml ├── py │ ├── code │ │ ├── py.yaml │ │ ├── sock.py │ │ ├── socket.py │ │ └── socket.yaml │ └── configs │ │ ├── master.yaml │ │ ├── processes │ │ └── socket.yaml │ │ └── sandboxes │ │ └── socket.yaml ├── py_bridged │ ├── code │ │ ├── py.yaml │ │ ├── sock.py │ │ └── socket.yaml │ └── configs │ │ ├── master.yaml │ │ ├── processes │ │ └── socket.yaml │ │ └── sandboxes │ │ └── socket.yaml ├── py_systemd │ ├── code │ │ ├── py.yaml │ │ ├── sock.py │ │ ├── socket.py │ │ └── socket.yaml │ └── configs │ │ ├── master.yaml │ │ ├── processes │ │ └── socket.yaml │ │ └── sandboxes │ │ └── socket.yaml └── py_var │ ├── code │ ├── py.yaml │ ├── sock.py │ ├── socket.py │ └── socket.yaml │ └── configs │ ├── master.yaml │ ├── processes │ └── socket.yaml │ └── sandboxes │ └── socket.yaml ├── src ├── bin │ ├── lithos_check.rs │ ├── lithos_clean.rs │ ├── lithos_cmd.rs │ ├── lithos_crypt.rs │ ├── lithos_knot │ │ ├── config.rs │ │ ├── main.rs │ │ ├── secrets.rs │ │ ├── setup_filesystem.rs │ │ └── setup_network.rs │ ├── lithos_ps │ │ ├── ascii.rs │ │ └── main.rs │ ├── lithos_switch.rs │ └── lithos_tree │ │ ├── args.rs │ │ └── main.rs ├── cgroup.rs ├── child_config.rs ├── container_config.rs ├── id_map.rs ├── itertools.rs ├── knot_options.rs ├── lib.rs ├── limits.rs ├── master_config.rs ├── metrics.rs ├── mount.rs ├── nacl.rs ├── network.rs ├── pipe.rs ├── range.rs ├── sandbox_config.rs ├── setup.rs ├── timer_queue.rs ├── tree_options.rs └── utils.rs ├── systemd.service ├── upstart.conf └── vagga.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | /lithos_* 2 | /*.a 3 | /*.o 4 | /*.rlib 5 | 6 | *.orig 7 | *.pyc 8 | __pycache__ 9 | 10 | /tmp 11 | /run 12 | /pkg 13 | /dist 14 | /.vagga 15 | /.vagrant 16 | /todo.txt 17 | /docs/_build 18 | /target 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: stable 3 | os: linux 4 | dist: trusty 5 | sudo: false 6 | addons: 7 | apt: 8 | packages: 9 | - fakeroot 10 | - musl-tools 11 | 12 | cache: 13 | - apt 14 | - cargo 15 | 16 | before_cache: 17 | - rm -r $TRAVIS_BUILD_DIR/target/debug 18 | 19 | script: 20 | - cargo build $CARGO_ARGS 21 | - cargo test $CARGO_ARGS 22 | 23 | jobs: 24 | include: 25 | - rust: stable 26 | - rust: beta 27 | - rust: nightly 28 | # deploy 29 | - stage: publish 30 | env: 31 | # CARGO_TOKEN 32 | - secure: "s1jFwyOLzZELLLOQ6INKx05B6NiNHTvuh6k8Vrgz57Vcsf8021rdWnBAQxf84vMcxjmkpWasDPYCLmUVVYZW3AVCPEtKpxMrUbIUk1I/4F5mdfJvtVZSkfg9BL5cXigoq7ye9C75rTqjGgh+z1SnAf6DIslYYOu1yeFpA9W+Eek=" 33 | # GH_TOKEN 34 | secure: 
"lugFTYZMCHe0sORskcN9Qx+2yFydDsedOrwW/HE7rO8fqJFNMk6fV0X//4w3VWQ8t/ZJjYG7kJmMtImP/rvD4WtFGl+ncOq7de56dTNTbryQBnYaFdm5SiOTSBYxTKqf+T46F19KonZbMaeAQZ3d9WAWd34ViMEMVZACacpZShQ=" 35 | install: true 36 | before_deploy: | 37 | mkdir -p dist 38 | 39 | cargo publish --token=$CARGO_TOKEN 40 | 41 | rustup target add x86_64-unknown-linux-musl 42 | cargo build --target=x86_64-unknown-linux-musl --release 43 | fakeroot sh -ecx ' 44 | install -D target/x86_64-unknown-linux-musl/release/lithos_check pkg/usr/bin/lithos_check 45 | tar -C pkg -czf dist/lithos_check-static-$TRAVIS_TAG.tar.gz usr 46 | ' 47 | rm -rf pkg 48 | fakeroot sh -ecx ' 49 | install -D target/x86_64-unknown-linux-musl/release/lithos_crypt pkg/usr/bin/lithos_crypt 50 | tar -C pkg -czf dist/lithos_crypt-static-$TRAVIS_TAG.tar.gz usr 51 | ' 52 | deploy: 53 | provider: releases 54 | api_key: 55 | secure: "si7BVNFjW3gny3nQoIt/yMLjS848BeWbBNy6dVgKUhKxqzq7lgvXsLMIMtO6isc7Zwqzg6QR83usrSnUw+qL8X6fKvicce5gxroF17aBXOQhXU4PEBCX8hB8cCQGhWsogPEdJr0Xl/zIrbBKE3Xg8LxSvaLjIzOubSkwGhNKiZg=" 56 | file: 57 | - "dist/lithos_check-static-$TRAVIS_TAG.tar.gz" 58 | - "dist/lithos_crypt-static-$TRAVIS_TAG.tar.gz" 59 | skip_cleanup: true 60 | on: 61 | tags: true 62 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | 3 | name = "lithos" 4 | description = "A containerization framework for linux" 5 | license = "MIT/Apache-2.0" 6 | readme = "README.md" 7 | keywords = ["linux", "namespaces", "containers"] 8 | homepage = "http://github.com/tailhook/lithos" 9 | version = "0.18.4" 10 | categories = [ 11 | "command-line-utilities", 12 | "os::unix-apis", 13 | ] 14 | authors = ["Paul Colomiets "] 15 | 16 | [dependencies] 17 | regex = "1.0.0" 18 | libc = "0.2.21" 19 | argparse = "0.2.1" 20 | quire = "0.4.0" 21 | fern = "0.4" 22 | log = "0.3" 23 | env_logger = "0.5.4" 24 | nix = "0.11.0" 25 | unshare = "0.5.0" 26 | signal = "0.6.0" 27 | syslog = "3.1.0" 28 | scan_dir = "0.3.3" 29 | libmount = "0.1.10" 30 | lazy_static = "1.0.0" 31 | serde = "1.0.0" 32 | serde_derive = "1.0.0" 33 | serde_json = "1.0.0" 34 | matches = "0.1.4" 35 | humantime = "1.1.0" 36 | libcantal = "0.3.0" 37 | ipnetwork = "0.13.0" 38 | serde_str = "0.1.0" 39 | failure = "0.1.0" 40 | blake2 = "0.7.0" 41 | rust-crypto = "0.2.36" 42 | ssh-keys = "0.1.1" 43 | structopt = "0.2.6" 44 | rand = "0.5.0" 45 | base64 = "0.9.0" 46 | sha2 = "0.7.0" 47 | 48 | [profile.release] 49 | debug = true 50 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014-2015 Paul Colomiets 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 
12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PREFIX ?= /usr 2 | DESTDIR ?= 3 | 4 | all: bin 5 | 6 | test: lithos_test 7 | ./lithos_test 8 | 9 | bin: 10 | cargo build --verbose 11 | 12 | 13 | install: bin-release _install 14 | 15 | bin-release: 16 | cargo build --release -v 17 | 18 | _install: 19 | install -d $(DESTDIR)$(PREFIX)/bin 20 | install -m 755 target/release/lithos_tree $(DESTDIR)$(PREFIX)/bin/lithos_tree 21 | install -m 755 target/release/lithos_knot $(DESTDIR)$(PREFIX)/bin/lithos_knot 22 | install -m 755 target/release/lithos_check $(DESTDIR)$(PREFIX)/bin/lithos_check 23 | install -m 755 target/release/lithos_clean $(DESTDIR)$(PREFIX)/bin/lithos_clean 24 | install -m 755 target/release/lithos_cmd $(DESTDIR)$(PREFIX)/bin/lithos_cmd 25 | install -m 755 target/release/lithos_switch $(DESTDIR)$(PREFIX)/bin/lithos_switch 26 | install -m 755 target/release/lithos_ps $(DESTDIR)$(PREFIX)/bin/lithos_ps 27 | install -m 755 target/release/lithos_crypt $(DESTDIR)$(PREFIX)/bin/lithos_crypt 28 | install -m 755 bin/lithos_mkdev $(DESTDIR)$(PREFIX)/bin/lithos_mkdev 29 | 30 | ubuntu-packages: version:=$(shell git describe --dirty) 31 | ubuntu-packages: codename:=$(shell lsb_release --codename --short) 32 | ubuntu-packages: 33 | rm -rf pkg 34 | rm -rf target/release/lithos_* 35 | bulk with-version "$(version)" cargo build --release 36 | make _install DESTDIR=/work/pkg 37 | bulk pack --package-version="$(version)+$(codename)1" 38 | 39 | ubuntu-lithos_check-package: version:=$(shell git describe --dirty) 40 | ubuntu-lithos_check-package: 41 | -rm -rf pkg 42 | -rm -rf target/x86_64-unknown-linux-musl/debug/lithos_* 43 | bulk with-version "$(version)" \ 44 | cargo build --target=x86_64-unknown-linux-musl --bin=lithos_check 45 | install -D ./target/x86_64-unknown-linux-musl/debug/lithos_check \ 46 | pkg/usr/bin/lithos_check 47 | bulk pack --config=bulk-check.yaml --package-version="$(version)" 48 | 49 | 50 | .PHONY: all bin install test _install bin-release ubuntu-packages ubuntu-lithos_check-package 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Lithos 2 | 3 | [Documentation](http://lithos.readthedocs.org) 4 | 5 | 6 | Lithos is a process supervisor and containerizer for running services. Lithos 7 | is not intended to be system init. But rather tries to be a base tool to build 8 | container orchestration. 9 | 10 | Features: 11 | 12 | * use linux namespaces and cgroups for containerization 13 | * immediate restart of failing processes (with rate limit of course) 14 | * in-place upgrade of lithos without touching child processes 15 | * written in Rust so memory-safe and has zero runtime dependencies 16 | 17 | It's designed to have absolutely minimal required functionality. 
In particular 18 | it doesn't include: 19 | 20 | * an image downloader (``rsync`` is super-cool) or builder (use any tool) 21 | * any network API 22 | 23 | 24 | ## Running Examples 25 | 26 | Testing it in vagrant:: 27 | 28 | vagrant up && vagrant ssh 29 | 30 | In vagrant shell:: 31 | 32 | $ ./example_configs.sh 33 | $ sudo lithos_tree 34 | 35 | If you want to change containers, sources or configs of this test vagrant 36 | deployment just rerun ``./example_configs.sh``. 37 | 38 | (Note: in this test deployment lithos doesn't properly reload configs, because 39 | images does not version properly. Just restart `lithos_tree` to apply the 40 | changes) 41 | 42 | 43 | License 44 | ======= 45 | 46 | Licensed under either of 47 | 48 | * Apache License, Version 2.0, 49 | (./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) 50 | * MIT license (./LICENSE-MIT or http://opensource.org/licenses/MIT) 51 | at your option. 52 | 53 | Contribution 54 | ------------ 55 | 56 | Unless you explicitly state otherwise, any contribution intentionally 57 | submitted for inclusion in the work by you, as defined in the Apache-2.0 58 | license, shall be dual licensed as above, without any additional terms or 59 | conditions. 60 | 61 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | Vagrant.configure(2) do |config| 4 | 5 | config.vm.box = "ubuntu/trusty64" 6 | 7 | config.vm.provision "shell", inline: <<-SHELL 8 | set -ex 9 | echo 'deb [trusted=yes] http://ubuntu.zerogw.com vagga-testing main' | tee /etc/apt/sources.list.d/vagga.list 10 | apt-get update 11 | apt-get install -y vagga 12 | apt-get install cgroup-lite # until we migrate to trusty 13 | cd /vagrant 14 | vagga _build py-example 15 | vagga _build trusty 16 | ./example_configs.sh 17 | SHELL 18 | 19 | config.vm.provision "shell", run: "always", inline: <<-SHELL 20 | set -ex 21 | ensure_dir() { [ -d $1 ] || ( mkdir $1 && chown vagrant $1 ); } 22 | ensure_dir /vagrant/.vagga 23 | ensure_dir /vagrant/target 24 | ensure_dir /home/vagrant/.cache/_vagga 25 | ensure_dir /home/vagrant/.cache/_cargo 26 | mount --bind /home/vagrant/.cache/_vagga /vagrant/.vagga 27 | mount --bind /home/vagrant/.cache/_cargo /vagrant/target 28 | SHELL 29 | 30 | end 31 | -------------------------------------------------------------------------------- /bin/lithos_mkdev: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | target_dir="$1" 4 | 5 | if test -z "${target_dir}"; then 6 | echo "Usage: lithos-mkdev /var/lib/lithos/dev" >&2 7 | exit 1 8 | fi 9 | 10 | set -x 11 | 12 | mkdir -p "${target_dir}" 13 | mknod -m 666 "${target_dir}/null" c 1 3 14 | mknod -m 666 "${target_dir}/zero" c 1 5 15 | mknod -m 666 "${target_dir}/random" c 1 8 16 | mknod -m 666 "${target_dir}/urandom" c 1 9 17 | mknod -m 666 "${target_dir}/tty" c 5 0 18 | mknod -m 666 "${target_dir}/full" c 1 7 19 | mkdir "${target_dir}/pts" 20 | ln -s "pts/ptmx" "${target_dir}/ptmx" 21 | mkdir "${target_dir}/shm" 22 | 23 | ln -s /proc/self/fd "${target_dir}/fd" 24 | ln -s /proc/self/fd/0 "${target_dir}/stdin" 25 | ln -s /proc/self/fd/1 "${target_dir}/stdout" 26 | ln -s /proc/self/fd/2 "${target_dir}/stderr" 27 | -------------------------------------------------------------------------------- /bulk-check.yaml: -------------------------------------------------------------------------------- 1 | 
minimum-bulk: v0.4.5 2 | 3 | metadata: 4 | name: lithos-check 5 | short-description: Utility to check lithos configuration files offline 6 | long-description: | 7 | While this utility is in the lithos package itself you may want to rust 8 | have lithos_check without all other stuff on development machine. Just to 9 | check configuration file syntax. 10 | 11 | repositories: 12 | 13 | - kind: debian 14 | suite: static 15 | component: lithos-check 16 | keep-releases: 1 17 | match-version: ^\d+\.\d+\.\d+$ 18 | 19 | - kind: debian 20 | suite: static 21 | component: lithos-check-stable 22 | keep-releases: 1000 23 | match-version: ^\d+\.\d+\.\d+$ 24 | 25 | - kind: debian 26 | suite: static 27 | component: lithos-check-testing 28 | keep-releases: 100 29 | -------------------------------------------------------------------------------- /bulk.yaml: -------------------------------------------------------------------------------- 1 | minimum-bulk: v0.4.5 2 | 3 | metadata: 4 | name: lithos 5 | short-description: A process supervisor with containers 6 | long-description: | 7 | Lithos allows you to run and restart containers 8 | 9 | repositories: 10 | 11 | # trusty 12 | - kind: debian 13 | suite: trusty 14 | component: lithos 15 | keep-releases: 1 16 | match-version: ^\d+\.\d+\.\d+\+trusty1$ 17 | 18 | - kind: debian 19 | suite: trusty 20 | component: lithos-stable 21 | keep-releases: 1000 22 | match-version: ^\d+\.\d+\.\d+\+trusty1$ 23 | 24 | - kind: debian 25 | suite: trusty 26 | component: lithos-testing 27 | keep-releases: 100 28 | match-version: \+trusty1$ 29 | 30 | # xenial 31 | - kind: debian 32 | suite: xenial 33 | component: lithos 34 | keep-releases: 1 35 | match-version: ^\d+\.\d+\.\d+\+xenial1$ 36 | 37 | - kind: debian 38 | suite: xenial 39 | component: lithos-stable 40 | keep-releases: 1000 41 | match-version: ^\d+\.\d+\.\d+\+xenial1$ 42 | 43 | - kind: debian 44 | suite: xenial 45 | component: lithos-testing 46 | keep-releases: 100 47 | match-version: \+xenial1$ 48 | 49 | # bionic 50 | - kind: debian 51 | suite: bionic 52 | component: lithos 53 | keep-releases: 1 54 | match-version: ^\d+\.\d+\.\d+\+bionic1$ 55 | 56 | - kind: debian 57 | suite: bionic 58 | component: lithos-stable 59 | keep-releases: 1000 60 | match-version: ^\d+\.\d+\.\d+\+bionic1$ 61 | 62 | - kind: debian 63 | suite: bionic 64 | component: lithos-testing 65 | keep-releases: 100 66 | match-version: \+bionic1$ 67 | 68 | versions: 69 | 70 | - file: Cargo.toml 71 | block-start: ^\[package\] 72 | block-end: ^\[.*\] 73 | regex: ^version\s*=\s*"(\S+)" 74 | 75 | - file: docs/conf.py 76 | regex: ^version\s*=\s*'(\S+)' 77 | partial-version: ^\d+\.\d+ 78 | 79 | - file: docs/conf.py 80 | regex: ^release\s*=\s*'(\S+)' 81 | 82 | # for more automation we also update the lockfile 83 | 84 | - file: Cargo.lock 85 | block-start: ^name\s*=\s*"lithos" 86 | regex: ^version\s*=\s*"(\S+)" 87 | block-end: ^\[.*\] 88 | -------------------------------------------------------------------------------- /description-pak: -------------------------------------------------------------------------------- 1 | Process supervisor that supports linux containers 2 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Lithos.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Lithos.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Lithos" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Lithos" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. 
The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Lithos Changes By Release 3 | ========================= 4 | 5 | 6 | .. _changelog 0.19.0: 7 | 8 | v0.19.0 9 | ======= 10 | 11 | * Feature: a new process in a bridged network gets the CAP_NET_BIND_SERVICE 12 | capability in its own network namespace (effectively allowing it to 13 | bind port 80, 443 or any other port < 1024) 14 | * Bugfix: made ``default-gateway`` in ``bridged-network`` optional 15 | * Bugfix: lithos now deletes the veth interface if it already exists, before starting 16 | a process (previously you needed to resolve this issue manually) 17 | 18 | Note: we're making this release major to show that it requires more testing 19 | than a regular update. This is because we changed the internals quite a bit to 20 | allow a network namespace owned by the process. 21 | 22 | 23 | .. _changelog 0.18.4: 24 | 25 | v0.18.4 26 | ======= 27 | 28 | * Bugfix: only send SIGTERM to the process once when upgrading or stopping it 29 | (this prevents certain issues with the applications themselves) 30 | * Bugfix: don't reset the kill timeout on SIGQUIT of lithos_tree 31 | * Bugfix: correctly wait for the kill timeout for retired children (not in the 32 | config any more) 33 | 34 | 35 | .. _changelog 0.18.3: 36 | 37 | v0.18.3 38 | ======= 39 | 40 | * Bugfix: it looks like reading through ``/proc/`` is inherently racy, 41 | i.e. some processes may be skipped. This release makes the walk faster and traverses the 42 | directory twice. A more elaborate fix will be implemented in the future. 43 | 44 | 45 | .. _changelog 0.18.2: 46 | 47 | v0.18.2 48 | ======= 49 | 50 | * Feature: add ``secret-environ-file`` which can be used to offload secrets 51 | to a separate (perhaps shared) file 52 | 53 | .. _changelog 0.18.1: 54 | 55 | v0.18.1 56 | ======= 57 | 58 | * Feature: add ``set-non-block`` option to tcp-ports 59 | 60 | 61 | .. _changelog 0.18.0: 62 | 63 | v0.18.0 64 | ======= 65 | 66 | * Breaking: we don't run ``arping`` after container setup by default, 67 | as it `doesn't work in certain environments`__. 68 | Use :bopt:`after-setup-command` instead. 69 | 70 | __ https://github.com/tailhook/lithos/issues/17 71 | 72 | .. _changelog 0.17.8: 73 | 74 | v0.17.8 75 | ======= 76 | 77 | * Bugfix: fixes an issue with bridged networking when the host system 78 | is alpine (`#15`__) 79 | 80 | __ https://github.com/tailhook/lithos/issues/15 81 | 82 | 83 | .. _changelog 0.17.7: 84 | 85 | v0.17.7 86 | ======= 87 | 88 | * Bugfix: log the name of the process when lithos_knot fails 89 | * Bugfix: more robust parsing of process names by lithos_ps 90 | * Feature: add ``@{lithos:pid}`` magic variable 91 | 92 | 93 | .. _changelog 0.17.6: 94 | 95 | v0.17.6 96 | ======= 97 | 98 | * Bugfix: systemd protocol support fixed: LISTEN_FDNAMES and LISTEN_PID 99 | 100 | 101 | .. _changelog 0.17.5: 102 | 103 | v0.17.5 104 | ======= 105 | 106 | * Feature: check variable substitution with ``lithos_check`` even in 107 | ``--check-container`` (out of system) mode 108 | 109 | .. _changelog 0.17.4: 110 | 111 | v0.17.4 112 | ======= 113 | 114 | * Feature: Add ``DottedName`` :ref:`variable type ` 115 | * Feature: Add ``activation`` parameter to ``TcpPort`` variable 116 | 117 | 118 | .. _changelog 0.17.3: 119 | 120 | v0.17.3 121 | ======= 122 | 123 | * Bugfix: fix EADDRINUSE error when all children requiring a file descriptor 124 | were queued for restart (throttled); the bug was due to a duped socket lying in a 125 | scheduled command (where the main socket is closed to notify peers there are 126 | no listeners) 127 | 128 | 129 | .. _changelog 0.17.2: 130 | 131 | v0.17.2 132 | ======= 133 | 134 | * Bugfix: previously a lithos_tree process after fork but before execing 135 | lithos_knot could be recognized as an undefined child and killed. 136 | This race condition sometimes led to closing sockets prematurely and being 137 | unable to listen on them again 138 | 139 | 140 | .. _changelog 0.17.1: 141 | 142 | v0.17.1 143 | ======= 144 | 145 | * Bugfix: passing sockets as FDs in a non-bridged network was broken in v0.17.0 146 | 147 | 148 | .. _changelog 0.17.0: 149 | 150 | v0.17.0 151 | ======= 152 | 153 | * Breaking: add ``external`` flag to :opt:`tcp-ports`, which by default is 154 | ``false`` (previous behavior was equal to ``external: true``) 155 | * Bugfix: ``lithos_cmd`` now returns exit code 0 if the underlying command 156 | exited successfully (was broken in 0.15.5) 157 | 158 | 159 | .. _changelog 0.16.0: 160 | 161 | v0.16.0 162 | ======= 163 | 164 | * Breaking: remove ``v1`` encryption for secrets (it was alive for a week) 165 | * Feature: add :opt:`secrets-namespaces` and :opt:`extra-secrets-namespaces` 166 | options to allow namespacing secrets on top of a single key 167 | * Feature: add ``v2`` key encryption scheme 168 | 169 | 170 | .. _changelog 0.15.6: 171 | 172 | v0.15.6 173 | ======= 174 | 175 | * Feature: add :opt:`secret-environ` and :opt:`secrets-private-key` settings 176 | which allow passing decrypted environment variables to the application 177 | * Bugfix: when bridged network is enabled we use ``arping`` to update the ARP cache 178 | 179 | 180 | .. _changelog 0.15.5: 181 | 182 | v0.15.5 183 | ======= 184 | 185 | * Bugfix: add support for bridged-network and ip-addresses for lithos_cmd 186 | * Bugfix: initialize the loopback interface in the container when ``bridged-network`` 187 | is configured 188 | * Feature: allow ``lithos_cmd`` without ``ip_addresses`` (only loopback is 189 | initialized in this case) 190 | * Bugfix: return an error result from ``lithos_cmd`` if the inner process failed 191 | 192 | 193 | .. _changelog 0.15.4: 194 | 195 | v0.15.4 196 | ======= 197 | 198 | * First release that drops support for ubuntu precise and 199 | adds a repository for ubuntu bionic 200 | * Bugfix: passing a TCP port as fd < 3 didn't work before; now we allow ``fd: 0`` 201 | and fail gracefully on 1, 2. 202 | 203 | 204 | .. _changelog 0.15.3: 205 | 206 | v0.15.3 207 | ======= 208 | 209 | * feature: Add :opt:`default-user` and :opt:`default-group` to simplify 210 | container config 211 | * bugfix: fix containers having symlinks at ``/etc/{resolv.conf, hosts}`` 212 | (broken in v0.15.0) 213 | 214 | .. _changelog 0.15.2: 215 | 216 | v0.15.2 217 | ======= 218 | 219 | * bugfix: containers without bridged network work again 220 | 221 | 222 | .. _changelog 0.15.1: 223 | 224 | v0.15.1 225 | ======= 226 | 227 | * nothing changed, fixed tests only 228 | 229 | .. _changelog 0.15.0: 230 | 231 | v0.15.0 232 | ======= 233 | 234 | * feature: Add :opt:`normal-exit-codes` setting 235 | * feature: Add :opt:`resolv-conf` and :opt:`hosts-file` to sandbox config 236 | * feature: Add :opt:`bridged-network` option to sandbox config 237 | * breaking: By default ``/etc/hosts`` and ``/etc/resolv.conf`` will be mounted 238 | if they are proper mount points (can be opted out of in the container config) 239 | 240 | 241 | .. _changelog 0.14.3: 242 | 243 | v0.14.3 244 | ======= 245 | 246 | * Bugfix: when more than one variable is used lithos was restarting the process 247 | every time (because of unstable serialization of a hashmap) 248 | 249 | 250 | .. _changelog 0.14.2: 251 | 252 | v0.14.2 253 | ======= 254 | 255 | * Bugfix: if ``auto-clean`` is different in several sandboxes looking at the 256 | same image directory we skip cleaning the dir and print a warning 257 | * Add a timestamp to ``lithos_clean`` output (in ``--delete-unused`` mode) 258 | 259 | .. _changelog 0.14.1: 260 | 261 | v0.14.1 262 | ======= 263 | 264 | * Bugfix: variable substitution was broken in v0.14.0 265 | 266 | 267 | .. _changelog 0.14.0: 268 | 269 | v0.14.0 270 | ======= 271 | 272 | * Sets ``memory.memsw.limit_in_bytes`` if it exists (usually requires 273 | ``swapaccount=1`` in kernel params). 274 | * Adds a warning-level message on process startup 275 | * Duplicates startup and death messages into the stderr log, so you can correlate 276 | them with application messages 277 | 278 | 279 | .. _changelog 0.13.2: 280 | 281 | v0.13.2 282 | ======= 283 | 284 | * Upgrades many dependencies, no significant changes or bugfixes 285 | 286 | 287 | .. _changelog 0.13.1: 288 | 289 | v0.13.1 290 | ======= 291 | 292 | * Adds :opt:`auto-clean` setting 293 | 294 | 295 | .. _changelog 0.13.0: 296 | 297 | v0.13.0 298 | ======= 299 | 300 | * ``/dev/pts/ptmx`` is created with ``ptmxmode=0666``, which makes it suitable 301 | for creating ptys by unprivileged users. We always used the ``newinstance`` 302 | option, so it should be safe enough. And it also matches how ``ptmx`` is 303 | configured on most systems by default 304 | 305 | .. _changelog 0.12.1: 306 | 307 | v0.12.1 308 | ======= 309 | 310 | * Added ``image-dir-levels`` parameter which allows using images in the 311 | form of ``xx/yy/zz`` (for a value of ``3``) instead of a bare name 312 | 313 | .. _changelog 0.12.0: 314 | 315 | v0.12.0 316 | ======= 317 | 318 | * Fixed order of ``sandbox-name.process-name`` in metrics 319 | * Dropped setting ``cantal-appname`` (it was never useful, because cantal 320 | actually uses the cgroup name, and the lithos master process actually has one) 321 | 322 | .. _changelog 0.11.0: 323 | 324 | v0.11.0 325 | ======= 326 | 327 | * Option :opt:`cantal-appname` added to the config 328 | * If no ``CANTAL_PATH`` is present in the environment we set it to some default, 329 | along with ``CANTAL_APPNAME=lithos`` unless :opt:`cantal-appname` is 330 | overridden. 331 | * Added default container environment variable ``LITHOS_CONFIG``. It may be used to 332 | log the config name, read metadata and for other purposes. 333 | 334 | 335 | .. _changelog 0.10.7: 336 | 337 | v0.10.7 338 | ======= 339 | 340 | * Cantal_ metrics added 341 | 342 | ..
_cantal: https://cantal.readthedocs.io 343 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Lithos documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Jan 21 13:42:37 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | sys.path.insert(0, os.path.abspath('.')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | #needs_sphinx = '1.0' 27 | 28 | # Add any Sphinx extension module names here, as strings. They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = ['lithosdomain'] 32 | primary_domain = 'lithos' 33 | 34 | # Add any paths that contain templates here, relative to this directory. 35 | templates_path = ['_templates'] 36 | 37 | # The suffix of source filenames. 38 | source_suffix = '.rst' 39 | 40 | # The encoding of source files. 41 | #source_encoding = 'utf-8-sig' 42 | 43 | # The master toctree document. 44 | master_doc = 'index' 45 | 46 | # General information about the project. 47 | project = u'Lithos' 48 | copyright = u'2015, Paul Colomiets' 49 | 50 | # The version info for the project you're documenting, acts as replacement for 51 | # |version| and |release|, also used in various other places throughout the 52 | # built documents. 53 | # 54 | # The short X.Y version. 55 | version = '0.18' 56 | # The full version, including alpha/beta/rc tags. 57 | release = '0.18.4' 58 | 59 | # The language for content autogenerated by Sphinx. Refer to documentation 60 | # for a list of supported languages. 61 | #language = None 62 | 63 | # There are two options for replacing |today|: either, you set today to some 64 | # non-false value, then it is used: 65 | #today = '' 66 | # Else, today_fmt is used as the format for a strftime call. 67 | #today_fmt = '%B %d, %Y' 68 | 69 | # List of patterns, relative to source directory, that match files and 70 | # directories to ignore when looking for source files. 71 | exclude_patterns = ['_build'] 72 | 73 | # The reST default role (used for this markup: `text`) to use for all 74 | # documents. 75 | #default_role = None 76 | 77 | # If true, '()' will be appended to :func: etc. cross-reference text. 78 | #add_function_parentheses = True 79 | 80 | # If true, the current module name will be prepended to all description 81 | # unit titles (such as .. function::). 82 | #add_module_names = True 83 | 84 | # If true, sectionauthor and moduleauthor directives will be shown in the 85 | # output. They are ignored by default. 86 | #show_authors = False 87 | 88 | # The name of the Pygments (syntax highlighting) style to use. 89 | pygments_style = 'sphinx' 90 | 91 | # A list of ignored prefixes for module index sorting. 
92 | #modindex_common_prefix = [] 93 | 94 | # If true, keep warnings as "system message" paragraphs in the built documents. 95 | #keep_warnings = False 96 | 97 | 98 | # -- Options for HTML output ---------------------------------------------- 99 | 100 | # The theme to use for HTML and HTML Help pages. See the documentation for 101 | # a list of builtin themes. 102 | html_theme = 'default' 103 | 104 | # Theme options are theme-specific and customize the look and feel of a theme 105 | # further. For a list of options available for each theme, see the 106 | # documentation. 107 | #html_theme_options = {} 108 | 109 | # Add any paths that contain custom themes here, relative to this directory. 110 | #html_theme_path = [] 111 | 112 | # The name for this set of Sphinx documents. If None, it defaults to 113 | # " v documentation". 114 | #html_title = None 115 | 116 | # A shorter title for the navigation bar. Default is the same as html_title. 117 | #html_short_title = None 118 | 119 | # The name of an image file (relative to this directory) to place at the top 120 | # of the sidebar. 121 | #html_logo = None 122 | 123 | # The name of an image file (within the static path) to use as favicon of the 124 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 125 | # pixels large. 126 | #html_favicon = None 127 | 128 | # Add any paths that contain custom static files (such as style sheets) here, 129 | # relative to this directory. They are copied after the builtin static files, 130 | # so a file named "default.css" will overwrite the builtin "default.css". 131 | html_static_path = ['_static'] 132 | 133 | # Add any extra paths that contain custom files (such as robots.txt or 134 | # .htaccess) here, relative to this directory. These files are copied 135 | # directly to the root of the documentation. 136 | #html_extra_path = [] 137 | 138 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 139 | # using the given strftime format. 140 | #html_last_updated_fmt = '%b %d, %Y' 141 | 142 | # If true, SmartyPants will be used to convert quotes and dashes to 143 | # typographically correct entities. 144 | #html_use_smartypants = True 145 | 146 | # Custom sidebar templates, maps document names to template names. 147 | #html_sidebars = {} 148 | 149 | # Additional templates that should be rendered to pages, maps page names to 150 | # template names. 151 | #html_additional_pages = {} 152 | 153 | # If false, no module index is generated. 154 | #html_domain_indices = True 155 | 156 | # If false, no index is generated. 157 | #html_use_index = True 158 | 159 | # If true, the index is split into individual pages for each letter. 160 | #html_split_index = False 161 | 162 | # If true, links to the reST sources are added to the pages. 163 | #html_show_sourcelink = True 164 | 165 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 166 | #html_show_sphinx = True 167 | 168 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 169 | #html_show_copyright = True 170 | 171 | # If true, an OpenSearch description file will be output, and all pages will 172 | # contain a tag referring to it. The value of this option must be the 173 | # base URL from which the finished HTML is served. 174 | #html_use_opensearch = '' 175 | 176 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 177 | #html_file_suffix = None 178 | 179 | # Output file base name for HTML help builder. 
180 | htmlhelp_basename = 'Lithosdoc' 181 | 182 | 183 | # -- Options for LaTeX output --------------------------------------------- 184 | 185 | latex_elements = { 186 | # The paper size ('letterpaper' or 'a4paper'). 187 | #'papersize': 'letterpaper', 188 | 189 | # The font size ('10pt', '11pt' or '12pt'). 190 | #'pointsize': '10pt', 191 | 192 | # Additional stuff for the LaTeX preamble. 193 | #'preamble': '', 194 | } 195 | 196 | # Grouping the document tree into LaTeX files. List of tuples 197 | # (source start file, target name, title, 198 | # author, documentclass [howto, manual, or own class]). 199 | latex_documents = [ 200 | ('index', 'Lithos.tex', u'Lithos Documentation', 201 | u'Paul Colomiets', 'manual'), 202 | ] 203 | 204 | # The name of an image file (relative to this directory) to place at the top of 205 | # the title page. 206 | #latex_logo = None 207 | 208 | # For "manual" documents, if this is true, then toplevel headings are parts, 209 | # not chapters. 210 | #latex_use_parts = False 211 | 212 | # If true, show page references after internal links. 213 | #latex_show_pagerefs = False 214 | 215 | # If true, show URL addresses after external links. 216 | #latex_show_urls = False 217 | 218 | # Documents to append as an appendix to all manuals. 219 | #latex_appendices = [] 220 | 221 | # If false, no module index is generated. 222 | #latex_domain_indices = True 223 | 224 | 225 | # -- Options for manual page output --------------------------------------- 226 | 227 | # One entry per manual page. List of tuples 228 | # (source start file, name, description, authors, manual section). 229 | man_pages = [ 230 | ('index', 'lithos', u'Lithos Documentation', 231 | [u'Paul Colomiets'], 1) 232 | ] 233 | 234 | # If true, show URL addresses after external links. 235 | #man_show_urls = False 236 | 237 | 238 | # -- Options for Texinfo output ------------------------------------------- 239 | 240 | # Grouping the document tree into Texinfo files. List of tuples 241 | # (source start file, target name, title, author, 242 | # dir menu entry, description, category) 243 | texinfo_documents = [ 244 | ('index', 'Lithos', u'Lithos Documentation', 245 | u'Paul Colomiets', 'Lithos', 'One line description of project.', 246 | 'Miscellaneous'), 247 | ] 248 | 249 | # Documents to append as an appendix to all manuals. 250 | #texinfo_appendices = [] 251 | 252 | # If false, no module index is generated. 253 | #texinfo_domain_indices = True 254 | 255 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 256 | #texinfo_show_urls = 'footnote' 257 | 258 | # If true, do not generate a @detailmenu in the "Top" node's menu. 259 | #texinfo_no_detailmenu = False 260 | 261 | 262 | import os 263 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 264 | 265 | if not on_rtd: # only import and set the theme if we're building docs locally 266 | import sphinx_rtd_theme 267 | html_theme = 'sphinx_rtd_theme' 268 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 269 | 270 | -------------------------------------------------------------------------------- /docs/faq.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Frequently Asked Questions 3 | ========================== 4 | 5 | 6 | How do I Start/Stop/Restart Processes Running By Lithos? 7 | ======================================================== 8 | 9 | Short answer: You can't. 10 | 11 | Long answer: Lithos keep running all the processes that it's configured to 12 | run. 
So: 13 | 14 | * To stop a process: remove it from the config 15 | * To start a process: add it to the config. If it's added, it will be restarted 16 | indefinitely. Sometimes you may want to adjust :opt:`restart-timeout` 17 | * To restart a process: well, kill it (with whatever signal you want). 18 | 19 | The ergonomics of these operations are intentionally not very pleasing. This is 20 | because you are supposed to have a higher-level tool to manage lithos. At the very least 21 | you want to use ansible_, chef_ or puppet_. 22 | 23 | .. _ansible: http://ansible.com/ 24 | .. _chef: http://chef.io/ 25 | .. _puppet: http://puppetlabs.com/ 26 | 27 | 28 | Why is /run/lithos/mnt empty? 29 | ============================= 30 | 31 | This is a mount point. It's never mounted in the host system namespace (and it's 32 | not visible in the guest namespace either). The containerization works as follows: 33 | 34 | 1. The mount namespace is *unshared* (which means no future mounts are visible 35 | in the host system) 36 | 2. The root filesystem image is mounted to ``/run/lithos/mnt`` 37 | 3. Other things are set up in the root file system (``/dev``, ``/etc/hosts``, whatever) 38 | 4. Pivot root is done, which means that ``/run/lithos/mnt`` is now visible as 39 | the root dir, i.e. just plain ``/`` (you can think of it as good old ``chroot``) 40 | 41 | This all means that if you get an error like this:: 42 | 43 | [2015-11-17T10:29:40Z][ERROR] Fatal error: Can't mount pseudofs /run/lithos/mnt/dev/pts (newinstance, options: devpts): No such file or directory (os error 2) 44 | 45 | Or like this:: 46 | 47 | [2015-10-19T15:04:48Z][ERROR] Fatal error: Can't mount bind /whereever/external/storage/is to /run/lithos/mnt/storage: No such file or directory (os error 2) 48 | 49 | it means that lithos has failed at step #3, i.e. it failed to mount the 50 | directory in the guest container file system (``/dev/pts`` and ``/storage`` 51 | respectively) 52 | 53 | 54 | How to Organize Logging? 55 | ======================== 56 | 57 | There are a variety of ways. Here are some hints... 58 | 59 | 60 | Syslog 61 | ------ 62 | 63 | You may accept logs over UDP. Since lithos has no network namespacing (yet), 64 | UDP syslog just works. 65 | 66 | To set up syslog using unix sockets you may configure the syslog daemon on the 67 | host system to listen for the socket inside the container's ``/dev``. 68 | For example, here is how to `configure rsyslog`__ for the default lithos config:: 69 | 70 | module(load="imuxsock") # needs to be done just once 71 | input(type="imuxsock" Socket="/var/lib/lithos/dev/log") 72 | 73 | __ http://www.rsyslog.com/doc/v8-stable/configuration/modules/imuxsock.html 74 | 75 | Alternatively (but *not* recommended) you may configure :opt:`devfs-dir`:: 76 | 77 | devfs-dir: /dev 78 | 79 | 80 | Stdout/Stderr 81 | ------------- 82 | 83 | It's recommended to use syslog or a similar solution for logs. But there 84 | are still reasons to write logs to a file: 85 | 86 | 1. You may want to log early start errors (when you have not yet initialized 87 | the logging subsystem of the application) 88 | 2. If you have a single server and don't want additional daemons 89 | 90 | Starting with version ``v0.5.0`` lithos has a per-sandbox log file which 91 | contains all the stdout/stderr output of the processes. By default it's in 92 | ``/var/log/lithos/stderr/<sandbox-name>.log``. See :opt:`stdio-log-dir` for 93 | more info. 94 | 95 | 96 | How to Update Configs? 97 | ====================== 98 | 99 | The best way to update the config of *processes* is to put it into a temporary 100 | file and run ``lithos_switch`` (see ``lithos_switch --help`` for more info). 101 | This is the main kind of config that you update multiple times a day. 102 | 103 | In case you've already put the config in place, or for the *master* and *sandbox* 104 | configs, you should first run ``lithos_check`` to check that all configs are 105 | valid. Then just send the ``QUIT`` signal to the ``lithos_tree`` process. Usually 106 | the following command-line is enough for manual operation:: 107 | 108 | pkill -QUIT lithos_tree 109 | 110 | But for automation it's better to use ``lithos_switch``. 111 | 112 | .. note:: 113 | 114 | By sending the ``QUIT`` signal we effectively emulate a crash of the supervisor 115 | daemon. It's designed in a way that allows it to survive a crash and keep all 116 | fresh child processes alive. After an **in-place restart** it checks the 117 | configuration of child processes, kills outdated ones and executes new 118 | configs. 119 | 120 | 121 | .. _running-commands: 122 | 123 | How to Run Commands in Container? 124 | ================================= 125 | 126 | There are two common ways: 127 | 128 | 1. If you have a container already running, use ``nsenter`` 129 | 2. Prepare a special command for ``lithos_cmd`` 130 | 131 | 132 | Running ``nsenter`` 133 | ------------------- 134 | 135 | This way only works if you have a running container. It's hard to get it to work if 136 | your process crashes too quickly after start. 137 | 138 | You must also have a working shell in the container; we use ``/bin/sh`` 139 | in the examples. 140 | 141 | You can use ``nsenter`` to join most namespaces, except the user namespace. 142 | For example, if you know the pid, the following command would allow you to run 143 | a shell in the container and investigate files:: 144 | 145 | nsenter -m -p --target 12345 /bin/sh 146 | 147 | If you don't know the PID, you may easily discover it with ``lithos_ps`` or 148 | automate it with ``pgrep``:: 149 | 150 | nsenter -m -p \ 151 | --target=$(pgrep -f 'lithos_knot --name sandbox-name/process-name.0') \ 152 | /bin/sh 153 | 154 | .. warning:: This method is very insecure. It runs the command in the original user 155 | namespace with the host root user. While basic sandboxing (i.e. the filesystem 156 | root) is enabled by `-m` and `-p`, the program that you're trying to 157 | run (i.e. the shell itself) can still escape that sandbox. 158 | 159 | Because we set up mount namespaces and user namespaces in different stages of 160 | container initialization, there is currently no way to join both the 161 | user namespace and the mount namespace. (You can join just the user namespace 162 | by running ``nsenter -U --target=1235``, where 1235 is the pid of the 163 | process inside the container, not lithos_knot. But this is probably useless.) 164 | 165 | 166 | Running ``lithos_cmd`` 167 | ----------------------- 168 | 169 | In some cases you may want to have a special container with a shell to run 170 | with ``lithos_cmd``. This is just a normal lithos container configuration 171 | with ``kind: Command`` and ``interactive: true``, and the shell specified 172 | as the command. Then you run your ``shell.yaml`` with:: 173 | 174 | lithos_cmd sandbox-name shell 175 | 176 | There are three important points about this method: 177 | 178 | 1. If you're trying to investigate a problem with the daemon config, you copy 179 | the daemon config into this interactive command. It's your job to keep both 180 | configs in sync. This config must also be exposed in the *processes* config 181 | just like any other. 182 | 183 | 2. It will run another (although identical) container on each run. You will 184 | not see the processes running as daemons, or other shells, in ``ps`` or similar 185 | commands. 186 | 187 | 3. You must have a shell in the container to make use of it. Sometimes you just don't 188 | have it. But you may use any interactive interpreter, like ``python``, or 189 | even non-interactive commands. 190 | 191 | 192 | .. _find-files: 193 | 194 | How to Find Files Mounted in Container? 195 | ======================================= 196 | 197 | Linux provides many great tools to introspect a running container. Here 198 | is a short overview: 199 | 200 | 1. ``/proc/<pid>/root`` is a directory you can ``cd`` into to look 201 | at files 202 | 2. ``/proc/<pid>/mountinfo`` is a mapping between host system directories 203 | and the ones in the container 204 | 3. And you can :ref:`join the container's namespace ` 205 | 206 | 207 | Example 1 208 | --------- 209 | 210 | Let's try to explore some common tasks. First, let's find the container's pid:: 211 | 212 | $ pgrep -f 'lithos_knot --name sandbox-name/process-name.0' 213 | 12345 214 | 215 | Now we can find out the OS release used to build the container:: 216 | 217 | $ sudo cat /proc/12345/root/etc/alpine-release 218 | 3.4.6 219 | 220 | .. warning:: There is a caveat. Symlinks that point to paths starting with the 221 | root are resolved differently than in the container. So ensure that you're 222 | not accessing a symlink (and that no intermediate component is 223 | a symlink). 224 | 225 | 226 | Example 2 227 | --------- 228 | 229 | Now, let's find out which volume is mounted as ``/app/data`` inside the 230 | container. 231 | 232 | If you have a quite recent ``findmnt`` it's easy:: 233 | 234 | $ findmnt -N 12345 /app/data 235 | TARGET SOURCE FSTYPE OPTIONS 236 | /app/data /dev/mapper/Disk-main[/all-storages/myproject] ext4 rw,noatime,discard,data=ordered 237 | 238 | Here we can see that ``/app/data`` in the container is an LVM partition ``main`` 239 | in group ``Disk`` with the path ``all-storages/myproject`` relative to 240 | the root of the partition. You can find out where this volume is mounted on 241 | the host system by inspecting the output of the ``mount`` or ``findmnt`` commands. 242 | 243 | The manual way is to look at ``/proc/<pid>/mountinfo`` (stripped output):: 244 | 245 | 246 | $ cat /proc/12345/mountinfo 247 | 347 107 9:1 /all-images/sandbox-name/myproject.c17cb162 / ro,relatime - ext4 /dev/md1 rw,data=ordered 248 | 356 347 0:267 / /tmp rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=102400k 249 | 360 347 9:1 /all-storages/myproject /app/data rw,relatime - ext4 /dev/mapper/Disk-main rw,data=ordered 250 | 251 | Here you can observe the same info. The important parts are: 252 | 253 | * The fifth column is the mountpoint (but be careful, in complex cases there might 254 | be multiple overlapping mount points); 255 | * The fourth column is the path relative to the volume root; 256 | * And the 9th column (next to last) is the volume name. 257 | 258 | Let's find out where it is on the host system:: 259 | 260 | $ mount | grep Disk-main 261 | /dev/mapper/Disk-main on /srv type ext4 (rw,noatime,discard,data=ordered) 262 | 263 | That's it, now you can look at ``/srv/all-storages/myproject`` to find the files 264 | seen by an application. 265 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ..
Lithos documentation master file, created by 2 | sphinx-quickstart on Wed Jan 21 13:42:37 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Lithos's documentation! 7 | ================================== 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | config 15 | master_config 16 | sandbox_config 17 | process_config 18 | container_config 19 | metrics 20 | volumes 21 | tips/index 22 | faq 23 | changelog 24 | 25 | 26 | 27 | Indices and tables 28 | ================== 29 | 30 | * :ref:`genindex` 31 | 32 | -------------------------------------------------------------------------------- /docs/lithosdomain.py: -------------------------------------------------------------------------------- 1 | from sphinxcontrib.domaintools import custom_domain 2 | 3 | def setup(app): 4 | app.add_domain(custom_domain('LithosOptions', 5 | name = 'lithos', 6 | label = "Lithos Yaml Options", 7 | 8 | elements = dict( 9 | opt = dict( 10 | objname = "Yaml Option", 11 | indextemplate = "pair: %s; Option", 12 | ), 13 | popt = dict( 14 | objname = "Process Config Option", 15 | indextemplate = "pair: %s; Process Config Option", 16 | ), 17 | bopt = dict( 18 | objname = "Bridge Setup Option", 19 | indextemplate = "pair: %s; Bridge Setup Option", 20 | ), 21 | volume = dict( 22 | objname = "Volume Type", 23 | indextemplate = "pair: %s; Volume Type", 24 | ), 25 | ))) 26 | -------------------------------------------------------------------------------- /docs/master_config.rst: -------------------------------------------------------------------------------- 1 | .. _master_config: 2 | 3 | ============= 4 | Master Config 5 | ============= 6 | 7 | 8 | The master configuration file is the one that is usually located at 9 | ``/etc/lithos/master.yaml`` and defines a small subset of global configuration 10 | parameters. Minimal configuration is an *empty file* but it **must exist** 11 | anyway. Here is the reference of the parameters along with their default values: 12 | 13 | See :ref:`overview ` for guidelines. 14 |
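Since every parameter has a default, a working ``master.yaml`` only needs the values
you want to override. For example, a minimal sketch that spells out the directory
layout using only options documented below (the values shown are the documented
defaults, repeated here purely for illustration):

.. code-block:: yaml

    # /etc/lithos/master.yaml -- anything omitted keeps its default
    sandboxes-dir: ./sandboxes     # resolved relative to this file, i.e. /etc/lithos/sandboxes
    processes-dir: ./processes     # i.e. /etc/lithos/processes
    runtime-dir: /run/lithos       # must be an absolute path, expected to be on tmpfs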
Default ``/var/lib/lithos/dev``. 51 | 52 | .. opt:: cgroup-name 53 | 54 | The name of the root cgroup for all lithos processes. Specify ``null`` (or 55 | any other form of YAMLy null) to turn cgroups off completely. 56 | 57 | .. opt:: cgroup-controllers 58 | 59 | List of cgroup controllers to initialize for each container. Note: the 60 | empty list is treated as default. Default is 61 | ``[name, cpu, cpuacct, memory, blkio]``. If you have some controllers 62 | joined together like ``cpu,cpuacct`` it's ok. 63 | 64 | Use ``cgroup-name: null`` to turn cgroup tracking off (not empty list 65 | here). And use ``cgroup-controllers: [name]`` to only use cgroups for 66 | naming processes but not for resource control. 67 | 68 | .. note:: turning off cgroups means that resource limits does not work 69 | completely. lithos will not try to enforce them by polling or some 70 | other means 71 | 72 | .. opt:: default-log-dir 73 | 74 | (default ``/var/log/lithos``) The directory where master and each of the 75 | application logs are created (unless are overrided by sandbox config). 76 | 77 | .. opt:: config-log-dir 78 | 79 | (default ``/var/log/lithos/config``) The directory where configurations of 80 | the processes are stored. These are used by ``lithos_clean`` to find out 81 | when it's safe to clean directories. You may also reconstruct 82 | processes configuration at any point in time using this directory. 83 | 84 | .. versionchanged:: 0.10.2 85 | 86 | Parameter can be ``null``: 87 | 88 | .. code-block:: yaml 89 | 90 | config-log-dir: null 91 | 92 | In this case no configuration logging is done. This is mainly useful if 93 | you track configurations and versions by some other means. 94 | 95 | .. note:: This is enabled by default for backwards-compatibility reasons. 96 | We consider resetting this value to ``null`` by default 97 | in ``lithos 1.0`` as this parameter is not as useful as were expected. 98 | 99 | 100 | .. opt:: stdio-log-dir 101 | 102 | (default ``/var/log/lithos/stderr``) The directory where stderr of the 103 | processes will be forwarded. One file per sandbox is created. 104 | 105 | These files are created by lithos and file descriptor is passed to the 106 | application as both the stdout and stderr. Lithos does not parse, copy or 107 | otherwise proxy the data. The operating system does all the work. This also 108 | means lithos can't rotate or do any other magical things with the log. 109 | 110 | This should be used only to tackle the critical errors. Application should 111 | send log to a syslog or write some rotating log files on it's own, because 112 | there is no good tools to groups lines of the stderr into solid log messages 113 | that include tracebacks and other fancy stuff. 114 | 115 | Good utilities to manage the files: 116 | 117 | * ``logrotate`` in ``copytruncate`` mode 118 | * ``rsyslog`` with file input plugin 119 | 120 | This can be overridden in process by :opt:`stdout-stderr-file`. 121 | 122 | .. note:: The path is reopened on process restart. 123 | If :opt:`restart-process-only` is `true` then it's only reopened when 124 | configuration changes. This is good to know if you remove or rename 125 | the file by hand. 126 | 127 | .. opt:: log-file 128 | 129 | (default ``master.log``) Master log file. Relative paths are treated from 130 | :opt:`default-log-dir`. 131 | 132 | .. opt:: log-level 133 | 134 | (default ``warn``) Level of logging. Can be overriden on the command line. 135 | 136 | .. 
opt:: syslog-facility 137 | 138 | (no default) Enables logging to syslog (with specified facility) instead of 139 | file. 140 | 141 | .. opt:: syslog-name 142 | 143 | (default ``lithos``) Application name for master process in syslog. The 144 | child processes are prefixed by this value. For example ``lithos-django`` 145 | (where ``django`` is a sandbox name). 146 | -------------------------------------------------------------------------------- /docs/metrics.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Metrics 3 | ======= 4 | 5 | Lithos submits metrics via a `cantal-compatible protocol`_. 6 | 7 | All metrics usually belong to lithos's cgroup, so for example in graphite 8 | you can find them under ``cantal...lithos.groups.*``. 9 | Or you cand find them without this prefix in 10 | ``http://hostname:22682/local/process_metrics`` without a prefix. 11 | 12 | In the following description we skip the common prefix and only show metric 13 | names. 14 | 15 | Metrics of lithos master process: 16 | 17 | * ``master.restarts`` (counter) amount of restarts of a master process. 18 | Usually restart equals to configuration reload via ``lithos_switch`` or any 19 | other way. 20 | * ``master.sandboxes`` (gauge) number of sandboxes configured 21 | * ``master.containers`` (gauge) number of containers (processes) conigured 22 | * ``master.queue`` (gauge) length of the internal queue, the queue consists of 23 | processes to run and hanging processes to kill 24 | 25 | Per-process metrics: 26 | 27 | * ``processes...started`` -- (counter) number of 28 | times process have been started 29 | * ``processes...deaths`` -- (counter) number of 30 | times process have exited for any reason 31 | * ``processes...failures`` -- (counter) number of 32 | times process have exited for failure reason, for whatever reason lithos 33 | thinks it was failure. See `Determining Failure`_ 34 | * ``processes...running`` -- (gauge) number of 35 | procesess that are currently running (was started but not yet found to be 36 | exited) 37 | 38 | 39 | Global metrics for all sandboxes and containers: 40 | 41 | * ``containers.started`` -- (counter) same as for ``processes.*`` but for all 42 | containers 43 | * ``containers.deaths`` -- (counter) see above 44 | * ``containers.failures`` -- (counter) see above 45 | * ``containers.running`` -- (gauge) see above 46 | * ``containers.unknown`` -- (gauge) number of child processes of lithos that 47 | are found to be running but do not belong to any of the process groups known 48 | to lithos (they are being killed, and they are probably from deleted configs) 49 | 50 | .. _cantal-compatible protocol: http://cantal.readthedocs.io/en/latest/mmap.html 51 | 52 | .. _failures: 53 | 54 | Determining Failure 55 | =================== 56 | 57 | Currently there are two kinds of process death that are considered non-failures: 58 | 59 | 1. Processes that had been sent ``SIGTERM`` signal to (with any exit status) 60 | or ones dead on ``SIGTERM`` signal are considered non-failed. 61 | 2. Processes exited with one of the exit codes specified in 62 | :opt:`normal-exit-codes` 63 | -------------------------------------------------------------------------------- /docs/process_config.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: yaml 2 | 3 | .. _process_config: 4 | 5 | ============== 6 | Process Config 7 | ============== 8 | 9 | 10 | This config resides in ``/etc/lithos/processes/NAME.yaml`` (by default). 
11 | Where ``NAME`` is the name of a sandbox. 12 | 13 | It mainly contains three things: 14 | 15 | * ``image`` the process is run from 16 | * ``config`` file name inside the image that specifies command-line and other 17 | process execution parameters 18 | * number ``instances`` of the process to run 19 | 20 | For example:: 21 | 22 | django: 23 | image: django.v3.5.7 24 | config: /config/worker_process.yaml 25 | instances: 3 26 | 27 | redis: 28 | image: redix.v1 29 | config: /config/redis.yaml 30 | instances: 1 31 | 32 | This will start three python ``django`` worker processes and one redis. 33 | 34 | .. hint:: Usually this config is generated by some tool like ansible_ or 35 | confd_. 36 | 37 | There is also a way to create **ad-hoc** commands. For example:: 38 | 39 | manage: 40 | kind: Command 41 | image: django.v3.5.7 42 | config: /config/manage_py.yaml 43 | 44 | This will allow to start a ``manage.py`` command with:: 45 | 46 | $ lithos_cmd SANDBOX_NAME manage syncdb 47 | 48 | This runs command in the same sandbox like the worker process itself but 49 | the command is actually attached to current shell. The commands may be freely 50 | mixed with ``Daemon`` items (which is default ``kind``) in same config. The 51 | only limitation is that names must not be duplicated 52 | 53 | The ``Command`` is occasionally useful, but should be used with care. To start 54 | a command you need root privileges on host system, so it's only useful for 55 | SysOp tasks or **may be** for cron tasks but not for normal operation of 56 | application. 57 | 58 | Options 59 | ======= 60 | 61 | .. popt:: instances 62 | 63 | Number of instances to run 64 | 65 | .. popt:: image 66 | 67 | Identifier of the image to run container from 68 | 69 | .. popt:: config 70 | 71 | Configuration file name (absolute name in container) to run 72 | 73 | .. popt:: ip-addresses 74 | 75 | A list of ip addresses if :opt:`bridged-network` is enforced in sandbox. 76 | Note the number of items in this list must match :popt:`instances` value. 77 | 78 | .. popt:: variables 79 | 80 | A mapping of `variable: value` for variables that can be used in process 81 | config. 82 | 83 | .. popt:: extra-secrets-namespaces 84 | 85 | Additional secrets namespaces allowed for this specific project. In 86 | addition to :opt:`secrets-namespaces`. See :ref:`encrypted-vars` for 87 | more info. 88 | 89 | .. _process_variables: 90 | 91 | Variables 92 | ========= 93 | 94 | You can also add variables for specific config: 95 | 96 | For example:: 97 | 98 | django: 99 | image: django.v3.5.7 100 | config: /config/worker_process.yaml 101 | variables: 102 | tcp_port: 10001 103 | instances: 3 104 | 105 | Only variables that are **declared** in :ref:`container config 106 | ` can be substituted. Extra variables are ignored. If 107 | there is a declared variable but it's not present in process config, it doesn't 108 | pass configuration check. 109 | 110 | .. _ansible: http://www.ansible.com/ 111 | .. _confd: https://github.com/kelseyhightower/confd 112 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinxcontrib-domaintools==0.1 2 | sphinx_rtd_theme 3 | -------------------------------------------------------------------------------- /docs/sandbox_config.rst: -------------------------------------------------------------------------------- 1 | .. 
_sandbox_config: 2 | 3 | ============== 4 | Sandbox Config 5 | ============== 6 | 7 | 8 | This config resides in ``/etc/lithos/sandboxes/NAME.yaml`` (by default). 9 | Where ``NAME`` is the name of a sandbox. 10 | 11 | The configuration file contains security and resource limits for the container. 12 | Including: 13 | 14 | * A directory where image resides 15 | * Set of directories that are mounted inside the container (i.e. all writable 16 | directories for the container, the ``/tmp``...) 17 | * User and group limits 18 | 19 | See :ref:`overview ` for guidelines. 20 | 21 | 22 | Reference 23 | ========= 24 | 25 | 26 | .. opt:: config-file 27 | 28 | The path for the :ref:`processes config`. In most cases 29 | should be left unset. Default is ``null`` which is results into 30 | ``/etc/lithos/processes/NAME.yaml`` with all other settings being defaults. 31 | 32 | .. opt:: image-dir 33 | 34 | Directory where application images are. Every subdir of the ``image-dir`` 35 | may be mounted as a root file system in the container. **Required**. 36 | 37 | .. opt:: image-dir-levels 38 | 39 | (default ``1``) A number of directory components required for image name 40 | in :opt:`image-dir` 41 | 42 | .. versionadded: 0.12.1 43 | 44 | .. opt:: log-file 45 | 46 | The file name where to put **supervisor** log of the container. Default is 47 | ``/var/log/lithos/SANDBOX_NAME.yaml``. 48 | 49 | .. opt:: log-level 50 | 51 | (default ``warn``). The logging level of the supervisor. 52 | 53 | .. opt:: readonly-paths 54 | 55 | The mapping of ``virtual_directory: host_system_directory`` of folders which 56 | are visible for the container in read-only mode. (Note currently if you 57 | have submounts in the source directory, thay may be available as writeable). 58 | See :ref:`Volumes` for more details. 59 | 60 | .. opt:: writable-paths 61 | 62 | The mapping of ``virtual_directory: host_system_directory`` of folders which 63 | are visible for the container in writable mode. 64 | See :ref:`Volumes` for more details. 65 | 66 | .. opt:: allow-users 67 | 68 | List of ranges of user ids which can be used by container. For containers 69 | without user namespaces, it's just a limit of the ``user-id`` setting. 70 | 71 | Example:: 72 | 73 | allow-users: [1, 99, 1000-2000] 74 | 75 | For containers which have uid maps enabled **in sandbox** this is a list of 76 | users available *after* uid mapping applied. For example, the following 77 | maps uid 100000 as root in namespace (e.g. for file permissions), 78 | but doesn't allow to start process as root (even if it's 100000 ouside):: 79 | 80 | uid-map: [{outside: 100000, inside: 0, count: 65536}] 81 | allow-users: [1-65535] 82 | 83 | For containers which do have uid maps enabled **in container config**, 84 | it limits all the user ids available to the namespace (i.e. for the 85 | outside setting of the uid map). 86 | 87 | .. opt:: default-user 88 | 89 | (no default) A user id used in the container if no ``user-id`` is specified 90 | in container config. By default ``user-id`` is required. 91 | 92 | Note: ``default-user`` value must be contained in the ``allow-users`` range 93 | 94 | .. versionadded: v0.15.3 95 | 96 | .. opt:: allow-groups 97 | 98 | List of ranges of group ids for the container. 99 | Works similarly to :opt:`allow-users`. 100 | 101 | .. opt:: default-group 102 | 103 | (default ``0``) A group id used in the container if no ``group-id`` 104 | is specified in container config. 
105 | 106 | Note: ``default-group`` value must be contained in the ``allow-users`` range 107 | 108 | .. versionadded: v0.15.3 109 | 110 | In previous versions default group was always zero. 111 | 112 | .. opt:: allow-tcp-ports 113 | 114 | List of ranges of allowed TCP ports for container. This is currently not 115 | enforced in any way except: 116 | 117 | 1. Ports < 1024 are restricted by OS for non-root (but may be allowed here) 118 | 2. It restricts :opt:`bind-port` setting in container config 119 | 120 | .. note:: if you have overlapping TCP port for different sandboxes, only 121 | single file descriptor will be used for each port. The config for 122 | opening port will be used arbitrary from single config amonst all users, 123 | which have obvious security implications. 124 | 125 | .. warning:: :opt:`tcp-ports` bind at port in **host namespace**, i.e. it 126 | effectively discards :opt:`bridged-network` for that port this is both 127 | the feature and might be a pitfall. So most of the time you should avoid 128 | non-empty :opt:`allow-tcp-ports` if using `bridged-network`. 129 | 130 | .. opt:: additional-hosts 131 | 132 | Mapping of ``hostname: ip`` for names that will be added to ``/etc/hosts`` 133 | file. This is occasinally used for cheap but static service discovery. 134 | 135 | .. opt:: uid-map, gid-map 136 | 137 | The list of mapping for uids(gids) in the user namespace of the container. 138 | If they are not specified the user namespace is not used. This setting 139 | allows to run processes with ``uid`` zero without the risk of being 140 | the ``root`` on host system. 141 | 142 | Here is a example of maps:: 143 | 144 | uid-map: 145 | - {inside: 0, outside: 1000, count: 1} 146 | - {inside: 1, outside: 1, count: 1} 147 | gid-map: 148 | - {inside: 0, outside: 100, count: 1} 149 | 150 | .. note:: Currently you may have uid-map either in a sandbox or in a 151 | container config, not both. 152 | 153 | .. opt:: used-images-list 154 | 155 | (optional) A text file that is used by ``lithos_clean`` to keep images 156 | alive. It's not used by any other means except ``lithos_clean`` utility. 157 | 158 | Each line of the file should contain image name relative to the 159 | ``image_dir``. 160 | 161 | It's expected that the list is kept up by some orchestration system or 162 | by deployment scripts or by any other tool meaningful for ops team. 163 | 164 | This setting is only useful if ``auto-clean`` is ``true`` (default) 165 | 166 | .. opt:: auto-clean 167 | 168 | (default ``true``) Clean images of this sandbox when running 169 | ``lithos_clean``. This is a subject of the following caveats: 170 | 171 | 1. Lithos clean is not run by lithos automatically, you ought to run it 172 | using cron tab 173 | 2. If same ``image-dir`` is used for multiple sandboxes it will be cleaned 174 | if at least one of them has non-falsy ``auto-clean``. 175 | 176 | .. opt:: resolv-conf 177 | 178 | (default ``/etc/resolv.conf``) default place to copy ``resolv.conf`` from 179 | for containers. 180 | 181 | Note: Container itself can override it's own resolv.conf file, but can't 182 | read original ``/etc/resolv.conf`` if this setting is changed. 183 | 184 | .. opt:: hosts-file 185 | 186 | (default ``/etc/hosts``) default place to copy ``hosts`` from 187 | for containers. 188 | 189 | Note: Container itself can override it's own ``hosts`` file, but can't 190 | read original ``/etc/hosts`` if this setting is changed. 191 | 192 | .. 
opt:: bridged-network 193 | 194 | (default is absent) a network bridge configuration for all the cotainers in 195 | the bridge 196 | 197 | Example: 198 | 199 | .. code-block:: yaml 200 | 201 | bridged-network: 202 | bridge: br0 203 | network: 10.0.0.0/24 204 | default_gateway: 10.0.0.1 205 | after-setup-command: [/usr/bin/arping, -U, -c1, '@{container_ip}'] 206 | 207 | .. note:: when bridged network is active your :ref:`process_config` should 208 | contain a list of ip addresses one for each container. 209 | 210 | .. note:: this setting does not affect ``tcp-ports``. So usually you should 211 | keep :opt:`allow-tcp-ports` setting empty when using bridged network. 212 | 213 | .. versionchanged: 0.18.0 214 | 215 | Previously lithos always called `/usr/bin/arping` now it doesn't but 216 | the example of `after-setup-command` shown above does exactly same thing. 217 | 218 | Options: 219 | 220 | .. bopt:: after-setup-command 221 | 222 | Command to run after setting up container namespace but before running 223 | actual container. The example shown above sends unsolicited arp packet 224 | to notify router and other machines on the network that MAC address 225 | corresponding to container's IP is changed. 226 | 227 | Command must have absolute path, and has almost empty environment, so 228 | don't assume ``PATH`` is there if you're writing a script. Command runs 229 | in *container's network* namespace but with all other namespaces in host 230 | system (in particular in *host filesystem* and with permissions of root 231 | in host system) 232 | 233 | Replacement variables that work in command-line: 234 | 235 | * ``@{container_ip}`` -- replaced with IP address of a container being 236 | set up 237 | 238 | Few examples: 239 | 240 | 1. ``[/usr/bin/arping, -U, -c1, '@{container_ip}']`` -- default 241 | in v0.17.x. This notifies other peers that MAC address for 242 | this IP changed. 243 | 2. ``[/usr/bin/arping, -c1, '10.0.0.1']`` -- other way to do that, that 244 | often does the same as in (1) a side-effect 245 | (where 10.0.0.1 is a default gateway) 246 | 3. ``[/usr/bin/ping, -c1, '10.0.0.1']`` -- doing same as (2) but using 247 | ICMP instead of ARP directly 248 | 249 | Most of the time containers should work with empty 250 | ``after-setup-command``, but because container gets new MAC address each 251 | time it starts, there might be a small delay (~ 5 sec) after container's 252 | start where packets going to that IP are lost (so it appears that host 253 | is unavailable). 254 | 255 | .. version-added: v0.18.0 256 | 257 | 258 | .. opt:: secrets-private-key 259 | 260 | (default is absent) Use the specified private key(s) to decode secrets 261 | in container's :opt:`secret-environ` setting. 262 | 263 | The key in this file is openssh-compatible ed25519 private key 264 | (RSA keys are *not* supported). File can contain multiple keys 265 | (concatenated), if secret matches any of them it will be decoded. 266 | 267 | To create a key use normal ``ssh-keygen`` and leave the password empty 268 | (password-protected keys aren't supported):: 269 | 270 | ssh-keygen -t ed25519 -t /etc/lithos/keys/secret.key 271 | 272 | Note: the key must be owned by root with permissions of 0600 (default for 273 | ssh-keygen). 274 | 275 | .. opt:: secrets-namespaces 276 | 277 | (default is `[""]`) allow only secrets with listed namespaces. 278 | Useful only if ``secrets-private-key`` is set. 279 | 280 | For example: 281 | 282 | .. 
code-block:: yaml 283 | 284 | secrets-namespaces: 285 | - project1.web 286 | - project1.celery 287 | 288 | The idea is you might want to use single secret private key for a whole 289 | cluster. But diferent services having different "namespaces". This means 290 | you can use single public key for encyption and specify different 291 | namespace for each service. With this setup user can't just copy a 292 | key from one service to another if that another service isn't authorized 293 | to read the namespace using :opt:`secrets-namespaces`. 294 | 295 | To encrypt secret for a specific namespace use:: 296 | 297 | lithos_crypt encrypt -k key.pub -d "secret" -n "project1.web" 298 | 299 | By default both ``lithos_crypt`` and :opt:`secrets-namespaces` specify 300 | empty string as a namespace. This is good enough if you don't have 301 | multiple teams sharing the same cluster. 302 | 303 | Currently namespaces are limited to a regexp ``^[a-zA-Z0-9_.-]*$`` 304 | 305 | See :ref:`encrypted-vars` for more info. 306 | 307 | -------------------------------------------------------------------------------- /docs/tips/index.rst: -------------------------------------------------------------------------------- 1 | Tips and Conventions 2 | ==================== 3 | 4 | This documents describes how to prepare images to run by lithos. You don't have 5 | to obey all the rules. And you are free to create your own rules within the 6 | organization. But hopefully this will help you a lot when you're confused. 7 | 8 | Contents: 9 | 10 | .. toctree:: 11 | :maxdepth: 1 12 | 13 | tcp-ports 14 | vagga 15 | secrets 16 | -------------------------------------------------------------------------------- /docs/tips/secrets.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Storing Secrets 3 | =============== 4 | 5 | There are currently two ways to provide "secrets" for containers: 6 | 7 | 1. Encrypted values inserted into environment variable 8 | 2. Mount a directory from the host system 9 | 10 | .. contents:: :local: 11 | 12 | .. _encrypted-vars: 13 | 14 | Encrypted Variables 15 | =================== 16 | 17 | Guide 18 | ----- 19 | 20 | Note: this guide covers both server setup and configuring specific containers. 21 | Usually setup (steps 1-3) is done once. And adding keys to a container 22 | (steps 4-5) is more regular job. 23 | 24 | 1. Create a key private key on the server:: 25 | 26 | ssh-keygen -f /etc/lithos/keys/main.key -t ed25519 -P "" 27 | 28 | You can create a shared key or a per-project key. Depending on your 29 | convenience. Synchronize the key accross all the servers in the same cluster. 30 | This key should **never leave** that set of servers. 31 | 32 | 2. Add the reference to the key into your :ref:`sandbox_config` 33 | (e.g. ``/etc/lithos/sandboxes/myapp.yaml``): 34 | 35 | .. code-block:: yaml 36 | 37 | secrets-private-key: /etc/lithos/keys/main.key 38 | secrets-namespaces: [myapp] 39 | 40 | You can omit ``secrets-namespaces`` if you're sole owner of this 41 | server/cluster (it allows only empty string as a namespace). You can also 42 | make per-process namespaces (:popt:`extra-secrets-namespaces`). 43 | 44 | 3. Publish your public key ``/etc/lithos/keys/main.key.pub`` for your users. 45 | *(Cryptography guarantees that even if this key is shared publically, i.e. 46 | commited into a git repo, or accessible over non-authorized web URL system 47 | is safe)* 48 | 49 | 4. 
Your users may now fetch the public key and encrypt their secrets with 50 | ``lithos_crypt`` (get static binary on `releases page`_): 51 | 52 | .. code-block:: console 53 | 54 | $ lithos_crypt encrypt -k main.key.pub -n myapp -d the_secret 55 | v2:ROit92I5:KqWSX0BY:8MtOoWUX:nHcVCIWZG2hivi0rKa8MRnAIbt7TDTHB8YC8bBnac3IGMzk57R/HsBhxeqCdC7Ljyf8pszBBjIGD33f6lwBM7Q== 56 | 57 | The important thing here is to encrypt with the right key **and** 58 | the right namespace. 59 | 60 | 5. Then put a secret into your :ref:`container_config`: 61 | 62 | .. code-block:: yaml 63 | 64 | executable: /usr/bin/python3 65 | environ: 66 | DATABASE_URL: postgresql://myappuser@db.example.com/myappdb 67 | secret-environ: 68 | DATABASE_PASSWORD: v2:ROit92I5:KqWSX0BY:8MtOoWUX:nHcVCIWZG2hivi0rKa8MRnAIbt7TDTHB8YC8bBnac3IGMzk57R/HsBhxeqCdC7Ljyf8pszBBjIGD33f6lwBM7Q== 69 | 70 | That's it. To add a new password to the same or another container repeat 71 | steps 4-5. 72 | 73 | This scheme is specifically designed to be safe to store in a (public) git 74 | repository by using secure encryption. 75 | 76 | .. _releases page: https://github.com/tailhook/lithos/releases 77 | 78 | 79 | .. _key-structure: 80 | 81 | Ananomy of the Encrypted Key 82 | ---------------------------- 83 | 84 | As you might see there is a pattern in an encrypted key. Here is how it 85 | looks like:: 86 | 87 | v2:ROit92I5:KqWSX0BY:8MtOoWUX:nHcVCIWZG2hivi0rKa8MRnAIbt7TDTHB8YC8bBnac3IGM‥wBM7Q== 88 | ^-- encrypted "namespace:actual_secret" 89 | ^^^^^^^^-- short hash of the password itself 90 | ^^^^^^^^-- short hash of the secrets namespace 91 | ^^^^^^^^-- short hash of the public key used for encryption 92 | ^^-- encryption version 93 | 94 | Note the following things: 95 | 96 | 1. Only version ``v2`` is supported (``v1`` was broken and dropped in 0.16.0) 97 | 98 | 2. The short hash is base64-encoded 6-bytes length blake2b hash of the value. 99 | You can check in using ``b2sum`` utility from recent version of ``coreutils``: 100 | 101 | .. code-block:: console 102 | 103 | $ echo -n "the_secret" | b2sum -l48 | xxd -r -p | base64 104 | 8MtOoWUX 105 | 106 | (Note: we need ``xxd`` because ``b2sum`` outputs hexadecimal bytes, also 107 | note ``-n`` in ``echo`` command, as it's a common mistake, without the option 108 | ``echo`` outputs newline at the end). 109 | 110 | 3. The encrypted payload contains ``:`` prefix. While we could 111 | check just the hash. Prefix allows providing better error messages. 112 | 113 | The underlying encyrption is curve25519xsalsa20poly1305 which is compatible 114 | with libnacl and libsodium. 115 | 116 | Let's see how it might be helpful, here is the list of keys: 117 | 118 | .. code-block:: text 119 | :linenos: 120 | 121 | v2:h+M9Ue9x:82HdsExJ:Gd3ocJsr:/+f4ezLfKIP/mp0xdF7H6gfdM7onHWwbGFQX+M1aB+PoCNQidKyz/1yEGrwxD+i+qBGwLVBIXRqIc5FJ6/hw26CE 122 | v2:ROit92I5:cX9ciQzf:Gd3ocJsr:LMHBRtPFpMRRrljNnkaU6Y9JyVvEukRiDs4mitnTksNGSX5xU/zADWDwEOCOtYoelbJeyDdPhM7Q1mEOSwjeyO317Q== 123 | v2:ROit92I5:82HdsExJ:Gd3ocJsr:Hp3pngQZUos5b8ioKVUx40kegM1uDsYWwsWqC1cJ1/1KmQPQQWJZe86xgl1EOIxbuLj6PUlBH8yz5qCnWp//Ofbc 124 | 125 | You can see that: 126 | 127 | 1. All of them have same secret (3rd column) 128 | 2. Second and third ones have same encryption key (1st column) 129 | 3. First and third ones have the same namespace (2nd column) 130 | 131 | This is useful for versioning and debugging problems. You can't deduce the 132 | actual password from this data anyway unless your password is very simple 133 | (dictioanry attack) or you already know it. 
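If you prefer to do this check from Python rather than with ``b2sum``, here is a
minimal sketch (the ``short_hash`` helper is hypothetical, not part of lithos;
it just mirrors the ``b2sum`` pipeline shown above):

.. code-block:: python

    import base64
    import hashlib

    def short_hash(value: str) -> str:
        # 6-byte (48-bit) blake2b digest, base64-encoded -- the format of the
        # three hash columns in a "v2" secret
        digest = hashlib.blake2b(value.encode(), digest_size=6).digest()
        return base64.b64encode(digest).decode()

    print(short_hash("the_secret"))  # 8MtOoWUX

Comparing such hashes against the columns of an encrypted value lets you tell
which public key, namespace and secret it was produced from, without decrypting
anything.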
134 | 135 | Note: even if all three {encryption key, namespace, secret} match, the 136 | last part of data (encrypted payload) will be different each time you encode 137 | that same value. All of the outputs are equally right. 138 | 139 | 140 | Security Notes 141 | -------------- 142 | 143 | 1. Namespaces allow to divide security zones between many projects without 144 | nightmare of generating, syncing and managing secret keys per project. 145 | 2. Namespaces match exactly they aren't prefixes or any other kind of pattern 146 | 3. If you rely on ``lithos_switch`` to switch containers securely (with 147 | untrusted :ref:`process_config`), you need to use different private key 148 | per project (as otherwise ``extra-secrets-namespaces`` can be used to steal 149 | keys) 150 | -------------------------------------------------------------------------------- /docs/tips/tcp-ports.rst: -------------------------------------------------------------------------------- 1 | .. _tcp-ports-tips: 2 | 3 | ================= 4 | Handing TCP Ports 5 | ================= 6 | 7 | There are couple of reasons you want ``lithos`` to open tcp port on behalf 8 | of your application: 9 | 10 | 1. Running multiple instances of the application, each sharing the same port 11 | 2. Smooth upgrade of you app, where some of processes are running old version 12 | of software and some run new one 13 | 3. Grow and shrink number of processes without any application code to support 14 | that 15 | 4. Using port < 1024 and not starting process as root 16 | 5. Each process is in separate cgroup, so monitoring tools can have 17 | fine-grained metrics over them 18 | 19 | .. note:: 20 | 21 | While you could use ``SO_REUSE_PORT`` socket option for solving #1 it's not 22 | universally available option. 23 | 24 | Forking inside the application doesn't work as well as running each 25 | process by lithos because in the former case your memory limits apply 26 | to all the processes rather than being fine-grained. 27 | 28 | Following sections describe how to configure various software stacks and 29 | frameworks to use tcp-ports opened by lithos. 30 | 31 | It's possible to run any software that supports `systemd socket activation`_ 32 | with :opt:`tcp-ports` of lithos. With the config similar to this: 33 | 34 | .. _systemd socket activation: http://0pointer.de/blog/projects/socket-activation.html 35 | 36 | .. _tp-systemd: 37 | 38 | .. code-block:: yaml 39 | 40 | environ: 41 | LISTEN_FDS: 1 # application receives single file descriptor 42 | # ... more env vars ... 43 | tcp-ports: 44 | 8080: # port number 45 | fd: 3 # SD_LISTEN_FDS_START, first fd number systemd passes 46 | host: 0.0.0.0 47 | listen-backlog: 128 # application may change this on its own 48 | reuse-addr: true 49 | # ... other process settings ... 50 | 51 | .. _tp-asyncio: 52 | 53 | Python3 + Asyncio 54 | ================= 55 | 56 | For development purposes you probably have the code like this: 57 | 58 | .. code-block:: python 59 | 60 | async def init(app): 61 | ... 62 | handler = app.make_handler() 63 | srv = await loop.create_server(handler, host, port) 64 | 65 | To use tcp-ports you should check environment variable and pass socket 66 | if that exists: 67 | 68 | .. code-block:: python 69 | 70 | import os 71 | import socket 72 | 73 | async def init(app): 74 | ... 
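        # fd 3 below is the first inherited file descriptor (SD_LISTEN_FDS_START),
        # i.e. the already-bound socket lithos opened for the `fd: 3` tcp-ports entry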
75 | handler = app.make_handler() 76 | if os.environ.get("LISTEN_FDS") == "1": 77 | srv = await loop.create_server(handler, 78 | sock=socket.fromfd(3, socket.AF_INET, socket.SOCK_STREAM)) 79 | else: 80 | srv = await loop.create_server(handler, host, port) 81 | 82 | This assumes you are configured ``environ`` and ``tcp-ports`` as 83 | :ref:`described above`. 84 | 85 | .. _tp-werkzeug: 86 | 87 | Python + Werkzeug (Flask) 88 | ========================== 89 | 90 | Werkzeug supports the functionality out of the box, just put configure the 91 | environment: 92 | 93 | .. code-block:: yaml 94 | 95 | environ: 96 | WERKZEUG_SERVER_FD: 3 97 | # ... more env vars ... 98 | tcp-ports: 99 | 8080: # port number 100 | fd: 3 # this corresponds to WERKZEUG_SERVER_FD 101 | host: 0.0.0.0 102 | listen-backlog: 128 # default in werkzeug 103 | reuse-addr: true 104 | # ... other process settings ... 105 | 106 | Or you can pass ``fd=3`` to ``werkzeug.serving.BaseWSGIServer``. 107 | 108 | Another hint: **do not use processes != 1**. Better use lithos's 109 | ``instances`` to control the number of processes. 110 | 111 | 112 | .. _tp-twisted: 113 | 114 | Python + Twisted 115 | ================ 116 | 117 | Old code that looks like: 118 | 119 | .. code-block:: python 120 | 121 | reactor.listenTCP(PORT, factory) 122 | 123 | You need to change into something like this: 124 | 125 | .. code-block:: python 126 | 127 | if os.environ.get("LISTEN_FD") == "1": 128 | import socket 129 | sock = socket.fromfd(3, socket.AF_INET, socket.SOCK_STREAM) 130 | sock.set_blocking(False) 131 | reactor.adoptStreamPort(sock.fileno(), AF_INET, factory) 132 | sock.close() 133 | os.close(3) 134 | else: 135 | reactor.listenTCP(PORT, factory) 136 | 137 | .. _tp-golang: 138 | 139 | Golang + net/http 140 | ================= 141 | 142 | Previous code like this: 143 | 144 | .. code-block:: go 145 | 146 | import "net/http" 147 | 148 | srv := &http.Server{ .. } 149 | if err := srv.ListenAndServe(); err != nil { 150 | log.Fatalf("Error listening") 151 | } 152 | 153 | You should wrap into something like this: 154 | 155 | .. code-block:: go 156 | 157 | import "os" 158 | import "net" 159 | import "net/http" 160 | 161 | srv := &http.Server{ .. } 162 | if os.Getenv("LISTEN_FDS") == "1" { 163 | listener, err := net.FileListener(os.NewFile(3, "fd 3")) 164 | if err != nil { 165 | log.Fatalf("Can't open fd 3") 166 | } 167 | if err := srv.Serve(listener); err != nil { 168 | log.Fatalf("Error listening on fd 3") 169 | } 170 | } else { 171 | if err := srv.ListenAndServe(); err != nil { 172 | log.Fatalf("Error listening") 173 | } 174 | } 175 | 176 | .. _tp-nodejs-express: 177 | 178 | Node.js with Express Framework 179 | ============================== 180 | 181 | Normal way to run express: 182 | 183 | .. code-block:: javascript 184 | 185 | let port = 3000 186 | app.listen(port, function() { 187 | console.log('server is listening on', this.address().port); 188 | }) 189 | 190 | Turns into the following code: 191 | 192 | .. 
code-block:: javascript 193 | 194 | let port = 3000; 195 | if (process.env.LISTEN_FDS && parseInt(process.env.LISTEN_FDS, 10) === 1) { 196 | port = {fd:3}; 197 | } 198 | app.listen(port, function() { 199 | console.log('server is listening on', this.address().port); 200 | }) 201 | -------------------------------------------------------------------------------- /docs/tips/vagga.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Deploying Vagga Containers 3 | ========================== 4 | 5 | Vagga_ is a common way to develop applications for later deployment using 6 | lithos. Also vagga is a common way to prepare a container image for use with 7 | lithos. 8 | 9 | Usually vagga_ does it's best to make containers as close to production as 10 | possible. Still vagga tries to make good trade-off to make it's easier to 11 | use for development, so there are few small quircks that you may or may not 12 | notice when deploying. 13 | 14 | Here is a boring list, later sections describe some things in more detail: 15 | 16 | 1. Unsurprisingly ``/work`` directory is absent in production container. 17 | Usually this means three things: 18 | 19 | a. Your sources must be copied/installed into container (e.g. using Copy_) 20 | b. There is no current working directory, unless you specify it explicitly 21 | current directory is root ``/`` 22 | c. You can't **write** into working directory or ``/work/somewhere`` 23 | 24 | 2. All directories are read-only by default. Basic consequences are: 25 | 26 | a. There is no writable ``/tmp`` unless you specify one. This also means 27 | there is no default for temporary dir, you have to chose whether this 28 | is an in-memory :volume:`Tmpfs` or on-disk :volume:`Persistent`. 29 | b. There is no ``/dev/shm`` by default. This is just another ``tmpfs`` 30 | volume in every system nowadays, so just measure how much you need and 31 | mount a :volume:`Tmpfs`. Be aware that each container even on same 32 | machine get's it's own instance. 33 | c. We can't even overwrite ``/etc/resolv.conf`` and ``/etc/hosts``, see 34 | below. 35 | 36 | 3. There are few environment variables that vagga sets in container by default: 37 | 38 | a. ``TERM`` -- is propagated from external environment. For daemons it 39 | should never matter. For :opt:`interactive` commands it may matter. 40 | b. ``PATH`` -- in vagga is set to hard-coded value. There is no default 41 | value in lithos. If your program runs any binaries (and usually lots of 42 | them do, even if you don't expect), you want to set ``PATH``. 43 | c. Various ``*_proxy`` variables are propagated. They are almost never 44 | useful for daemons. But are written here for completeness. 45 | 46 | 47 | 4. In vagga we don't update ``/etc/resolv.conf`` and ``/etc/hosts``, but in 48 | lithos we have such mechanism. The mechanism is following: 49 | 50 | a. In container you make the symlinks 51 | ``/etc/resolv.conf -> /state/resolv.conf``, 52 | ``/etc/hosts -> /state/hosts`` 53 | b. The ``/state`` directory is mounted as :volume:`Statedir` 54 | c. Lithos automatically puts ``resolv.conf`` and ``hosts`` into statedir 55 | when container is created (respecting :opt:`resolv-conf` 56 | and :opt:`hosts-file`) 57 | d. Then files can be updated by updating files 58 | in ``/var/run/lithos/state///`` 59 | 60 | 5. Because by default neither vagga nor lithos have network isolation, some 61 | things that are accessible in the dev system may not be accessible in the 62 | server system. 
This includes both, services on ``localhost`` as well as 63 | in **abstract unix socket namespace**. Known examples are: 64 | 65 | a. Dbus: for example if ``DBUS_SESSION_BUS_ADDRESS`` starts with 66 | ``unix:abstract=`` 67 | b. Xorg: X Window System, the thing you configure with ``DISPLAY`` 68 | c. nscd: name service cache daemon (this thing may resolve DNS names even 69 | if TCP/IP network is absent for your container) 70 | d. systemd-resolved: listens at ``127.0.0.53:53`` as well as on **dbus** 71 | 72 | .. _vagga: http://vagga.readthedocs.io/en/latest/ 73 | .. _copy: http://vagga.readthedocs.io/en/latest/build_steps.html?highlight=Copy#step-Copy 74 | -------------------------------------------------------------------------------- /docs/volumes.rst: -------------------------------------------------------------------------------- 1 | .. _volumes: 2 | 3 | ======= 4 | Volumes 5 | ======= 6 | 7 | Volumes in lithos are just some kind of mount-points. The mount points are not 8 | created by ``lithos`` itself. So they must exist either in original image. Or 9 | on respective volume (if mount point is inside a volume). 10 | 11 | There are the following kinds of volumes: 12 | 13 | .. volume:: Readonly 14 | 15 | Example: ``!Readonly "/path/to/dir"`` 16 | 17 | A **read-only** bind mount for some dir. The directory is mounted with 18 | ``ro,nosuid,noexec,nodev`` 19 | 20 | .. volume:: Persistent 21 | 22 | Example: ``!Persistent { path: /path/to/dir, mkdir: false, mode: 0o700, user: 0, group: 0 }`` 23 | 24 | A **writeable** bind mount. The directory is mounted with 25 | ``rw,nosuid,noexec,nodev``. If you need directory to be created set 26 | ``mkdir`` to ``true``. You also probably need to customize either the user 27 | (to the one running command e.g. same as ``user-id`` of the container) or 28 | the mode (to something like ``0o1777``, i.e. sticky writable by anyone). 29 | 30 | .. volume:: Statedir 31 | 32 | Example: ``!Statedir { path: /, mode: 0o700, user: 0, group: 0 }`` 33 | 34 | Mount subdir of the container's own state directory. This directory is 35 | used to store generated ``resolv.conf`` and ``hosts`` files as well as for 36 | other kinds of small state which is dropped when container dies. If you 37 | mount something other than ``/`` you should custimize mode or an owner 38 | similarly to ``!Persistent`` volumes (except that you can't create statedir 39 | subdirectory by hand because statedir is created for each process at start) 40 | 41 | .. volume:: Tmpfs 42 | 43 | Example: ``!Tmpfs { size: 100Mi, mode: 0o766 }`` 44 | 45 | The tmpfs mount point. Currently only ``size`` and ``mode`` options 46 | supported. Note that syntax of size and mode is generic syntax for 47 | numbers for our configuration library, not the syntax supported by kernel. 48 | -------------------------------------------------------------------------------- /example_configs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | CONFIGS="${1:-py}" 4 | 5 | sudo -k 6 | echo Copying examples/"$CONFIGS" into the system 7 | echo WARNING: This Command will remove /etc/lithos from the system 8 | echo ... hopefully you run this in a virtual machine 9 | echo ... but let you think for 10 seconds 10 | 11 | for i in $(seq 10 -1 0); do echo -n "$i \r"; sleep 1; done; 12 | echo Okay proceeding... 
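# Install the selected example's configs as the live /etc/lithos tree
# (as warned above, --delete-after wipes whatever was in /etc/lithos before)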
13 | 14 | sudo rsync -av --delete-after examples/${CONFIGS}/configs/ /etc/lithos 15 | 16 | vagga _build "${CONFIGS}-example" 17 | 18 | [ -d /var/lib/lithos/images ] || sudo mkdir -p /var/lib/lithos/images 19 | 20 | case $CONFIGS in 21 | multi_level) 22 | sudo mkdir -p /var/lib/lithos/images/${CONFIGS} 23 | sudo rsync -a --delete-after \ 24 | ".vagga/${CONFIGS}-example/" \ 25 | /var/lib/lithos/images/${CONFIGS}/example 26 | ;; 27 | *) 28 | sudo rsync -a --delete-after \ 29 | ".vagga/${CONFIGS}-example/" \ 30 | /var/lib/lithos/images/${CONFIGS}-example 31 | ;; 32 | esac 33 | 34 | echo Done. 35 | echo Ensure that you have run '`vagga make`' before. 36 | echo Then run '`sudo ./target/debug/lithos_tree`' or whatever command you wish 37 | 38 | -------------------------------------------------------------------------------- /examples/multi_level/code/py.yaml: -------------------------------------------------------------------------------- 1 | kind: Command 2 | user_id: 1 3 | volumes: 4 | /tmp: !Tmpfs { size: 100Mi } 5 | memory_limit: 104857600 6 | cpu_shares: 3 7 | interactive: true 8 | executable: /usr/bin/python3 9 | arguments: [] 10 | -------------------------------------------------------------------------------- /examples/multi_level/code/sock.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(fileno=3) 7 | while True: 8 | s, a = sock.accept() 9 | s.send(b'hello') 10 | s.close() 11 | 12 | -------------------------------------------------------------------------------- /examples/multi_level/code/socket.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(fileno=3) 7 | while True: 8 | s, a = sock.accept() 9 | s.write(b'hello') 10 | s.close() 11 | 12 | -------------------------------------------------------------------------------- /examples/multi_level/code/socket.yaml: -------------------------------------------------------------------------------- 1 | kind: Daemon 2 | user_id: 1 3 | volumes: 4 | /tmp: !Tmpfs { size: 100Mi } 5 | memory_limit: 104857600 6 | cpu_shares: 3 7 | executable: /usr/bin/python3 8 | arguments: [/code/sock.py] 9 | tcp-ports: 10 | 77: 11 | fd: 3 12 | -------------------------------------------------------------------------------- /examples/multi_level/configs/master.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tailhook/lithos/eaf420161293864dbecd435266b73635f1ce9bb0/examples/multi_level/configs/master.yaml -------------------------------------------------------------------------------- /examples/multi_level/configs/processes/socket.yaml: -------------------------------------------------------------------------------- 1 | socket: 2 | instances: 2 3 | image: multi_level/example 4 | config: /config/socket.yaml 5 | 6 | py: 7 | kind: Command 8 | image: multi_level/example 9 | config: /config/py.yaml 10 | -------------------------------------------------------------------------------- /examples/multi_level/configs/sandboxes/socket.yaml: -------------------------------------------------------------------------------- 1 | allow-users: [1] 2 | allow-groups: [0-1] 3 | image-dir: /var/lib/lithos/images 4 | image-dir-levels: 2 5 | uid-map: 6 | - {outside: 10000, inside: 0, count: 2} 7 | gid-map: 8 | - {outside: 10000, inside: 0, count: 2} 9 | 
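# port 77 matches the `tcp-ports` entry in this example's code/socket.yaml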
allow-tcp-ports: [77] 10 | -------------------------------------------------------------------------------- /examples/py/code/py.yaml: -------------------------------------------------------------------------------- 1 | kind: Command 2 | user_id: 1 3 | volumes: 4 | /tmp: !Tmpfs { size: 100Mi } 5 | memory_limit: 104857600 6 | cpu_shares: 3 7 | interactive: true 8 | executable: /usr/bin/python3 9 | arguments: [] 10 | -------------------------------------------------------------------------------- /examples/py/code/sock.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(fileno=3) 7 | while True: 8 | s, a = sock.accept() 9 | s.send(b'hello') 10 | s.close() 11 | 12 | -------------------------------------------------------------------------------- /examples/py/code/socket.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(fileno=3) 7 | while True: 8 | s, a = sock.accept() 9 | s.write(b'hello') 10 | s.close() 11 | 12 | -------------------------------------------------------------------------------- /examples/py/code/socket.yaml: -------------------------------------------------------------------------------- 1 | kind: Daemon 2 | user_id: 1 3 | volumes: 4 | /tmp: !Tmpfs { size: 100Mi } 5 | memory_limit: 104857600 6 | cpu_shares: 3 7 | executable: /usr/bin/python3 8 | arguments: [/code/sock.py] 9 | tcp-ports: 10 | 77: 11 | fd: 3 12 | -------------------------------------------------------------------------------- /examples/py/configs/master.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tailhook/lithos/eaf420161293864dbecd435266b73635f1ce9bb0/examples/py/configs/master.yaml -------------------------------------------------------------------------------- /examples/py/configs/processes/socket.yaml: -------------------------------------------------------------------------------- 1 | socket: 2 | instances: 2 3 | image: py-example 4 | config: /config/socket.yaml 5 | 6 | py: 7 | kind: Command 8 | image: py-example 9 | config: /config/py.yaml 10 | -------------------------------------------------------------------------------- /examples/py/configs/sandboxes/socket.yaml: -------------------------------------------------------------------------------- 1 | allow-users: [1] 2 | allow-groups: [0-1] 3 | image-dir: /var/lib/lithos/images 4 | uid-map: 5 | - {outside: 10000, inside: 0, count: 2} 6 | gid-map: 7 | - {outside: 10000, inside: 0, count: 2} 8 | allow-tcp-ports: [77] 9 | -------------------------------------------------------------------------------- /examples/py_bridged/code/py.yaml: -------------------------------------------------------------------------------- 1 | kind: Command 2 | user_id: 1 3 | volumes: 4 | /tmp: !Tmpfs { size: 100Mi } 5 | memory_limit: 104857600 6 | cpu_shares: 3 7 | interactive: true 8 | executable: /usr/bin/python3 9 | arguments: [] 10 | -------------------------------------------------------------------------------- /examples/py_bridged/code/sock.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 7 | sock.bind(('0.0.0.0', 80)) 8 | sock.listen() 9 | while True: 10 | s, a = sock.accept() 11 | 
s.send(b'hello') 12 | s.close() 13 | 14 | -------------------------------------------------------------------------------- /examples/py_bridged/code/socket.yaml: -------------------------------------------------------------------------------- 1 | kind: Daemon 2 | user_id: 1 3 | volumes: 4 | /tmp: !Tmpfs { size: 100Mi } 5 | memory_limit: 104857600 6 | cpu_shares: 3 7 | executable: /usr/bin/python3 8 | arguments: [/code/sock.py] 9 | -------------------------------------------------------------------------------- /examples/py_bridged/configs/master.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tailhook/lithos/eaf420161293864dbecd435266b73635f1ce9bb0/examples/py_bridged/configs/master.yaml -------------------------------------------------------------------------------- /examples/py_bridged/configs/processes/socket.yaml: -------------------------------------------------------------------------------- 1 | socket: 2 | instances: 1 3 | image: py_bridged-example 4 | config: /config/socket.yaml 5 | ip-addresses: [10.41.0.2] 6 | 7 | py: 8 | kind: Command 9 | image: py_bridged-example 10 | config: /config/py.yaml 11 | ip-addresses: [10.41.0.3] 12 | -------------------------------------------------------------------------------- /examples/py_bridged/configs/sandboxes/socket.yaml: -------------------------------------------------------------------------------- 1 | allow-users: [1] 2 | allow-groups: [0-1] 3 | image-dir: /var/lib/lithos/images 4 | uid-map: 5 | - {outside: 10000, inside: 0, count: 2} 6 | gid-map: 7 | - {outside: 10000, inside: 0, count: 2} 8 | allow-tcp-ports: [77] 9 | 10 | bridged-network: 11 | bridge: br0 12 | network: 10.71.0.0/16 13 | # default_gateway: 10.71.0.1 14 | # after-setup-command: [/usr/bin/arping, -U, -c1, '@{container_ip}'] 15 | -------------------------------------------------------------------------------- /examples/py_systemd/code/py.yaml: -------------------------------------------------------------------------------- 1 | kind: Command 2 | variables: 3 | hello: !TcpPort 4 | shit: !Choice [ru, ua] 5 | user_id: 1 6 | volumes: 7 | /tmp: !Tmpfs { size: 100Mi } 8 | memory_limit: 104857600 9 | cpu_shares: 3 10 | interactive: true 11 | executable: /usr/bin/python3 12 | arguments: [] 13 | -------------------------------------------------------------------------------- /examples/py_systemd/code/sock.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import socket 4 | 5 | assert os.environ["LISTEN_FDS"] == "1" 6 | assert os.environ["LISTEN_FDNAMES"] == "input_port" 7 | assert os.environ["LISTEN_PID"] == str(os.getpid()) 8 | 9 | sock = socket.socket(fileno=3) 10 | while True: 11 | s, a = sock.accept() 12 | s.send(b'hello') 13 | s.close() 14 | 15 | -------------------------------------------------------------------------------- /examples/py_systemd/code/socket.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(fileno=3) 7 | while True: 8 | s, a = sock.accept() 9 | s.write(b'hello') 10 | s.close() 11 | 12 | -------------------------------------------------------------------------------- /examples/py_systemd/code/socket.yaml: -------------------------------------------------------------------------------- 1 | variables: 2 | input_port: !TcpPort { activation: systemd } 3 | kind: Daemon 4 | user_id: 1 5 | volumes: 6 | 
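  # the container image is mounted read-only, so a writable in-memory /tmp is declared explicitly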
/tmp: !Tmpfs { size: 100Mi } 7 | memory_limit: 104857600 8 | cpu_shares: 3 9 | executable: /usr/bin/python3 10 | arguments: [/code/sock.py] 11 | -------------------------------------------------------------------------------- /examples/py_systemd/configs/master.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tailhook/lithos/eaf420161293864dbecd435266b73635f1ce9bb0/examples/py_systemd/configs/master.yaml -------------------------------------------------------------------------------- /examples/py_systemd/configs/processes/socket.yaml: -------------------------------------------------------------------------------- 1 | socket: 2 | instances: 2 3 | image: py_systemd-example 4 | variables: 5 | input_port: 77 6 | config: /config/socket.yaml 7 | 8 | py: 9 | kind: Command 10 | image: py_systemd-example 11 | config: /config/py.yaml 12 | -------------------------------------------------------------------------------- /examples/py_systemd/configs/sandboxes/socket.yaml: -------------------------------------------------------------------------------- 1 | allow-users: [1] 2 | allow-groups: [0-1] 3 | image-dir: /var/lib/lithos/images 4 | uid-map: 5 | - {outside: 10000, inside: 0, count: 2} 6 | gid-map: 7 | - {outside: 10000, inside: 0, count: 2} 8 | allow-tcp-ports: [77] 9 | -------------------------------------------------------------------------------- /examples/py_var/code/py.yaml: -------------------------------------------------------------------------------- 1 | kind: Command 2 | variables: 3 | hello: !TcpPort 4 | shit: !Choice [ru, ua] 5 | user_id: 1 6 | volumes: 7 | /tmp: !Tmpfs { size: 100Mi } 8 | memory_limit: 104857600 9 | cpu_shares: 3 10 | interactive: true 11 | executable: /usr/bin/python3 12 | arguments: [] 13 | -------------------------------------------------------------------------------- /examples/py_var/code/sock.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(fileno=3) 7 | while True: 8 | s, a = sock.accept() 9 | s.send(b'hello') 10 | s.close() 11 | 12 | -------------------------------------------------------------------------------- /examples/py_var/code/socket.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import socket 3 | 4 | print("VER",sys.version) 5 | 6 | sock = socket.socket(fileno=3) 7 | while True: 8 | s, a = sock.accept() 9 | s.write(b'hello') 10 | s.close() 11 | 12 | -------------------------------------------------------------------------------- /examples/py_var/code/socket.yaml: -------------------------------------------------------------------------------- 1 | variables: 2 | input_port: !TcpPort 3 | kind: Daemon 4 | user_id: 1 5 | volumes: 6 | /tmp: !Tmpfs { size: 100Mi } 7 | memory_limit: 104857600 8 | cpu_shares: 3 9 | executable: /usr/bin/python3 10 | arguments: [/code/sock.py] 11 | tcp-ports: 12 | "@{input_port}": 13 | fd: 3 14 | -------------------------------------------------------------------------------- /examples/py_var/configs/master.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tailhook/lithos/eaf420161293864dbecd435266b73635f1ce9bb0/examples/py_var/configs/master.yaml -------------------------------------------------------------------------------- /examples/py_var/configs/processes/socket.yaml: 
-------------------------------------------------------------------------------- 1 | socket: 2 | instances: 2 3 | image: py_var-example 4 | variables: 5 | input_port: 77 6 | config: /config/socket.yaml 7 | 8 | py: 9 | kind: Command 10 | image: py_var-example 11 | config: /config/py.yaml 12 | -------------------------------------------------------------------------------- /examples/py_var/configs/sandboxes/socket.yaml: -------------------------------------------------------------------------------- 1 | allow-users: [1] 2 | allow-groups: [0-1] 3 | image-dir: /var/lib/lithos/images 4 | uid-map: 5 | - {outside: 10000, inside: 0, count: 2} 6 | gid-map: 7 | - {outside: 10000, inside: 0, count: 2} 8 | allow-tcp-ports: [77] 9 | -------------------------------------------------------------------------------- /src/bin/lithos_cmd.rs: -------------------------------------------------------------------------------- 1 | extern crate argparse; 2 | extern crate libc; 3 | extern crate lithos; 4 | extern crate quire; 5 | extern crate regex; 6 | extern crate serde_json; 7 | extern crate unshare; 8 | #[macro_use] extern crate log; 9 | 10 | 11 | use std::env; 12 | use std::str::FromStr; 13 | use std::process::exit; 14 | use std::path::{Path, PathBuf}; 15 | use std::io::{stderr, Write}; 16 | use std::collections::BTreeMap; 17 | 18 | use argparse::{ArgumentParser, Parse, List, StoreTrue, StoreOption, Print}; 19 | use libc::getpid; 20 | use quire::{parse_config, Options}; 21 | use regex::Regex; 22 | use serde_json::to_string; 23 | use unshare::{Command, Namespace}; 24 | 25 | use lithos::setup::{clean_child, init_logging}; 26 | use lithos::master_config::{MasterConfig, create_master_dirs}; 27 | use lithos::sandbox_config::SandboxConfig; 28 | use lithos::child_config::{ChildConfig, ChildKind}; 29 | 30 | 31 | fn run(master_cfg: &Path, sandbox_name: String, 32 | command_name: String, args: Vec, 33 | log_stderr: bool, log_level: Option) 34 | -> Result<(), String> 35 | { 36 | let master: MasterConfig = try!(parse_config(&master_cfg, 37 | &MasterConfig::validator(), &Options::default()) 38 | .map_err(|e| format!("Error reading master config: {}", e))); 39 | try!(create_master_dirs(&master)); 40 | 41 | if !Regex::new(r"^[\w-]+$").unwrap().is_match(&sandbox_name) { 42 | return Err(format!("Wrong sandbox name: {}", sandbox_name)); 43 | } 44 | if !Regex::new(r"^[\w-]+$").unwrap().is_match(&command_name) { 45 | return Err(format!("Wrong command name: {}", command_name)); 46 | } 47 | 48 | let sandbox: SandboxConfig = try!(parse_config( 49 | &master_cfg.parent().unwrap() 50 | .join(&master.sandboxes_dir).join(sandbox_name.clone() + ".yaml"), 51 | &SandboxConfig::validator(), &Options::default()) 52 | .map_err(|e| format!("Error reading sandbox config: {}", e))); 53 | 54 | let log_file; 55 | if let Some(ref fname) = sandbox.log_file { 56 | log_file = master.default_log_dir.join(fname); 57 | } else { 58 | log_file = master.default_log_dir.join(format!("{}.log", sandbox_name)); 59 | } 60 | try!(init_logging(&master, &log_file, 61 | &format!("{}-{}", master.syslog_app_name, sandbox_name), 62 | log_stderr, 63 | log_level 64 | .or(sandbox.log_level 65 | .and_then(|x| FromStr::from_str(&x).ok())) 66 | .or_else(|| FromStr::from_str(&master.log_level).ok()) 67 | .unwrap_or(log::LogLevel::Warn))); 68 | 69 | let cfg = master_cfg.parent().unwrap() 70 | .join(&master.processes_dir) 71 | .join(sandbox.config_file.as_ref().unwrap_or( 72 | &PathBuf::from(&(sandbox_name.clone() + ".yaml")))); 73 | debug!("Children config {:?}", cfg); 74 | let 
sandbox_children: BTreeMap<String, ChildConfig>; 75 | sandbox_children = try!(parse_config(&cfg, 76 | &ChildConfig::mapping_validator(), &Options::default()) 77 | .map_err(|e| format!("Error reading children config: {}", e))); 78 | let child_cfg = try!(sandbox_children.get(&command_name) 79 | .ok_or(format!("Command {:?} not found", command_name))); 80 | 81 | if child_cfg.kind != ChildKind::Command { 82 | return Err(format!("The target container is: {:?}", child_cfg.kind)); 83 | } 84 | 85 | let child_cfg = child_cfg.instantiate(0) 86 | .map_err(|e| format!("can't instantiate: {}", e))?; 87 | 88 | let name = format!("{}/cmd.{}.{}", sandbox_name, 89 | command_name, unsafe { getpid() }); 90 | 91 | let mut cmd = Command::new(env::current_exe().unwrap() 92 | .parent().unwrap().join("lithos_knot")); 93 | 94 | // Name is first here, so it's easily visible in ps 95 | cmd.arg("--name"); 96 | cmd.arg(&name); 97 | 98 | cmd.arg("--master"); 99 | cmd.arg(master_cfg); 100 | cmd.arg("--config"); 101 | cmd.arg(to_string(&child_cfg).unwrap()); 102 | cmd.env_clear(); 103 | cmd.env("TERM", env::var("TERM").unwrap_or("dumb".to_string())); 104 | if let Ok(x) = env::var("RUST_LOG") { 105 | cmd.env("RUST_LOG", x); 106 | } 107 | if let Ok(x) = env::var("RUST_BACKTRACE") { 108 | cmd.env("RUST_BACKTRACE", x); 109 | } 110 | cmd.arg("--"); 111 | cmd.args(&args); 112 | cmd.unshare(&[Namespace::Mount, Namespace::Uts, 113 | Namespace::Ipc, Namespace::Pid]); 114 | 115 | info!("Running {:?}", cmd); 116 | 117 | let res = match cmd.status() { 118 | Ok(x) if x.success() => { 119 | info!("Command {:?} {}", cmd, x); 120 | Ok(()) 121 | } 122 | Ok(x) => Err(format!("Command {:?} {}", cmd, x)), 123 | Err(e) => Err(format!("Can't run {:?}: {}", cmd, e)), 124 | }; 125 | 126 | clean_child(&name, &master, false); 127 | 128 | return res; 129 | } 130 | 131 | fn main() { 132 | let mut master_config = PathBuf::from("/etc/lithos/master.yaml"); 133 | let mut command_name = "".to_string(); 134 | let mut sandbox_name = "".to_string(); 135 | let mut args = vec!(); 136 | let mut log_stderr: bool = false; 137 | let mut log_level: Option<log::LogLevel> = None; 138 | { 139 | let mut ap = ArgumentParser::new(); 140 | ap.set_description("Runs single ad-hoc command"); 141 | ap.refer(&mut master_config) 142 | .add_option(&["--master"], Parse, 143 | "Name of the master configuration file \ 144 | (default /etc/lithos/master.yaml)") 145 | .metavar("FILE"); 146 | ap.refer(&mut log_stderr) 147 | .add_option(&["--log-stderr"], StoreTrue, 148 | "Print debugging info to stderr"); 149 | ap.refer(&mut log_level) 150 | .add_option(&["--log-level"], StoreOption, 151 | "Set log level (default info for now)"); 152 | ap.refer(&mut sandbox_name) 153 | .add_argument("sandbox", Parse, 154 | "Name of the sandbox to run command for") 155 | .required(); 156 | ap.refer(&mut command_name) 157 | .add_argument("name", Parse, 158 | "Name of the command to run") 159 | .required(); 160 | ap.refer(&mut args) 161 | .add_argument("argument", List, 162 | "Arguments for the command"); 163 | ap.add_option(&["--version"], 164 | Print(env!("CARGO_PKG_VERSION").to_string()), 165 | "Show version"); 166 | ap.stop_on_first_argument(true); 167 | match ap.parse_args() { 168 | Ok(()) => {} 169 | Err(x) => { 170 | exit(x); 171 | } 172 | } 173 | } 174 | match run(&master_config, sandbox_name, command_name, args, 175 | log_stderr, log_level) 176 | { 177 | Ok(()) => { 178 | exit(0); 179 | } 180 | Err(e) => { 181 | write!(&mut stderr(), "Fatal error: {}\n", e).ok(); 182 | error!("Fatal error: {}", e); 183 | exit(1); 184 | } 185 | }
186 | } 187 | -------------------------------------------------------------------------------- /src/bin/lithos_crypt.rs: -------------------------------------------------------------------------------- 1 | extern crate base64; 2 | extern crate blake2; 3 | extern crate rand; 4 | extern crate regex; 5 | extern crate lithos; 6 | extern crate ssh_keys; 7 | extern crate sha2; 8 | extern crate crypto; 9 | #[macro_use] extern crate failure; 10 | #[macro_use] extern crate structopt; 11 | 12 | 13 | use std::fs::File; 14 | use std::io::{Read, BufReader, BufRead, Write, stdout, stderr}; 15 | use std::path::Path; 16 | use std::process::exit; 17 | 18 | use blake2::{Blake2b, digest::VariableOutput, digest::Input}; 19 | use failure::{Error, ResultExt}; 20 | use regex::Regex; 21 | use ssh_keys::{PublicKey, PrivateKey, openssh}; 22 | use structopt::StructOpt; 23 | 24 | use lithos::nacl; 25 | 26 | 27 | #[derive(Debug, StructOpt)] 28 | #[structopt(name = "lithos_crypt", 29 | about = "An utility to encrypt secrets for lithos. \ 30 | It also allows to decrypt secrets for introspection if \ 31 | you have access for private key (only for debugging)")] 32 | enum Options { 33 | #[structopt(name="encrypt")] 34 | Encrypt(EncryptOpt), 35 | #[structopt(name="decrypt")] 36 | Decrypt(DecryptOpt), 37 | #[structopt(name="check-key")] 38 | CheckKey(CheckKeyOpt), 39 | } 40 | 41 | #[derive(Debug, StructOpt)] 42 | #[structopt(about = "Encrypt secret value to put in config")] 43 | pub struct EncryptOpt { 44 | #[structopt(long="key-file", short="k", help=" 45 | A openssh-formatted ed25519 public key to use for encryption 46 | ", parse(try_from_str="parse_public_key"))] 47 | key: PublicKey, 48 | #[structopt(long="data", short="d", help="data to encrypt")] 49 | data: String, 50 | #[structopt(long="namespace", short="n", help=" 51 | secrets namespace. Only processes \"authorized\" to read this namespace 52 | will be able do decrypt the data. 
53 | ", default_value="", parse(try_from_str="validate_namespace"))] 54 | namespace: String, 55 | } 56 | 57 | #[derive(Debug, StructOpt)] 58 | #[structopt(about = "Check that the secret value is encrypted \ 59 | with specified public key")] 60 | pub struct CheckKeyOpt { 61 | #[structopt(long="key-file", short="k", help=" 62 | A openssh-formatted ed25519 public key to use for encryption 63 | ", parse(try_from_str="parse_public_key"))] 64 | key: PublicKey, 65 | #[structopt(long="data", short="d", help="data to encrypt")] 66 | data: String, 67 | } 68 | 69 | #[derive(Debug, StructOpt)] 70 | #[structopt(about = "Decrypt secret value from config")] 71 | pub struct DecryptOpt { 72 | #[structopt(long="key-file", short="i", help=" 73 | A openssh-formatted ed25519 private key to use for decryption 74 | ", parse(try_from_str="parse_private_key"))] 75 | key: PrivateKey, 76 | #[structopt(long="data", short="d", help="base64-encoded data to decrypt")] 77 | data: String, 78 | } 79 | 80 | fn validate_namespace(namespace: &str) -> Result<String, Error> { 81 | if !Regex::new("^[a-zA-Z0-9_.-]*$").expect("valid re").is_match(namespace) { 82 | bail!("invalid namespace, \ 83 | valid one should match regex `^[a-zA-Z0-9_.-]*$`"); 84 | } 85 | Ok(namespace.to_string()) 86 | } 87 | 88 | fn parse_public_key(filename: &str) -> Result<PublicKey, Error> { 89 | let mut buf = String::with_capacity(1024); 90 | File::open(filename) 91 | .and_then(|f| BufReader::new(f).read_line(&mut buf)) 92 | .context(Path::new(filename).display().to_string())?; 93 | let key = openssh::parse_public_key(&buf)?; 94 | Ok(key) 95 | } 96 | 97 | fn parse_private_key(filename: &str) -> Result<PrivateKey, Error> { 98 | let mut buf = String::with_capacity(1024); 99 | File::open(filename) 100 | .and_then(|mut f| f.read_to_string(&mut buf)) 101 | .context(Path::new(filename).display().to_string())?; 102 | let mut key = openssh::parse_private_key(&buf)?; 103 | Ok(key.pop().expect("at least one key parsed")) 104 | } 105 | 106 | fn b2_short_hash(data: &[u8]) -> String { 107 | let mut buf = [0u8; 6]; 108 | let mut hash: Blake2b = VariableOutput::new(buf.len()).expect("blake2b"); 109 | hash.process(data); 110 | hash.variable_result(&mut buf[..]).expect("blake2b"); 111 | return base64::encode(&buf[..]) 112 | } 113 | 114 | fn encrypt(e: EncryptOpt) -> Result<(), Error> { 115 | let key_bytes = match e.key { 116 | PublicKey::Ed25519(key) => key, 117 | _ => bail!("Only ed25519 keys are supported"), 118 | }; 119 | let plaintext = format!("{}:{}", e.namespace, e.data); 120 | let cypher = nacl::crypto_box_edwards_seal( 121 | plaintext.as_bytes(), &key_bytes[..]); 122 | let mut buf = Vec::with_capacity(cypher.len() + 24); 123 | buf.write(&cypher).unwrap(); 124 | let data = base64::encode(&buf); 125 | println!("v2:{}:{}:{}:{}", 126 | b2_short_hash(&key_bytes[..]), 127 | b2_short_hash(e.namespace.as_bytes()), 128 | b2_short_hash(e.data.as_bytes()), 129 | data); 130 | Ok(()) 131 | } 132 | 133 | fn check_key(o: CheckKeyOpt) -> Result<(), Error> { 134 | let key_bytes = match o.key { 135 | PublicKey::Ed25519(key) => key, 136 | _ => bail!("Only ed25519 keys are supported"), 137 | }; 138 | if !o.data.starts_with("v2:") { 139 | bail!("Only v2 secrets are supported"); 140 | } 141 | let mut it = o.data.splitn(3, ":"); 142 | it.next(); // skip version 143 | let key_hash = it.next().ok_or(format_err!("bad format of data"))?; 144 | if b2_short_hash(&key_bytes) != key_hash { 145 | bail!("key doesn't match"); 146 | } 147 | Ok(()) 148 | } 149 | 150 | fn decrypt(e: DecryptOpt) -> Result<(), Error> { 151 | let key_bytes = match e.key
{ 152 | PrivateKey::Ed25519(key) => key, 153 | _ => bail!("Only ed25519 keys are supported"), 154 | }; 155 | let (private_key, public_key) = key_bytes.split_at(32); 156 | if !e.data.starts_with("v2:") { 157 | bail!("Only v2 secrets are supported"); 158 | } 159 | let mut it = e.data.split(":"); 160 | it.next(); // skip version 161 | let (key_hash, ns_hash, secr_hash, cipher) = { 162 | match (it.next(), it.next(), it.next(), it.next(), it.next()) { 163 | (Some(key), Some(ns), Some(secr), Some(cipher), None) => { 164 | (key, ns, secr, base64::decode(cipher)?) 165 | } 166 | _ => bail!("invalid key format"), 167 | } 168 | }; 169 | 170 | let plain = nacl::crypto_box_edwards_seal_open( 171 | &cipher, public_key, private_key)?; 172 | let mut pair = plain.splitn(2, |&x| x == b':'); 173 | let namespace = pair.next().unwrap(); 174 | let secret = pair.next().ok_or(format_err!("decrypted data is invalid"))?; 175 | 176 | if b2_short_hash(public_key) != key_hash { 177 | bail!("invalid key hash"); 178 | } 179 | if b2_short_hash(&namespace) != ns_hash { 180 | bail!("invalid namespace hash"); 181 | } 182 | if b2_short_hash(&secret) != secr_hash { 183 | bail!("invalid secret hash"); 184 | } 185 | 186 | let mut err = stderr(); 187 | err.write_all(&namespace)?; 188 | err.write_all(b":")?; 189 | err.flush()?; 190 | let mut out = stdout(); 191 | out.write_all(&secret)?; 192 | out.flush()?; 193 | err.write_all(b"\n")?; // nicer in print 194 | Ok(()) 195 | } 196 | 197 | fn main() { 198 | use Options::*; 199 | let opt = Options::from_args(); 200 | let res = match opt { 201 | Encrypt(e) => encrypt(e), 202 | Decrypt(d) => decrypt(d), 203 | CheckKey(c) => check_key(c), 204 | }; 205 | match res { 206 | Ok(()) => { 207 | exit(0); 208 | } 209 | Err(e) => { 210 | eprintln!("{}", e); 211 | exit(1); 212 | } 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /src/bin/lithos_knot/config.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use lithos::container_config::ContainerConfig; 4 | use lithos::child_config::ChildInstance; 5 | use lithos::utils::temporary_change_root; 6 | 7 | use quire::{parse_config, Options}; 8 | 9 | 10 | pub fn container_config(root: &Path, child_cfg: &ChildInstance) 11 | -> Result 12 | { 13 | return temporary_change_root(root, || { 14 | parse_config(&child_cfg.config, 15 | &ContainerConfig::validator(), &Options::default()) 16 | .map_err(|e| e.to_string()) 17 | }); 18 | } 19 | -------------------------------------------------------------------------------- /src/bin/lithos_knot/secrets.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{BTreeMap, HashSet}; 2 | use std::io::Read; 3 | use std::fs::{File}; 4 | use std::os::unix::fs::MetadataExt; 5 | use std::path::Path; 6 | use std::str::from_utf8; 7 | 8 | use base64; 9 | use blake2::{Blake2b, digest::VariableOutput, digest::Input}; 10 | use failure::{Error, ResultExt}; 11 | use quire::{parse_config, Options}; 12 | use ssh_keys::{PrivateKey, openssh}; 13 | 14 | use lithos::nacl; 15 | use lithos::sandbox_config::SandboxConfig; 16 | use lithos::child_config::ChildInstance; 17 | use lithos::container_config::{environ_validator}; 18 | 19 | 20 | fn parse_private_key(filename: &Path) -> Result, Error> { 21 | let mut buf = String::with_capacity(1024); 22 | let mut f = File::open(filename) 23 | .context(Path::new(filename).display().to_string())?; 24 | let meta = f.metadata() 25 | 
.context(Path::new(filename).display().to_string())?; 26 | if meta.uid() != 0 { 27 | bail!("Key must be owned by root"); 28 | } 29 | if meta.mode() & 0o777 & !0o600 != 0 { 30 | bail!("Key's mode must be 0600"); 31 | } 32 | f.read_to_string(&mut buf) 33 | .context(Path::new(filename).display().to_string())?; 34 | Ok(openssh::parse_private_key(&buf)?) 35 | } 36 | 37 | fn b2_short_hash(data: &[u8]) -> String { 38 | let mut buf = [0u8; 6]; 39 | let mut hash: Blake2b = VariableOutput::new(buf.len()).expect("blake2b"); 40 | hash.process(data); 41 | hash.variable_result(&mut buf[..]).expect("blake2b"); 42 | return base64::encode(&buf[..]) 43 | } 44 | 45 | fn decrypt(key: &PrivateKey, namespaces: &HashSet<&str>, value: &str) 46 | -> Result<String, Error> 47 | { 48 | let key_bytes = match *key { 49 | PrivateKey::Ed25519(key) => key, 50 | _ => bail!("Only ed25519 keys are supported"), 51 | }; 52 | let (private_key, public_key) = key_bytes.split_at(32); 53 | if !value.starts_with("v2:") { 54 | bail!("Only v2 secrets are supported"); 55 | } 56 | let mut it = value.split(":"); 57 | it.next(); // skip version 58 | let (key_hash, ns_hash, secr_hash, cipher) = { 59 | match (it.next(), it.next(), it.next(), it.next(), it.next()) { 60 | (Some(key), Some(ns), Some(secr), Some(cipher), None) => { 61 | (key, ns, secr, base64::decode(cipher)?) 62 | } 63 | _ => bail!("invalid key format"), 64 | } 65 | }; 66 | 67 | let plain = nacl::crypto_box_edwards_seal_open( 68 | &cipher, public_key, private_key)?; 69 | 70 | let mut pair = plain.splitn(2, |&x| x == b':'); 71 | let namespace = from_utf8(pair.next().unwrap()) 72 | .map_err(|_| format_err!("can't decode namespace from utf-8"))?; 73 | let secret = pair.next().ok_or(format_err!("decrypted data is invalid"))?; 74 | 75 | if b2_short_hash(public_key) != key_hash { 76 | bail!("invalid key hash"); 77 | } 78 | if b2_short_hash(namespace.as_bytes()) != ns_hash { 79 | bail!("invalid namespace hash"); 80 | } 81 | if b2_short_hash(&secret) != secr_hash { 82 | bail!("invalid secret hash"); 83 | } 84 | if !namespaces.contains(namespace) { 85 | bail!("expected namespaces {:?} got {:?}", namespaces, namespace); 86 | } 87 | if secret.contains(&0) { 88 | bail!("no null bytes allowed in secret"); 89 | } 90 | 91 | String::from_utf8(secret.to_vec()) 92 | .map_err(|_| format_err!("Can't decode secret as utf-8")) 93 | } 94 | 95 | fn decrypt_pair(keys: &[PrivateKey], namespaces: &HashSet<&str>, 96 | values: &[String]) 97 | -> Result<String, Vec<Error>> 98 | { 99 | let mut errs = Vec::new(); 100 | for key in keys { 101 | for value in values { 102 | match decrypt(key, namespaces, value) { 103 | Ok(value) => return Ok(value), 104 | Err(e) => errs.push(e), 105 | } 106 | } 107 | } 108 | Err(errs) 109 | } 110 | 111 | pub fn read_keys(sandbox: &SandboxConfig) 112 | -> Result<Vec<PrivateKey>, Error> 113 | { 114 | let keys = if let Some(ref filename) = sandbox.secrets_private_key { 115 | parse_private_key(&filename)?
116 | } else { 117 | bail!("No secrets key file defined to decode secrets"); 118 | }; 119 | return Ok(keys); 120 | } 121 | 122 | pub fn parse_file(path: &Path) -> Result<BTreeMap<String, Vec<String>>, String> 123 | { 124 | parse_config(&path, &environ_validator(), &Options::default()) 125 | .map_err(|e| e.to_string()) 126 | } 127 | 128 | pub fn decode(keys: &Vec<PrivateKey>, sandbox: &SandboxConfig, 129 | child_config: &ChildInstance, secrets: &BTreeMap<String, Vec<String>>) 130 | -> Result<BTreeMap<String, String>, Error> 131 | { 132 | let mut all_namespaces = HashSet::new(); 133 | if sandbox.secrets_namespaces.len() == 0 { 134 | all_namespaces.insert(""); 135 | } else { 136 | all_namespaces.extend( 137 | sandbox.secrets_namespaces.iter().map(|x| &x[..])) 138 | }; 139 | all_namespaces.extend( 140 | child_config.extra_secrets_namespaces.iter().map(|x| &x[..])); 141 | 142 | let mut res = BTreeMap::new(); 143 | 144 | for (name, values) in secrets { 145 | res.insert(name.clone(), decrypt_pair(&keys, &all_namespaces, values) 146 | .map_err(|e| { 147 | format_err!("Can't decrypt secret {:?}, errors: {}", name, 148 | e.iter().map(|x| x.to_string()) 149 | .collect::<Vec<_>>().join(", ")) 150 | })?); 151 | } 152 | 153 | Ok(res) 154 | } 155 | -------------------------------------------------------------------------------- /src/bin/lithos_ps/ascii.rs: -------------------------------------------------------------------------------- 1 | // This is a part of lithos_ps not lithos library 2 | use std::io::Error as IoError; 3 | use std::io::Write; 4 | use std::cmp::max; 5 | use std::fmt::Display; 6 | use self::Column::*; 7 | 8 | pub struct Printer { 9 | color: bool, 10 | buf: Vec<u8>, 11 | } 12 | 13 | #[derive(Clone, Copy)] 14 | pub struct PrinterFactory(bool); 15 | 16 | pub struct TreeNode { 17 | pub head: String, 18 | pub children: Vec<TreeNode>, 19 | } 20 | 21 | pub enum Column { 22 | Text(Vec), 23 | Bytes(Vec), 24 | Ordinal(Vec), 25 | Percent(Vec), 26 | } 27 | 28 | impl PrinterFactory { 29 | pub fn new(&self) -> Printer { 30 | let PrinterFactory(color) = *self; 31 | return Printer { 32 | color: color, 33 | buf: Vec::with_capacity(100), 34 | }; 35 | } 36 | } 37 | 38 | impl Printer { 39 | pub fn color_factory() -> PrinterFactory { 40 | return PrinterFactory(true); 41 | } 42 | pub fn plain_factory() -> PrinterFactory { 43 | return PrinterFactory(false); 44 | } 45 | pub fn norm<T: Display>(mut self, val: T) -> Printer { 46 | if self.buf.len() > 0 { 47 | self.buf.push(b' '); 48 | } 49 | write!(&mut self.buf, "{}", val).unwrap(); 50 | return self; 51 | } 52 | pub fn red<T: Display>(mut self, val: T) -> Printer { 53 | if self.buf.len() > 0 { 54 | self.buf.push(b' '); 55 | } 56 | if self.color { 57 | write!(&mut self.buf, "\x1b[31m\x1b[1m{}\x1b[0m\x1b[22m", 58 | val).unwrap(); 59 | } else { 60 | write!(&mut self.buf, "{}", val).unwrap(); 61 | } 62 | return self; 63 | } 64 | pub fn blue<T: Display>(mut self, val: T) -> Printer { 65 | if self.buf.len() > 0 { 66 | self.buf.push(b' '); 67 | } 68 | if self.color { 69 | write!(&mut self.buf, "\x1b[34m\x1b[1m{}\x1b[0m\x1b[22m", 70 | val).unwrap(); 71 | } else { 72 | write!(&mut self.buf, "{}", val).unwrap(); 73 | } 74 | return self; 75 | } 76 | pub fn green<T: Display>(mut self, val: T) -> Printer { 77 | if self.buf.len() > 0 { 78 | self.buf.push(b' '); 79 | } 80 | if self.color { 81 | write!(&mut self.buf, "\x1b[32m\x1b[1m{}\x1b[0m\x1b[22m", 82 | val).unwrap(); 83 | } else { 84 | write!(&mut self.buf, "{}", val).unwrap(); 85 | } 86 | return self; 87 | } 88 | pub fn map<F>(self, fun: F) -> Printer 89 | where F: Fn(Printer) -> Printer 90 | { 91 | fun(self) 92 | } 93 | pub fn unwrap(self) -> String { 94 | return
String::from_utf8(self.buf).unwrap(); 95 | } 96 | } 97 | 98 | impl TreeNode { 99 | pub fn print(&self, writer: &mut T) -> Result<(), IoError> { 100 | try!(write!(writer, "{}\n", self.head)); 101 | self._print_children(writer, " ") 102 | } 103 | pub fn _print_children(&self, writer: &mut T, indent: &str) 104 | -> Result<(), IoError> 105 | { 106 | if self.children.len() >= 2 { 107 | let childindent = indent.to_string() + "│ "; 108 | for child in self.children[..self.children.len()-1].iter() { 109 | try!(write!(writer, "{}├─{}\n", indent, child.head)); 110 | try!(child._print_children(writer, &childindent[..])); 111 | } 112 | } 113 | if let Some(child) = self.children.last() { 114 | let childindent = indent.to_string() + " "; 115 | try!(write!(writer, "{}└─{}\n", indent, child.head)); 116 | try!(child._print_children(writer, &childindent[..])); 117 | } 118 | return Ok(()); 119 | } 120 | 121 | } 122 | 123 | pub fn render_table(columns: &[(&'static str, Column)]) { 124 | let mut out_cols = Vec::new(); 125 | for &(ref title, ref col) in columns.iter() { 126 | match *col { 127 | Bytes(ref items) => { 128 | let max = items.iter().max().map(|&x| x).unwrap_or(1); 129 | let (k, unit) = match max { 130 | 1 ... 10240 => (1f64, "B"), 131 | 10241 ... 10485760 => (1024f64, "kiB"), 132 | 10485761 ... 10737418240 => (1048576f64, "MiB"), 133 | _ => (1073741824f64, "GiB"), 134 | }; 135 | let mut values = vec!(format!("{1:>0$}", 7+unit.len(), title)); 136 | values.extend(items.iter().map( 137 | |x| format!("{:7.1}{}", (*x as f64) / k, unit))); 138 | values.reverse(); 139 | out_cols.push(values); 140 | } 141 | Text(ref items) => { 142 | let maxlen = max(3, 143 | items.iter().map(|x| x.len()).max().unwrap_or(3)); 144 | let mut values = vec!(format!("{1:<0$}", maxlen, title)); 145 | values.extend(items.iter().map( 146 | |x| format!("{1:<0$}", maxlen, *x))); 147 | values.reverse(); 148 | out_cols.push(values); 149 | } 150 | Ordinal(ref items) => { 151 | let maxlen = max(3, items.iter().map( 152 | |x| format!("{}", x).len()).max().unwrap_or(3)); 153 | let mut values = vec!(format!("{1:>0$}", maxlen, title)); 154 | values.extend(items.iter().map( 155 | |x| format!("{1:0$}", maxlen, *x))); 156 | values.reverse(); 157 | out_cols.push(values); 158 | } 159 | Percent(ref items) => { 160 | let mut values = vec!(format!("{:>5}", title)); 161 | values.extend(items.iter().map( 162 | |x| format!("{:>5.1}", *x))); 163 | values.reverse(); 164 | out_cols.push(values); 165 | } 166 | } 167 | } 168 | loop { 169 | for ref mut lst in out_cols.iter_mut() { 170 | if lst.len() == 0 { 171 | return; 172 | } 173 | print!("{} ", lst.pop().unwrap()); 174 | } 175 | println!(""); 176 | } 177 | } 178 | 179 | #[cfg(test)] 180 | mod test { 181 | use super::TreeNode; 182 | 183 | fn write_tree(node: &TreeNode) -> String { 184 | let mut buf = Vec::with_capacity(100); 185 | node.print(&mut buf).unwrap(); 186 | return String::from_utf8(buf).unwrap(); 187 | } 188 | 189 | #[test] 190 | fn test_one_node() { 191 | assert_eq!(write_tree(&TreeNode { 192 | head: "parent".to_string(), 193 | children: vec!() 194 | }), String::from("\ 195 | parent\n\ 196 | ")); 197 | } 198 | 199 | #[test] 200 | fn test_many_nodes() { 201 | assert_eq!(write_tree(&TreeNode { 202 | head: "parent".to_string(), 203 | children: vec!(TreeNode { 204 | head: "child1".to_string(), 205 | children: vec!(TreeNode { 206 | head: "subchild".to_string(), 207 | children: vec!(), 208 | }), 209 | }, TreeNode { 210 | head: "child2".to_string(), 211 | children: vec!(TreeNode { 212 | head: 
"subchild".to_string(), 213 | children: vec!(), 214 | }), 215 | }) 216 | }), String::from("\ 217 | parent 218 | ├─child1 219 | │ └─subchild 220 | └─child2 221 | └─subchild\n\ 222 | ")); 223 | } 224 | } 225 | -------------------------------------------------------------------------------- /src/bin/lithos_switch.rs: -------------------------------------------------------------------------------- 1 | extern crate libc; 2 | extern crate nix; 3 | extern crate env_logger; 4 | extern crate regex; 5 | extern crate argparse; 6 | extern crate quire; 7 | #[macro_use] extern crate log; 8 | extern crate lithos; 9 | 10 | 11 | use std::env; 12 | use std::io::{stderr, Read, Write}; 13 | use std::process::exit; 14 | use std::path::{Path, PathBuf}; 15 | use std::str::FromStr; 16 | use std::fs::{File}; 17 | use std::fs::{copy, rename}; 18 | use std::process::{Command, Stdio}; 19 | 20 | use argparse::{ArgumentParser, Parse, StoreTrue, Print}; 21 | use quire::{parse_config, Options}; 22 | use nix::sys::signal::{SIGQUIT, kill}; 23 | use nix::unistd::Pid; 24 | 25 | use lithos::master_config::MasterConfig; 26 | use lithos::sandbox_config::SandboxConfig; 27 | 28 | 29 | fn switch_config(master_cfg: &Path, sandbox_name: String, config_file: &Path) 30 | -> Result<(), String> 31 | { 32 | match Command::new(env::current_exe().unwrap() 33 | .parent().unwrap().join("lithos_check")) 34 | .stdin(Stdio::inherit()) 35 | .stdout(Stdio::inherit()) 36 | .stderr(Stdio::inherit()) 37 | .arg("--config") 38 | .arg(&master_cfg) 39 | .arg("--sandbox") 40 | .arg(&sandbox_name) 41 | .arg("--alternate-config") 42 | .arg(&config_file) 43 | .output() 44 | { 45 | Ok(ref po) if po.status.code() == Some(0) => { } 46 | Ok(ref po) => { 47 | return Err(format!( 48 | "Configuration check failed with exit status: {}", 49 | po.status)); 50 | } 51 | Err(e) => { 52 | return Err(format!("Can't check configuration: {}", e)); 53 | } 54 | } 55 | info!("Checked. Proceeding"); 56 | 57 | let master: MasterConfig = match parse_config(&master_cfg, 58 | &MasterConfig::validator(), &Options::default()) 59 | { 60 | Ok(cfg) => cfg, 61 | Err(e) => { 62 | return Err(format!("Can't parse master config: {}", e)); 63 | } 64 | }; 65 | let sandbox_fn = master_cfg.parent().unwrap() 66 | .join(&master.sandboxes_dir) 67 | .join(&(sandbox_name.clone() + ".yaml")); 68 | let sandbox: SandboxConfig = match parse_config(&sandbox_fn, 69 | &SandboxConfig::validator(), &Options::default()) 70 | { 71 | Ok(cfg) => cfg, 72 | Err(e) => { 73 | return Err(format!("Can't parse sandbox config: {}", e)); 74 | } 75 | }; 76 | 77 | let target_fn = master_cfg.parent().unwrap() 78 | .join(&master.processes_dir) 79 | .join(sandbox.config_file.as_ref().unwrap_or( 80 | &PathBuf::from(&(sandbox_name.clone() + ".yaml")))); 81 | debug!("Target filename {:?}", target_fn); 82 | let tmp_filename = target_fn.with_file_name( 83 | &format!(".tmp.{}", sandbox_name)); 84 | try!(copy(&config_file, &tmp_filename) 85 | .map_err(|e| format!("Error copying: {}", e))); 86 | try!(rename(&tmp_filename, &target_fn) 87 | .map_err(|e| format!("Error replacing file: {}", e))); 88 | 89 | info!("Done. 
Sending SIGQUIT to lithos_tree"); 90 | let pid_file = master.runtime_dir.join("master.pid"); 91 | let mut buf = String::with_capacity(50); 92 | let read_pid = File::open(&pid_file) 93 | .and_then(|mut f| f.read_to_string(&mut buf)) 94 | .ok() 95 | .and_then(|_| FromStr::from_str(buf[..].trim()).ok()) 96 | .map(Pid::from_raw); 97 | match read_pid { 98 | Some(pid) if kill(pid, None).is_ok() => { 99 | kill(pid, SIGQUIT) 100 | .map_err(|e| error!("Error sending QUIT to master: {:?}", e)).ok(); 101 | } 102 | Some(pid) => { 103 | warn!("Process with pid {} is not running...", pid); 104 | } 105 | None => { 106 | warn!("Can't read pid file {}. Probably daemon is not running.", 107 | pid_file.display()); 108 | } 109 | }; 110 | 111 | return Ok(()); 112 | } 113 | 114 | 115 | fn main() { 116 | if env::var("RUST_LOG").is_err() { 117 | env::set_var("RUST_LOG", "warn"); 118 | } 119 | env_logger::init(); 120 | 121 | let mut master_config = PathBuf::from("/etc/lithos/master.yaml"); 122 | let mut verbose = false; 123 | let mut config_file = PathBuf::from(""); 124 | let mut sandbox_name = "".to_string(); 125 | { 126 | let mut ap = ArgumentParser::new(); 127 | ap.set_description("Checks if lithos configuration is ok"); 128 | ap.refer(&mut master_config) 129 | .add_option(&["--master"], Parse, 130 | "Name of the master configuration file \ 131 | (default /etc/lithos/master.yaml)") 132 | .metavar("FILE"); 133 | ap.refer(&mut verbose) 134 | .add_option(&["-v", "--verbose"], StoreTrue, 135 | "Verbose configuration"); 136 | ap.refer(&mut sandbox_name) 137 | .add_argument("sandbox", Parse, 138 | "Name of the sandbox which configuration will be switched for") 139 | .required() 140 | .metavar("NAME"); 141 | ap.refer(&mut config_file) 142 | .add_argument("new_config", Parse, " 143 | Name of the process configuration file for this sandbox to switch 144 | to. 
The file is copied over current config after configuration is 145 | validated and just before sending a signal to lithos_tree.") 146 | .metavar("FILE") 147 | .required(); 148 | ap.add_option(&["--version"], 149 | Print(env!("CARGO_PKG_VERSION").to_string()), 150 | "Show version"); 151 | match ap.parse_args() { 152 | Ok(()) => {} 153 | Err(x) => { 154 | exit(x); 155 | } 156 | } 157 | } 158 | match switch_config(&master_config, sandbox_name, &config_file) 159 | { 160 | Ok(()) => { 161 | exit(0); 162 | } 163 | Err(e) => { 164 | write!(&mut stderr(), "Fatal error: {}\n", e).unwrap(); 165 | exit(1); 166 | } 167 | } 168 | } 169 | -------------------------------------------------------------------------------- /src/bin/lithos_tree/args.rs: -------------------------------------------------------------------------------- 1 | use std::fs::File; 2 | use std::io::{self, Read}; 3 | use std::path::Path; 4 | use std::thread::sleep; 5 | use std::time::{Instant, Duration}; 6 | 7 | use nix::unistd::Pid; 8 | 9 | pub enum Child { 10 | Normal { name: String, config: String }, 11 | Zombie, 12 | Unidentified, 13 | Error, 14 | } 15 | 16 | pub fn read(pid: Pid, global_config: &Path) -> Child { 17 | use self::Child::*; 18 | let start = Instant::now(); 19 | loop { 20 | let mut buf = String::with_capacity(4096); 21 | match File::open(&format!("/proc/{}/cmdline", pid)) 22 | .and_then(|mut f| f.read_to_string(&mut buf)) 23 | { 24 | Ok(_) => {}, 25 | Err(ref e) if e.kind() == io::ErrorKind::NotFound => { 26 | // TODO(tailhook) actually already dead, but shouldn't happen 27 | return Zombie; 28 | } 29 | Err(e) => { 30 | warn!("Error opening /proc/{}/cmdline: {}", pid, e); 31 | return Error; 32 | } 33 | } 34 | let args: Vec<&str> = buf[..].splitn(8, '\0').collect(); 35 | if args[0].len() == 0 { 36 | return Zombie; 37 | } 38 | 39 | if Path::new(args[0]).file_name() 40 | .and_then(|x| x.to_str()) == Some("lithos_tree") 41 | { 42 | if start + Duration::new(1, 0) > Instant::now() { 43 | sleep(Duration::from_millis(2)); 44 | continue; 45 | } else { 46 | error!("Child did not exec'd in > 1 sec"); 47 | return Error; 48 | } 49 | } 50 | 51 | if args.len() != 8 52 | || Path::new(args[0]).file_name() 53 | .and_then(|x| x.to_str()) != Some("lithos_knot") 54 | || args[1] != "--name" 55 | || args[3] != "--master" 56 | || Path::new(args[4]) != global_config 57 | || args[5] != "--config" 58 | || args[7] != "" 59 | { 60 | return Unidentified; 61 | } 62 | return Normal { 63 | name: args[2].to_string(), 64 | config: args[6].to_string(), 65 | }; 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/cgroup.rs: -------------------------------------------------------------------------------- 1 | use std::rc::Rc; 2 | use std::io::{Write, BufRead, BufReader}; 3 | use std::fs::{File, create_dir, remove_dir, metadata}; 4 | use std::io::ErrorKind::NotFound; 5 | use std::fs::OpenOptions; 6 | use std::path::{Path, PathBuf}; 7 | use std::default::Default; 8 | use std::collections::BTreeMap; 9 | use libc::pid_t; 10 | use libc::getpid; 11 | 12 | use super::utils::relative; 13 | 14 | 15 | 16 | #[derive(PartialEq, Eq, PartialOrd, Ord)] 17 | pub struct CGroupPath(pub String, pub PathBuf); 18 | 19 | #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] 20 | pub enum Controller { 21 | Cpu, 22 | Memory, 23 | } 24 | 25 | 26 | #[derive(Default)] 27 | pub struct ParsedCGroups { 28 | pub all_groups: Vec>, 29 | pub by_name: BTreeMap>, 30 | } 31 | 32 | pub struct CGroups { 33 | full_paths: BTreeMap 34 | } 35 | 36 | 
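// Illustrative example (hypothetical values, not from the original source):
// a /proc/self/cgroup line such as
//     "6:cpu,cpuacct:/lithos.slice"
// parses into CGroupPath("cpu,cpuacct", "/lithos.slice"), which is pushed to
// `all_groups` and registered in `by_name` under both "cpu" and "cpuacct";
// a named hierarchy like "1:name=systemd:/" yields CGroupPath("systemd", "/")
// stored under the key "name".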
37 | pub fn parse_cgroups(pid: Option<pid_t>) -> Result<ParsedCGroups, String> { 38 | let path = pid.map(|x| format!("/proc/{}/cgroup", x)) 39 | .unwrap_or("/proc/self/cgroup".to_string()); 40 | let f = try!(File::open(&path) 41 | .map_err(|e| format!("Error reading cgroup: {}", e))); 42 | let f = BufReader::new(f); 43 | let mut res: ParsedCGroups = Default::default(); 44 | for line in f.lines() { 45 | let line = try!(line 46 | .map_err(|e| format!("Can't read CGroup file: {}", e))); 47 | if line.len() == 0 { 48 | continue; 49 | } 50 | // Line is in form of "123:ctr1[,ctr2][=folder]:/group/path" 51 | let mut chunks = line[..].splitn(3, ':'); 52 | try!(chunks.next().ok_or(format!("CGroup num expected"))); 53 | let namechunk = try!(chunks.next() 54 | .ok_or(format!("CGroup name expected"))); 55 | let mut namepair = namechunk.splitn(2, '='); 56 | let names = try!(namepair.next() 57 | .ok_or(format!("CGroup names expected"))); 58 | let group_name = namepair.next().unwrap_or(names).to_string(); 59 | let group_path = Path::new(try!(chunks.next() 60 | .ok_or(format!("CGroup path expected"))) 61 | .trim()); 62 | let grp = Rc::new(CGroupPath(group_name.clone(), 63 | group_path.to_owned())); 64 | res.all_groups.push(grp.clone()); 65 | for name in names.split(',') { 66 | if res.by_name.insert(name.to_string(), grp.clone()).is_some() { 67 | return Err(format!("Duplicate CGroup encountered")); 68 | } 69 | } 70 | } 71 | return Ok(res); 72 | } 73 | 74 | pub fn ensure_in_group(name: &String, controllers: &Vec<String>) 75 | -> Result<CGroups, String> 76 | { 77 | let default_controllers = vec!( 78 | "name".to_string(), 79 | "cpu".to_string(), 80 | "cpuacct".to_string(), 81 | "memory".to_string(), 82 | "blkio".to_string(), 83 | ); 84 | let controllers = if controllers.len() > 0 85 | { controllers } else { &default_controllers }; 86 | debug!("Setting up cgroup {} with controllers {:?}", name, controllers); 87 | // TODO(tailhook) do we need to customize cgroup mount points? 88 | let cgroup_base = Path::new("/sys/fs/cgroup"); 89 | 90 | let root_path = Path::new("/"); 91 | 92 | let parent_grp = try!(parse_cgroups(Some(1))); 93 | let old_grp = try!(parse_cgroups(None)); 94 | let mypid = unsafe { getpid() }; 95 | let mut res = CGroups { full_paths: BTreeMap::new() }; 96 | 97 | for ctr in controllers.iter() { 98 | let CGroupPath(ref rfolder, ref rpath) = **try!( 99 | parent_grp.by_name.get(ctr) 100 | .ok_or(format!("CGroup {} not mounted", ctr))); 101 | let CGroupPath(ref ofolder, ref opath) = **try!( 102 | old_grp.by_name.get(ctr) 103 | .ok_or(format!("CGroup {} not mounted", ctr))); 104 | if ofolder != rfolder { 105 | return Err(format!("Init process has CGroup hierarchy different \ 106 | from ours, we can't setup CGroups in any \ 107 | meaningful way in this case")); 108 | } 109 | 110 | // TODO(tailhook) do we need to customize nested groups? 111 | // TODO(tailhook) what if we *are* init process?
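// For example (hypothetical layout): if init's "cpu" hierarchy is mounted
// under the folder "cpu" with path "/" and `name` is "lithos.slice", the
// path below becomes "/lithos.slice" and the current pid is written to
// /sys/fs/cgroup/cpu/lithos.slice/tasks, creating that directory first if
// it does not exist yet.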
112 | let new_path = rpath.join(&name); 113 | 114 | if new_path == *opath { 115 | debug!("Already in cgroup {}:{}", ctr, new_path.display()); 116 | continue; 117 | } 118 | let fullpath = cgroup_base.join(&ofolder).join( 119 | relative(&new_path, &root_path)); 120 | if metadata(&fullpath).is_err() { 121 | debug!("Creating cgroup {:?}", fullpath); 122 | try!(create_dir(&fullpath) 123 | .map_err(|e| format!("Error creating cgroup dir {:?}: {}", 124 | fullpath, e))); 125 | } else { 126 | debug!("CGroup {} already exists", fullpath.display()); 127 | } 128 | debug!("Adding task to cgroup {}", fullpath.display()); 129 | try!(OpenOptions::new().write(true).open(&fullpath.join("tasks")) 130 | .and_then(|mut f| write!(&mut f, "{}", mypid)) 131 | .map_err(|e| format!( 132 | "Error adding myself (pid: {}) to the group {:?}: {}", 133 | mypid, fullpath, e))); 134 | match &ctr[..] { 135 | "cpu" => { 136 | res.full_paths.insert(Controller::Cpu, fullpath); 137 | } 138 | "memory" => { 139 | res.full_paths.insert(Controller::Memory, fullpath); 140 | } 141 | _ => {} 142 | }; 143 | } 144 | return Ok(res); 145 | } 146 | 147 | pub fn remove_child_cgroup(child: &str, master: &String, 148 | controllers: &Vec) 149 | -> Result<(), String> 150 | { 151 | // TODO(tailhook) do we need to customize cgroup mount points? 152 | let cgroup_base = PathBuf::from("/sys/fs/cgroup"); 153 | let default_controllers = vec!( 154 | "name".to_string(), 155 | "cpu".to_string(), 156 | "cpuacct".to_string(), 157 | "memory".to_string(), 158 | "blkio".to_string(), 159 | ); 160 | let controllers = if controllers.len() > 0 161 | { controllers } else { &default_controllers }; 162 | debug!("Removing cgroup {}", child); 163 | 164 | let root_path = PathBuf::from("/"); 165 | let parent_grp = try!(parse_cgroups(Some(1))); 166 | 167 | for ctr in controllers.iter() { 168 | let CGroupPath(ref folder, ref path) = **parent_grp.by_name.get(ctr) 169 | .expect("CGroups already checked"); 170 | let fullpath = cgroup_base.join(&folder) 171 | .join(relative(path, &root_path)) 172 | .join(&master).join(child); 173 | remove_dir(&fullpath) 174 | .map_err(|e| if e.kind() != NotFound { 175 | error!("Error removing cgroup {}: {}", fullpath.display(), e)}) 176 | .ok(); 177 | } 178 | return Ok(()); 179 | } 180 | 181 | impl CGroups { 182 | pub fn set_value(&self, ctr: Controller, key: &str, value: &str) 183 | -> Result<(), String> 184 | { 185 | let path = try!(self.full_paths.get(&ctr) 186 | .ok_or(format!("Controller {:?} is not initialized", ctr))); 187 | File::create(&path.join(key)) 188 | .and_then(|mut f| f.write_all(value.as_bytes())) 189 | .map_err(|e| format!("Can't write to cgroup path {:?}/{}: {}", 190 | path, key, e)) 191 | } 192 | pub fn set_value_if_exists(&self, ctr: Controller, key: &str, value: &str) 193 | -> Result<(), String> 194 | { 195 | let path = try!(self.full_paths.get(&ctr) 196 | .ok_or(format!("Controller {:?} is not initialized", ctr))); 197 | let full_path = path.join(key); 198 | if full_path.exists() { 199 | File::create(&full_path) 200 | .and_then(|mut f| f.write_all(value.as_bytes())) 201 | .map_err(|e| format!("Can't write to cgroup path {:?}/{}: {}", 202 | path, key, e)) 203 | } else { 204 | debug!("No cgroup setting {:?}", full_path); 205 | Ok(()) 206 | } 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/child_config.rs: -------------------------------------------------------------------------------- 1 | use failure::Error; 2 | use std::str::FromStr; 3 | use std::net::IpAddr; 4 
| use std::collections::BTreeMap; 5 | 6 | use quire::validate::{Structure, Scalar, Numeric, Mapping, Sequence}; 7 | use quire::{Options, parse_string}; 8 | 9 | #[derive(Serialize, Deserialize)] 10 | #[derive(Debug, PartialEq, Eq, Clone, Copy)] 11 | pub enum ChildKind { 12 | Daemon, 13 | Command, 14 | } 15 | 16 | // Note everything here should be stable-serializable 17 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 18 | pub struct ChildInstance { 19 | pub instances: usize, // legacy maybe remove somehow? 20 | pub image: String, 21 | pub config: String, 22 | #[serde(skip_serializing_if="BTreeMap::is_empty", default)] 23 | pub variables: BTreeMap<String, String>, 24 | #[serde(skip_serializing_if="Vec::is_empty", default)] 25 | pub extra_secrets_namespaces: Vec<String>, 26 | #[serde(skip_serializing_if="Option::is_none", default)] 27 | pub ip_address: Option<IpAddr>, 28 | pub kind: ChildKind, 29 | } 30 | 31 | fn one() -> usize { 1 } 32 | 33 | #[derive(Serialize, Deserialize, PartialEq, Debug, Clone)] 34 | pub struct ChildConfig { 35 | #[serde(default="one")] 36 | pub instances: usize, 37 | pub image: String, 38 | pub config: String, 39 | #[serde(skip_serializing_if="BTreeMap::is_empty", default)] 40 | pub variables: BTreeMap<String, String>, 41 | #[serde(skip_serializing_if="Vec::is_empty", default)] 42 | pub extra_secrets_namespaces: Vec<String>, 43 | #[serde(skip_serializing_if="Vec::is_empty", default)] 44 | pub ip_addresses: Vec<IpAddr>, 45 | pub kind: ChildKind, 46 | } 47 | 48 | impl ChildConfig { 49 | pub fn instantiate(&self, instance: usize) -> Result<ChildInstance, Error> 50 | { 51 | let cfg = ChildInstance { 52 | instances: 1, // TODO(tailhook) legacy, find a way to remove 53 | image: self.image.clone(), 54 | config: self.config.clone(), 55 | variables: self.variables.clone(), 56 | ip_address: if self.ip_addresses.len() > 0 { 57 | if let Some(addr) = self.ip_addresses.get(instance) { 58 | Some(*addr) 59 | } else { 60 | bail!("Instance no {}, but there's only {} ip addresses", 61 | instance, self.ip_addresses.len()); 62 | } 63 | } else { 64 | None 65 | }, 66 | extra_secrets_namespaces: self.extra_secrets_namespaces.clone(), 67 | kind: self.kind, 68 | }; 69 | return Ok(cfg); 70 | } 71 | pub fn mapping_validator<'x>() -> Mapping<'x> { 72 | return Mapping::new( 73 | Scalar::new(), 74 | ChildConfig::validator()); 75 | } 76 | pub fn validator<'x>() -> Structure<'x> { 77 | Structure::new() 78 | .member("instances", Numeric::new().default(1)) 79 | .member("image", Scalar::new()) 80 | .member("config", Scalar::new()) 81 | .member("variables", Mapping::new(Scalar::new(), Scalar::new())) 82 | .member("extra_secrets_namespaces", Sequence::new(Scalar::new())) 83 | .member("kind", Scalar::new().default("Daemon")) 84 | .member("ip_addresses", Sequence::new(Scalar::new())) 85 | } 86 | } 87 | impl ChildInstance { 88 | pub fn validator<'x>() -> Structure<'x> { 89 | Structure::new() 90 | .member("instances", Numeric::new().default(1)) 91 | .member("image", Scalar::new()) 92 | .member("config", Scalar::new()) 93 | .member("variables", Mapping::new(Scalar::new(), Scalar::new())) 94 | .member("extra_secrets_namespaces", Sequence::new(Scalar::new())) 95 | .member("kind", Scalar::new().default("Daemon")) 96 | .member("ip_address", Scalar::new().optional()) 97 | } 98 | } 99 | 100 | impl FromStr for ChildInstance { 101 | type Err = (); 102 | fn from_str(body: &str) -> Result<Self, Self::Err> { 103 | parse_string("", body, 104 | &Self::validator(), &Options::default()) 105 | .map_err(|_| ()) 106 | } 107 | } 108 | 109 | #[cfg(test)] 110 | mod test { 111 | use std::collections::BTreeMap; 112 | use
std::str::FromStr; 113 | use super::ChildInstance; 114 | use super::ChildKind::Daemon; 115 | use serde_json::{to_string, from_str}; 116 | 117 | #[test] 118 | fn deserialize_compat() { 119 | let data = r#"{ 120 | "instances":1, 121 | "image":"myproj.4a20772b", 122 | "config":"/config/staging/myproj.yaml", 123 | "kind":"Daemon"}"#; 124 | let cc = ChildInstance::from_str(data).unwrap(); 125 | assert_eq!(cc, ChildInstance { 126 | instances: 1, 127 | image: String::from("myproj.4a20772b"), 128 | config: String::from("/config/staging/myproj.yaml"), 129 | variables: BTreeMap::new(), 130 | extra_secrets_namespaces: Vec::new(), 131 | ip_address: None, 132 | kind: Daemon, 133 | }); 134 | 135 | let cc: ChildInstance = from_str(&data).unwrap(); 136 | assert_eq!(cc, ChildInstance { 137 | instances: 1, 138 | image: String::from("myproj.4a20772b"), 139 | config: String::from("/config/staging/myproj.yaml"), 140 | variables: BTreeMap::new(), 141 | extra_secrets_namespaces: Vec::new(), 142 | ip_address: None, 143 | kind: Daemon, 144 | }); 145 | } 146 | 147 | #[test] 148 | fn dedeserialize_vars() { 149 | let data = r#"{ 150 | "instances":1, 151 | "image":"myproj.4a20772b", 152 | "config":"/config/staging/myproj.yaml", 153 | "variables": {"a": "b"}, 154 | "kind":"Daemon"}"#; 155 | let cc = ChildInstance::from_str(data).unwrap(); 156 | assert_eq!(cc, ChildInstance { 157 | instances: 1, 158 | image: String::from("myproj.4a20772b"), 159 | config: String::from("/config/staging/myproj.yaml"), 160 | variables: vec![ 161 | (String::from("a"), String::from("b")), 162 | ].into_iter().collect(), 163 | extra_secrets_namespaces: Vec::new(), 164 | ip_address: None, 165 | kind: Daemon, 166 | }) 167 | } 168 | 169 | #[test] 170 | fn serialize_compat() { 171 | let data = to_string(&ChildInstance { 172 | instances: 1, 173 | image: String::from("myproj.4a20772b"), 174 | config: String::from("/config/staging/myproj.yaml"), 175 | variables: BTreeMap::new(), 176 | extra_secrets_namespaces: Vec::new(), 177 | ip_address: None, 178 | kind: Daemon, 179 | }).unwrap(); 180 | assert_eq!(data, "{\ 181 | \"instances\":1,\ 182 | \"image\":\"myproj.4a20772b\",\ 183 | \"config\":\"/config/staging/myproj.yaml\",\ 184 | \"kind\":\"Daemon\"}"); 185 | } 186 | 187 | #[test] 188 | fn serialize_vars() { 189 | let data = to_string(&ChildInstance { 190 | instances: 1, 191 | image: String::from("myproj.4a20772b"), 192 | config: String::from("/config/staging/myproj.yaml"), 193 | variables: vec![ 194 | (String::from("a"), String::from("b")), 195 | (String::from("c"), String::from("d")), 196 | ].into_iter().collect(), 197 | extra_secrets_namespaces: Vec::new(), 198 | ip_address: None, 199 | kind: Daemon, 200 | }).unwrap(); 201 | assert_eq!(data, "{\ 202 | \"instances\":1,\ 203 | \"image\":\"myproj.4a20772b\",\ 204 | \"config\":\"/config/staging/myproj.yaml\",\ 205 | \"variables\":{\"a\":\"b\",\"c\":\"d\"},\ 206 | \"kind\":\"Daemon\"}"); 207 | } 208 | } 209 | -------------------------------------------------------------------------------- /src/id_map.rs: -------------------------------------------------------------------------------- 1 | use quire::validate::{Sequence, Numeric, Structure}; 2 | 3 | 4 | #[derive(Deserialize, Serialize, Clone, Copy, Debug)] 5 | pub struct IdMap { 6 | pub inside: u32, 7 | pub outside: u32, 8 | pub count: u32, 9 | } 10 | pub trait IdMapExt { 11 | fn map_id(&self, internal_id: u32) -> Option; 12 | } 13 | 14 | impl IdMapExt for Vec { 15 | fn map_id(&self, internal_id: u32) -> Option { 16 | if self.len() == 0 { 17 | return 
Some(internal_id); 18 | } 19 | for rng in self.iter() { 20 | if internal_id >= rng.inside && 21 | internal_id <= rng.inside + rng.count 22 | { 23 | return Some(rng.outside + (internal_id - rng.inside)); 24 | } 25 | } 26 | None 27 | } 28 | } 29 | 30 | pub fn mapping_validator<'x>() -> Sequence<'x> { 31 | Sequence::new( 32 | Structure::new() 33 | .member("inside", Numeric::new()) 34 | .member("outside", Numeric::new()) 35 | .member("count", Numeric::new())) 36 | } 37 | -------------------------------------------------------------------------------- /src/itertools.rs: -------------------------------------------------------------------------------- 1 | use std::str::{FromStr, CharIndices}; 2 | use std::borrow::Borrow; 3 | 4 | 5 | pub trait NextValue { 6 | fn next_value<T: FromStr>(&mut self) -> Result<T, ()>; 7 | fn nth_value<T: FromStr>(&mut self, i: usize) -> Result<T, ()>; 8 | } 9 | 10 | impl<A: Borrow<str>, I> NextValue for I 11 | where I: Iterator<Item=A> 12 | { 13 | 14 | fn next_value<T: FromStr>(&mut self) -> Result<T, ()> { 15 | self.next().ok_or(()) 16 | .and_then(|x| FromStr::from_str(x.borrow()).map_err(|_| ())) 17 | } 18 | 19 | fn nth_value<T: FromStr>(&mut self, i: usize) -> Result<T, ()> { 20 | self.nth(i).ok_or(()) 21 | .and_then(|x| FromStr::from_str(x.borrow()).map_err(|_| ())) 22 | } 23 | 24 | } 25 | 26 | pub trait NextStr<'a> { 27 | fn next_str(&mut self) -> Result<&'a str, ()>; 28 | fn nth_str(&mut self, i: usize) -> Result<&'a str, ()>; 29 | } 30 | 31 | impl<'a, I> NextStr<'a> for I 32 | where I: Iterator<Item=&'a str> 33 | { 34 | fn next_str(&mut self) -> Result<&'a str, ()> { 35 | return self.next().ok_or(()); 36 | } 37 | fn nth_str(&mut self, i: usize) -> Result<&'a str, ()> { 38 | return self.nth(i).ok_or(()); 39 | } 40 | } 41 | 42 | pub struct Words<'a> { 43 | src: &'a str, 44 | iter: CharIndices<'a>, 45 | } 46 | 47 | impl<'a> Words<'a> { 48 | fn skip_ws(&mut self) -> Option<(usize, char)> { 49 | loop { 50 | if let Some((idx, ch)) = self.iter.next() { 51 | if !ch.is_whitespace() { 52 | return Some((idx, ch)); 53 | } 54 | } else { 55 | return None 56 | } 57 | } 58 | } 59 | } 60 | 61 | impl<'a> Iterator for Words<'a> { 62 | type Item = &'a str; 63 | fn next(&mut self) -> Option<&'a str> { 64 | if let Some((start_idx, _)) = self.skip_ws() { 65 | loop { 66 | if let Some((idx, ch)) = self.iter.next() { 67 | if ch.is_whitespace() { 68 | return Some(&self.src[start_idx..idx]); 69 | } 70 | } else { 71 | return Some(&self.src[start_idx..]); 72 | } 73 | } 74 | } else { 75 | return None; 76 | } 77 | } 78 | } 79 | 80 | pub fn words<'a, 'b: 'a, B: Borrow<str> + ?Sized + 'a>(src: &'b B) -> Words<'a> { 81 | return Words { 82 | src: src.borrow(), 83 | iter: src.borrow().char_indices(), 84 | }; 85 | } 86 | -------------------------------------------------------------------------------- /src/knot_options.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::env; 3 | use std::io::{stdout, stderr}; 4 | use std::io::{Write}; 5 | use std::path::{PathBuf}; 6 | 7 | use log; 8 | use argparse::{ArgumentParser, StoreOption, Store, Parse, List, StoreTrue}; 9 | use argparse::{Print}; 10 | 11 | use child_config::ChildInstance; 12 | use child_config::ChildKind::Daemon; 13 | 14 | 15 | pub struct Options { 16 | pub master_config: PathBuf, 17 | pub config: ChildInstance, 18 | pub name: String, 19 | pub args: Vec<String>, 20 | pub log_stderr: bool, 21 | pub log_level: Option<log::LogLevel>, 22 | } 23 | 24 | impl Options { 25 | pub fn parse_args() -> Result<Options, i32> { 26 | Options::parse_specific_args(env::args().collect(), 27 | &mut stdout(), &mut stderr()) 28 | } 29 | pub
fn parse_specific_args(args: Vec, 30 | stdout: &mut Write, stderr: &mut Write) 31 | -> Result 32 | { 33 | let mut options = Options { 34 | master_config: PathBuf::from("/etc/lithos/master.yaml"), 35 | config: ChildInstance { 36 | instances: 1, 37 | image: "".to_string(), 38 | config: "".to_string(), 39 | variables: BTreeMap::new(), 40 | extra_secrets_namespaces: Vec::new(), 41 | ip_address: None, 42 | kind: Daemon, 43 | }, 44 | name: "".to_string(), 45 | args: vec!(), 46 | log_stderr: false, 47 | log_level: None, 48 | }; 49 | let parse_result = { 50 | let mut ap = ArgumentParser::new(); 51 | ap.set_description("Runs tree of processes"); 52 | ap.refer(&mut options.name) 53 | .add_option(&["--name"], Store, 54 | "The process name"); 55 | ap.refer(&mut options.master_config) 56 | .add_option(&["--master"], Parse, 57 | "Name of the master configuration file \ 58 | (default /etc/lithos/master.yaml)") 59 | .metavar("FILE"); 60 | ap.refer(&mut options.config) 61 | .add_option(&["--config"], Store, 62 | "JSON-serialized container configuration") 63 | .required() 64 | .metavar("JSON"); 65 | ap.refer(&mut options.args) 66 | .add_argument("argument", List, 67 | "Additional arguments for the command"); 68 | ap.refer(&mut options.log_stderr) 69 | .add_option(&["--log-stderr"], StoreTrue, 70 | "Print debugging info to stderr"); 71 | ap.refer(&mut options.log_level) 72 | .add_option(&["--log-level"], StoreOption, 73 | "Set log level (default info for now)"); 74 | ap.add_option(&["--version"], 75 | Print(env!("CARGO_PKG_VERSION").to_string()), 76 | "Show version"); 77 | ap.stop_on_first_argument(true); 78 | ap.parse(args, stdout, stderr) 79 | }; 80 | match parse_result { 81 | Ok(()) => Ok(options), 82 | Err(x) => Err(x), 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! This is internal module not intended to be used as a library 2 | //! 3 | //! See [Documentation](http://lithos.readthedocs.io) for the actual usage. 4 | //! 
5 | extern crate argparse; 6 | extern crate blake2; 7 | extern crate base64; 8 | extern crate crypto; 9 | extern crate humantime; 10 | extern crate fern; 11 | extern crate ipnetwork; 12 | extern crate libc; 13 | extern crate libcantal; 14 | extern crate libmount; 15 | extern crate nix; 16 | extern crate quire; 17 | extern crate rand; 18 | extern crate serde; 19 | extern crate serde_json; 20 | extern crate serde_str; 21 | extern crate signal; 22 | extern crate sha2; 23 | extern crate syslog; 24 | #[macro_use] extern crate failure; 25 | #[macro_use] extern crate log; 26 | #[macro_use] extern crate serde_derive; 27 | 28 | pub mod master_config; 29 | pub mod sandbox_config; 30 | pub mod container_config; 31 | pub mod child_config; 32 | pub mod mount; 33 | pub mod utils; 34 | pub mod network; 35 | pub mod setup; 36 | pub mod pipe; 37 | pub mod limits; 38 | pub mod cgroup; 39 | pub mod itertools; 40 | pub mod timer_queue; 41 | pub mod id_map; 42 | pub mod metrics; 43 | pub mod range; 44 | pub mod knot_options; 45 | pub mod tree_options; 46 | pub mod nacl; 47 | 48 | pub const MAX_CONFIG_LOGS: u32 = 100; 49 | -------------------------------------------------------------------------------- /src/limits.rs: -------------------------------------------------------------------------------- 1 | use std::io::Error as IoError; 2 | use libc::c_int; 3 | 4 | static RLIMIT_NOFILE: c_int = 7; 5 | 6 | #[repr(C)] 7 | struct rlimit { 8 | rlim_cur: u64, 9 | rlim_max: u64, 10 | } 11 | 12 | extern "C" { 13 | fn setrlimit(resource: c_int, rlimit: *const rlimit) -> c_int; 14 | } 15 | 16 | pub fn set_fileno_limit(limit: u64) -> Result<(), IoError> { 17 | let res = unsafe { setrlimit(RLIMIT_NOFILE, &rlimit { 18 | rlim_cur: limit, 19 | rlim_max: limit, 20 | }) }; 21 | if res != 0 { 22 | return Err(IoError::last_os_error()); 23 | } 24 | return Ok(()); 25 | } 26 | -------------------------------------------------------------------------------- /src/master_config.rs: -------------------------------------------------------------------------------- 1 | use std::path::PathBuf; 2 | 3 | use quire::validate::{Structure, Sequence}; 4 | use quire::validate::{Scalar}; 5 | use super::utils::ensure_dir; 6 | 7 | #[derive(Deserialize)] 8 | pub struct MasterConfig { 9 | pub runtime_dir: PathBuf, 10 | pub sandboxes_dir: PathBuf, 11 | pub processes_dir: PathBuf, 12 | pub state_dir: PathBuf, 13 | pub mount_dir: PathBuf, 14 | pub devfs_dir: PathBuf, 15 | pub default_log_dir: PathBuf, 16 | pub config_log_dir: Option, 17 | pub stdio_log_dir: PathBuf, 18 | pub log_file: PathBuf, 19 | pub syslog_facility: Option, 20 | pub syslog_app_name: String, 21 | pub log_level: String, 22 | pub cgroup_name: Option, 23 | pub cgroup_controllers: Vec, 24 | } 25 | 26 | impl MasterConfig { 27 | pub fn validator<'x>() -> Structure<'x> { 28 | Structure::new() 29 | .member("sandboxes_dir", Scalar::new().default("./sandboxes")) 30 | .member("processes_dir", Scalar::new().default("./processes")) 31 | .member("runtime_dir", Scalar::new().default("/run/lithos")) 32 | .member("state_dir", Scalar::new().default("state")) 33 | .member("mount_dir", Scalar::new().default("mnt")) 34 | .member("devfs_dir", Scalar::new() 35 | .default("/var/lib/lithos/dev")) 36 | .member("default_log_dir", Scalar::new().default("/var/log/lithos")) 37 | .member("syslog_facility", Scalar::new().optional()) 38 | .member("syslog_app_name", Scalar::new().default("lithos")) 39 | .member("log_file", Scalar::new().default("master.log")) 40 | .member("log_level", Scalar::new().default("warn")) 41 | 
.member("config_log_dir", Scalar::new().optional() 42 | .default("/var/log/lithos/config")) 43 | .member("stdio_log_dir", Scalar::new() 44 | .default("/var/log/lithos/stderr")) 45 | .member("cgroup_name", 46 | Scalar::new().optional().default("lithos.slice")) 47 | .member("cgroup_controllers", Sequence::new(Scalar::new())) 48 | } 49 | } 50 | 51 | pub fn create_master_dirs(cfg: &MasterConfig) -> Result<(), String> { 52 | try!(ensure_dir(&cfg.runtime_dir) 53 | .map_err(|e| format!("Cant create runtime-dir: {}", e))); 54 | try!(ensure_dir(&cfg.runtime_dir.join(&cfg.state_dir)) 55 | .map_err(|e| format!("Cant create state-dir: {}", e))); 56 | try!(ensure_dir(&cfg.runtime_dir.join(&cfg.mount_dir)) 57 | .map_err(|e| format!("Cant create mount-dir: {}", e))); 58 | try!(ensure_dir(&cfg.default_log_dir) 59 | .map_err(|e| format!("Cant create log dir: {}", e))); 60 | if let Some(ref config_log_dir) = cfg.config_log_dir { 61 | ensure_dir(config_log_dir) 62 | .map_err(|e| format!("Cant create configuration log dir: {}", e))?; 63 | } 64 | try!(ensure_dir(&cfg.stdio_log_dir) 65 | .map_err(|e| format!("Cant create stdio log dir: {}", e))); 66 | return Ok(()); 67 | } 68 | -------------------------------------------------------------------------------- /src/metrics.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | 3 | use libcantal::{Counter, Integer, Collection, Visitor, Name, NameVisitor}; 4 | 5 | 6 | pub struct Process { 7 | pub started: Counter, 8 | pub failures: Counter, 9 | pub deaths: Counter, 10 | pub running: Integer, 11 | } 12 | 13 | pub struct Metrics { 14 | pub restarts: Counter, 15 | pub sandboxes: Integer, 16 | pub containers: Integer, 17 | pub queue: Integer, 18 | 19 | pub started: Counter, 20 | pub failures: Counter, 21 | pub deaths: Counter, 22 | pub running: Integer, 23 | pub unknown: Integer, 24 | 25 | pub processes: HashMap<(String, String), Process>, 26 | } 27 | 28 | pub struct MasterName(&'static str); 29 | pub struct GlobalName(&'static str); 30 | pub struct ProcessName<'a>(&'a str, &'a str, &'static str); 31 | 32 | impl Metrics { 33 | pub fn new() -> Metrics { 34 | Metrics { 35 | restarts: Counter::new(), 36 | sandboxes: Integer::new(), 37 | containers: Integer::new(), 38 | 39 | started: Counter::new(), 40 | failures: Counter::new(), 41 | deaths: Counter::new(), 42 | running: Integer::new(), 43 | unknown: Integer::new(), 44 | queue: Integer::new(), 45 | 46 | processes: HashMap::new(), 47 | } 48 | } 49 | } 50 | 51 | impl Process { 52 | pub fn new() -> Process { 53 | Process { 54 | started: Counter::new(), 55 | failures: Counter::new(), 56 | deaths: Counter::new(), 57 | running: Integer::new(), 58 | } 59 | } 60 | } 61 | 62 | 63 | impl Collection for Metrics { 64 | fn visit<'x>(&'x self, visitor: &mut Visitor<'x>) { 65 | visitor.metric(&MasterName("restarts"), &self.restarts); 66 | visitor.metric(&MasterName("sandboxes"), &self.sandboxes); 67 | visitor.metric(&MasterName("containers"), &self.containers); 68 | visitor.metric(&MasterName("queue"), &self.queue); 69 | 70 | visitor.metric(&GlobalName("started"), &self.started); 71 | visitor.metric(&GlobalName("failures"), &self.failures); 72 | visitor.metric(&GlobalName("deaths"), &self.deaths); 73 | visitor.metric(&GlobalName("running"), &self.running); 74 | for (&(ref g, ref n), ref p) in &self.processes { 75 | visitor.metric(&ProcessName(g, n, "started"), &p.started); 76 | visitor.metric(&ProcessName(g, n, "failures"), &p.failures); 77 | 
visitor.metric(&ProcessName(g, n, "deaths"), &p.deaths); 78 | visitor.metric(&ProcessName(g, n, "running"), &p.running); 79 | } 80 | } 81 | } 82 | 83 | impl Name for MasterName { 84 | fn get(&self, key: &str) -> Option<&str> { 85 | match key { 86 | "group" => Some("master"), 87 | "metric" => Some(self.0), 88 | _ => None, 89 | } 90 | } 91 | fn visit(&self, s: &mut NameVisitor) { 92 | s.visit_pair("group", "master"); 93 | s.visit_pair("metric", self.0); 94 | } 95 | } 96 | 97 | impl Name for GlobalName { 98 | fn get(&self, key: &str) -> Option<&str> { 99 | match key { 100 | "group" => Some("containers"), 101 | "metric" => Some(self.0), 102 | _ => None, 103 | } 104 | } 105 | fn visit(&self, s: &mut NameVisitor) { 106 | s.visit_pair("group", "containers"); 107 | s.visit_pair("metric", self.0); 108 | } 109 | } 110 | 111 | impl<'a> Name for ProcessName<'a> { 112 | fn get(&self, _key: &str) -> Option<&str> { 113 | unimplemented!(); 114 | } 115 | fn visit(&self, s: &mut NameVisitor) { 116 | s.visit_pair("group", &format!("processes.{}.{}", self.0, self.1)); 117 | s.visit_pair("metric", self.2); 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /src/mount.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | use std::io::Error as IoError; 3 | use std::ffi::CString; 4 | use std::ptr::null; 5 | use std::path::Path; 6 | use libc::{c_ulong, c_int}; 7 | 8 | use super::itertools::{NextValue, NextStr, words}; 9 | use super::utils::cpath; 10 | 11 | // sys/mount.h 12 | static MS_RDONLY: c_ulong = 1; /* Mount read-only. */ 13 | static MS_NOSUID: c_ulong = 2; /* Ignore suid and sgid bits. */ 14 | static MS_NODEV: c_ulong = 4; /* Disallow access to device special files. */ 15 | static MS_NOEXEC: c_ulong = 8; /* Disallow program execution. */ 16 | static MS_SYNCHRONOUS: c_ulong = 16; /* Writes are synced at once. */ 17 | static MS_REMOUNT: c_ulong = 32; /* Alter flags of a mounted FS. */ 18 | static MS_MANDLOCK: c_ulong = 64; /* Allow mandatory locks on an FS. */ 19 | static MS_DIRSYNC: c_ulong = 128; /* Directory modifications are synchronous. */ 20 | static MS_NOATIME: c_ulong = 1024; /* Do not update access times. */ 21 | static MS_NODIRATIME: c_ulong = 2048; /* Do not update directory access times. */ 22 | static MS_BIND: c_ulong = 4096; /* Bind directory at different place. */ 23 | static MS_MOVE: c_ulong = 8192; 24 | static MS_REC: c_ulong = 16384; 25 | static MS_SILENT: c_ulong = 32768; 26 | static MS_POSIXACL: c_ulong = 1 << 16; /* VFS does not apply the umask. */ 27 | static MS_UNBINDABLE: c_ulong = 1 << 17; /* Change to unbindable. */ 28 | static MS_PRIVATE: c_ulong = 1 << 18; /* Change to private. */ 29 | static MS_SLAVE: c_ulong = 1 << 19; /* Change to slave. */ 30 | static MS_SHARED: c_ulong = 1 << 20; /* Change to shared. */ 31 | static MS_RELATIME: c_ulong = 1 << 21; /* Update atime relative to mtime/ctime. */ 32 | static MS_KERNMOUNT: c_ulong = 1 << 22; /* This is a kern_mount call. */ 33 | static MS_I_VERSION: c_ulong = 1 << 23; /* Update inode I_version field. */ 34 | static MS_STRICTATIME: c_ulong = 1 << 24; /* Always perform atime updates. */ 35 | static MS_ACTIVE: c_ulong = 1 << 30; 36 | static MS_NOUSER: c_ulong = 1 << 31; 37 | 38 | static MNT_FORCE: c_int = 1; /* Force unmounting. */ 39 | static MNT_DETACH: c_int = 2; /* Just detach from the tree. */ 40 | static MNT_EXPIRE: c_int = 4; /* Mark for expiry. 
*/ 41 | static UMOUNT_NOFOLLOW: c_int = 8; /* Don't follow symlink on umount. */ 42 | 43 | 44 | extern { 45 | fn mount(source: *const i8, target: *const i8, 46 | filesystemtype: *const i8, flags: c_ulong, 47 | data: *const i8) -> c_int; 48 | fn umount(target: *const i8) -> c_int; 49 | fn umount2(target: *const i8, flags: c_int) -> c_int; 50 | } 51 | 52 | 53 | pub struct MountRecord<'a> { 54 | pub mount_id: usize, 55 | pub parent_id: usize, 56 | _device: &'a str, // TODO(tailhook) parse if ever need 57 | pub relative_root: &'a str, 58 | pub mount_point: &'a str, 59 | pub mount_options: &'a str, 60 | pub tag_shared: Option, 61 | pub tag_master: Option, 62 | pub tag_propagate_from: Option, 63 | pub tag_unbindable: Option<()>, 64 | pub fstype: &'a str, 65 | pub mount_source: &'a str, 66 | pub super_options: &'a str, 67 | } 68 | 69 | impl<'a> MountRecord<'a> { 70 | pub fn from_str<'x>(line: &'x str) -> Result, ()> { 71 | let mut parts = words(line); 72 | let mount_id = try!(parts.next_value()); 73 | let parent_id = try!(parts.next_value()); 74 | let device = try!(parts.next_str()); 75 | let relative_root = try!(parts.next_str()); 76 | let mount_point = try!(parts.next_str()); 77 | let mount_options = try!(parts.next_str()); 78 | let mut tag_shared = None; 79 | let mut tag_master = None; 80 | let mut tag_propagate_from = None; 81 | let mut tag_unbindable = None; 82 | 83 | for name in &mut parts { 84 | if name == "-" { break; } 85 | let mut pair = name.splitn(2, ':'); 86 | let key = pair.next(); 87 | match key { 88 | Some("shared") => { 89 | tag_shared = Some(try!(pair.next_value())); 90 | } 91 | Some("master") => { 92 | tag_master = Some(try!(pair.next_value())); 93 | } 94 | Some("propagate_from") => { 95 | tag_propagate_from = Some(try!(pair.next_value())); 96 | } 97 | Some("unbindable") => tag_unbindable = Some(()), 98 | _ => {} 99 | } 100 | } 101 | 102 | let fstype = try!(parts.next_str()); 103 | let mount_source = try!(parts.next_str()); 104 | let super_options = try!(parts.next_str()); 105 | 106 | return Ok(MountRecord { 107 | mount_id: mount_id, 108 | parent_id: parent_id, 109 | _device: device, 110 | relative_root: relative_root, 111 | mount_point: mount_point, 112 | mount_options: mount_options, 113 | tag_shared: tag_shared, 114 | tag_master: tag_master, 115 | tag_propagate_from: tag_propagate_from, 116 | tag_unbindable: tag_unbindable, 117 | fstype: fstype, 118 | mount_source: mount_source, 119 | super_options: super_options, 120 | }); 121 | } 122 | pub fn is_private(&self) -> bool { 123 | return self.tag_shared.is_none() 124 | && self.tag_master.is_none() 125 | && self.tag_propagate_from.is_none() 126 | && self.tag_unbindable.is_none(); 127 | } 128 | } 129 | 130 | pub fn mount_ro_recursive(target: &Path) -> Result<(), String> { 131 | let none = CString::new("none").unwrap(); 132 | debug!("Remount readonly: {:?}", target); 133 | let c_target = cpath(target); 134 | let rc = unsafe { mount( 135 | none.as_ptr(), 136 | c_target.as_ptr(), 137 | null(), MS_BIND|MS_REMOUNT|MS_RDONLY, null()) }; 138 | if rc != 0 { 139 | let err = IoError::last_os_error(); 140 | return Err(format!("Remount readonly {}: {}", target.display(), err)); 141 | } 142 | return Ok(()); 143 | } 144 | 145 | pub fn mount_private(target: &Path) -> Result<(), String> { 146 | let none = CString::new("none").unwrap(); 147 | let c_target = cpath(target); 148 | debug!("Making private {:?}", target); 149 | let rc = unsafe { mount( 150 | none.as_ptr(), 151 | c_target.as_ptr(), 152 | null(), MS_REC|MS_PRIVATE, null()) }; 153 | if rc 
== 0 { 154 | return Ok(()); 155 | } else { 156 | let err = IoError::last_os_error(); 157 | return Err(format!("Can't make {} private: {}", 158 | target.display(), err)); 159 | } 160 | } 161 | 162 | pub fn mount_pseudo(target: &Path, name: &str, options: &str, readonly: bool) 163 | -> Result<(), String> 164 | { 165 | let c_name = CString::new(name).unwrap(); 166 | let c_target = cpath(target); 167 | let c_opts = CString::new(options).unwrap(); 168 | let mut flags = MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_NOATIME; 169 | if readonly { 170 | flags |= MS_RDONLY; 171 | } 172 | debug!("Pseudofs mount {} {} {}", target.display(), name, options); 173 | let rc = unsafe { mount( 174 | c_name.as_ptr(), 175 | c_target.as_ptr(), 176 | c_name.as_ptr(), 177 | flags, 178 | c_opts.as_ptr()) }; 179 | if rc == 0 { 180 | return Ok(()); 181 | } else { 182 | let err = IoError::last_os_error(); 183 | return Err(format!("Can't mount pseudofs {} ({}, options: {}): {}", 184 | target.display(), name, options, err)); 185 | } 186 | } 187 | 188 | pub fn mount_pts(target: &Path) 189 | -> Result<(), String> 190 | { 191 | let c_name = CString::new("devpts").unwrap(); 192 | let c_target = cpath(target); 193 | let opts = "newinstance,ptmxmode=0666"; 194 | let c_opts = CString::new(opts).unwrap(); 195 | let flags = MS_NOSUID | MS_NOEXEC | MS_NOATIME; 196 | debug!("Pseudofs mount {} {} {}", target.display(), "devpts", opts); 197 | let rc = unsafe { mount( 198 | c_name.as_ptr(), 199 | c_target.as_ptr(), 200 | c_name.as_ptr(), 201 | flags, 202 | c_opts.as_ptr()) }; 203 | if rc == 0 { 204 | return Ok(()); 205 | } else { 206 | let err = IoError::last_os_error(); 207 | return Err(format!("Can't mount pseudofs {} ({}, options: {}): {}", 208 | target.display(), "devpts", opts, err)); 209 | } 210 | } 211 | 212 | pub fn unmount(target: &Path) -> Result<(), String> { 213 | let c_target = cpath(target); 214 | let rc = unsafe { umount2(c_target.as_ptr(), MNT_DETACH) }; 215 | if rc == 0 { 216 | return Ok(()); 217 | } else { 218 | let err = IoError::last_os_error(); 219 | return Err(format!("Can't unmount {}: {}", target.display(), err)); 220 | } 221 | } 222 | 223 | -------------------------------------------------------------------------------- /src/network.rs: -------------------------------------------------------------------------------- 1 | use std::ptr::{null, copy}; 2 | use std::ffi::{CString}; 3 | use std::io::Error as IoError; 4 | use std::io::Result as IoResult; 5 | use std::net::Ipv4Addr; 6 | use libc::{c_int, size_t, c_char, EINVAL}; 7 | 8 | #[repr(C)] 9 | struct hostent { 10 | h_name: *const c_char, /* official name of host */ 11 | h_aliases: *const *const c_char, /* alias list */ 12 | h_addrtype: c_int, /* host address type */ 13 | h_length: c_int, /* length of address */ 14 | h_addr_list: *const *const c_char, /* list of addresses */ 15 | } 16 | 17 | extern { 18 | fn gethostname(name: *mut c_char, size: size_t) -> c_int; 19 | fn gethostbyname(name: *const c_char) -> *const hostent; 20 | } 21 | 22 | pub fn get_host_ip() -> IoResult<String> { 23 | let host = try!(get_host_name()); 24 | let addr = try!(get_host_address(&host[..])); 25 | return Ok(addr); 26 | } 27 | 28 | pub fn get_host_name() -> IoResult<String> { 29 | let mut buf: Vec<u8> = Vec::with_capacity(256); 30 | let nbytes = unsafe { 31 | buf.set_len(256); 32 | gethostname( 33 | (&mut buf[..]).as_ptr() as *mut i8, 34 | 256) 35 | }; 36 | if nbytes != 0 { 37 | return Err(IoError::last_os_error()); 38 | } 39 | return buf[..].splitn(2, |x| *x == 0u8) 40 | .next() 41 | .and_then(|x|
String::from_utf8(x.to_vec()).ok()) 42 | .ok_or(IoError::from_raw_os_error(EINVAL)); 43 | } 44 | 45 | pub fn get_host_address(val: &str) -> IoResult { 46 | let cval = CString::new(val).unwrap(); 47 | unsafe { 48 | let hostent = gethostbyname(cval.as_ptr()); 49 | if hostent == null() { 50 | return Err(IoError::last_os_error()); 51 | } 52 | if (*hostent).h_length == 0 { 53 | return Err(IoError::from_raw_os_error(EINVAL)); 54 | } 55 | let mut addr = [0u8; 4]; 56 | copy(*(*hostent).h_addr_list, addr.as_mut_ptr() as *mut i8, 4); 57 | return Ok(format!("{}", 58 | Ipv4Addr::new(addr[0], addr[1], addr[2], addr[3]))); 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/pipe.rs: -------------------------------------------------------------------------------- 1 | use std::io::Error as IoError; 2 | use std::os::unix::io::RawFd; 3 | use nix::unistd::{pipe}; 4 | use nix::errno::Errno::EPIPE; 5 | 6 | use libc::{c_int, c_void, close, write, EINTR, EAGAIN}; 7 | 8 | 9 | pub struct CPipe { 10 | reader: RawFd, 11 | writer: RawFd, 12 | } 13 | 14 | impl CPipe { 15 | pub fn new() -> Result { 16 | use nix::Error::*; 17 | match pipe() { 18 | Ok((reader, writer)) => Ok(CPipe { 19 | reader: reader, writer: writer 20 | }), 21 | Err(Sys(code)) => Err(IoError::from_raw_os_error(code as i32)), 22 | Err(InvalidPath) => unreachable!(), 23 | Err(InvalidUtf8) => unreachable!(), 24 | Err(UnsupportedOperation) => unreachable!(), 25 | } 26 | } 27 | pub fn reader_fd(&self) -> c_int { 28 | return self.reader; 29 | } 30 | pub fn wakeup(&self) -> Result<(), IoError> { 31 | let mut rc; 32 | loop { 33 | unsafe { 34 | rc = write(self.writer, 35 | ['x' as u8].as_ptr() as *const c_void, 1); 36 | } 37 | let err = IoError::last_os_error().raw_os_error(); 38 | if rc < 0 && (err == Some(EINTR) || err == Some(EAGAIN)) { 39 | continue 40 | } 41 | break; 42 | } 43 | if rc == 0 { 44 | return Err(IoError::from_raw_os_error(EPIPE as i32)); 45 | } else if rc == -1 { 46 | return Err(IoError::last_os_error()); 47 | } 48 | return Ok(()); 49 | } 50 | } 51 | 52 | impl Drop for CPipe { 53 | fn drop(&mut self) { 54 | unsafe { 55 | close(self.reader); 56 | close(self.writer); 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/range.rs: -------------------------------------------------------------------------------- 1 | use std::str::FromStr; 2 | 3 | use serde::de::{Deserializer, Deserialize, Error}; 4 | 5 | 6 | #[derive(Clone, Debug)] 7 | pub struct Range { 8 | pub start: u32, 9 | pub end: u32, 10 | } 11 | 12 | impl Range { 13 | pub fn new(start: u32, end: u32) -> Range { 14 | return Range { start: start, end: end }; 15 | } 16 | pub fn len(&self) -> u32 { 17 | return self.end - self.start + 1; 18 | } 19 | pub fn shift(&self, val: u32) -> Range { 20 | assert!(self.end - self.start + 1 >= val); 21 | return Range::new(self.start + val, self.end); 22 | } 23 | } 24 | 25 | impl<'a> Deserialize<'a> for Range { 26 | fn deserialize>(d: D) -> Result { 27 | let val = String::deserialize(d)?; 28 | FromStr::from_str(&val[..]) 29 | .map(|num| Range::new(num, num)) 30 | .or_else(|_| { 31 | let mut pair = val.splitn(2, '-'); 32 | Ok(Range::new( 33 | pair.next().and_then(|x| FromStr::from_str(x).ok()) 34 | .ok_or(D::Error::custom("Error parsing range"))?, 35 | pair.next().and_then(|x| FromStr::from_str(x).ok()) 36 | .ok_or(D::Error::custom("Error parsing range"))?, 37 | )) 38 | }) 39 | } 40 | } 41 | 42 | pub fn in_range(ranges: &Vec, value: u32) -> bool { 43 
| for rng in ranges.iter() { 44 | if rng.start <= value && rng.end >= value { 45 | return true; 46 | } 47 | } 48 | return false; 49 | } 50 | -------------------------------------------------------------------------------- /src/sandbox_config.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BTreeMap; 2 | use std::net::IpAddr; 3 | use std::path::{PathBuf, Path, Component}; 4 | 5 | use id_map::{IdMap, mapping_validator}; 6 | use ipnetwork::IpNetwork; 7 | use quire::validate::{Sequence, Mapping, Scalar, Numeric}; 8 | use quire::validate::{Structure}; 9 | use range::Range; 10 | 11 | 12 | #[derive(Deserialize, Clone)] 13 | pub struct BridgedNetwork { 14 | pub bridge: String, 15 | #[serde(with="::serde_str")] 16 | pub network: IpNetwork, 17 | pub default_gateway: Option, 18 | pub after_setup_command: Vec, 19 | } 20 | 21 | #[derive(Deserialize)] 22 | pub struct SandboxConfig { 23 | pub config_file: Option, 24 | pub image_dir: PathBuf, 25 | pub image_dir_levels: u32, 26 | pub used_images_list: Option, 27 | pub log_file: Option, 28 | pub log_level: Option, 29 | pub readonly_paths: BTreeMap, 30 | pub writable_paths: BTreeMap, 31 | pub allow_users: Vec, 32 | pub default_user: Option, 33 | pub allow_groups: Vec, 34 | pub default_group: Option, 35 | pub allow_tcp_ports: Vec, 36 | pub additional_hosts: BTreeMap, 37 | pub uid_map: Vec, 38 | pub gid_map: Vec, 39 | pub auto_clean: bool, 40 | pub resolv_conf: PathBuf, 41 | pub hosts_file: PathBuf, 42 | pub bridged_network: Option, 43 | pub secrets_private_key: Option, 44 | pub secrets_namespaces: Vec, 45 | } 46 | 47 | impl SandboxConfig { 48 | pub fn check_path>(&self, path: P) -> bool { 49 | let mut num = 0; 50 | for component in path.as_ref().components() { 51 | match component { 52 | Component::Normal(x) if x.to_str().is_some() => num += 1, 53 | _ => return false, 54 | } 55 | } 56 | return num == self.image_dir_levels; 57 | } 58 | pub fn validator<'x>() -> Structure<'x> { 59 | Structure::new() 60 | .member("config_file", Scalar::new().optional()) 61 | .member("image_dir", Scalar::new().optional() 62 | .default("/var/lib/lithos/containers")) 63 | .member("image_dir_levels", 64 | Numeric::new().min(1).max(16).default(1)) 65 | .member("used_images_list", Scalar::new().optional()) 66 | .member("log_file", Scalar::new().optional()) 67 | .member("log_level", Scalar::new().optional()) 68 | .member("readonly_paths", Mapping::new( 69 | Scalar::new(), 70 | Scalar::new())) 71 | .member("writable_paths", Mapping::new( 72 | Scalar::new(), 73 | Scalar::new())) 74 | .member("allow_users", Sequence::new(Scalar::new())) 75 | .member("default_user", Scalar::new().optional()) 76 | .member("allow_groups", Sequence::new(Scalar::new())) 77 | .member("default_group", Scalar::new().default(0)) 78 | .member("allow_tcp_ports", Sequence::new(Scalar::new())) 79 | .member("uid_map", mapping_validator()) 80 | .member("gid_map", mapping_validator()) 81 | .member("additional_hosts", Mapping::new( 82 | Scalar::new(), 83 | Scalar::new())) 84 | .member("auto_clean", Scalar::new().default("true").optional()) 85 | .member("hosts_file", Scalar::new().default("/etc/hosts")) 86 | .member("resolv_conf", Scalar::new().default("/etc/resolv.conf")) 87 | .member("bridged_network", Structure::new() 88 | .member("bridge", Scalar::new()) 89 | .member("network", Scalar::new()) 90 | .member("default_gateway", Scalar::new().optional()) 91 | .member("after_setup_command", Sequence::new(Scalar::new())) 92 | .optional()) 93 | 
.member("secrets_private_key", Scalar::new().optional()) 94 | .member("secrets_namespaces", Sequence::new(Scalar::new())) 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/setup.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::io::{Write, stderr}; 3 | use std::path::{Path}; 4 | use std::time::SystemTime; 5 | 6 | use log; 7 | use fern; 8 | use syslog; 9 | use humantime::format_rfc3339_seconds; 10 | 11 | use super::master_config::MasterConfig; 12 | use super::utils::{clean_dir}; 13 | use super::cgroup; 14 | 15 | 16 | 17 | pub fn clean_child(name: &str, master: &MasterConfig, temporary: bool) { 18 | let st_dir = master.runtime_dir 19 | .join(&master.state_dir).join(name); 20 | clean_dir(&st_dir, true) 21 | .map_err(|e| error!("Error removing state dir for {}: {}", name, e)) 22 | .ok(); 23 | if !temporary { 24 | // If the shutdown is temporary (i.e. the process failed and we are going 25 | // to restart it shortly), we don't remove cgroups, because removing 26 | // them triggers the following bug in the memory cgroup controller: 27 | // 28 | // https://lkml.org/lkml/2016/6/15/1135 29 | // 30 | // This is still not fixed as of Linux 4.6, so we may eventually be 31 | // able to get rid of this workaround, but that is unlikely to happen soon :( 32 | // 33 | // Anyway, it's possible that we don't need this in the new (unified) 34 | // cgroup hierarchy, which is already there in 4.5, but we don't support 35 | // it yet. 36 | if let Some(ref master_grp) = master.cgroup_name { 37 | let cgname = name.replace("/", ":") + ".scope"; 38 | cgroup::remove_child_cgroup(&cgname, master_grp, 39 | &master.cgroup_controllers) 40 | .map_err(|e| error!("Error removing cgroup: {}", e)) 41 | .ok(); 42 | } 43 | } 44 | } 45 | 46 | pub fn init_logging(cfg: &MasterConfig, suffix: &Path, name: &str, 47 | log_stderr: bool, level: log::LogLevel) 48 | -> Result<(), String> 49 | { 50 | let sysfac = cfg.syslog_facility.as_ref() 51 | .and_then(|v| v.parse() 52 | .map_err(|_| writeln!(&mut stderr(), 53 | "Can't parse syslog facility: {:?}.
Syslog is disabled.", v)) 54 | .ok()); 55 | if let Some(facility) = sysfac { 56 | syslog::init(facility, level.to_log_level_filter(), Some(&name)) 57 | .map_err(|e| format!("Can't initialize logging: {}", e)) 58 | } else { 59 | let path = cfg.default_log_dir.join(suffix); 60 | let file = fern::log_file(path) 61 | .map_err(|e| format!("Can't initialize logging: {}", e))?; 62 | let mut disp = fern::Dispatch::new() 63 | .format(|out, message, record| { 64 | if record.level() >= log::LogLevel::Debug { 65 | out.finish(format_args!("[{}][{}]{}:{}: {}", 66 | format_rfc3339_seconds(SystemTime::now()), 67 | record.level(), 68 | record.location().file(), record.location().line(), 69 | message)) 70 | } else { 71 | out.finish(format_args!("[{}][{}] {}", 72 | format_rfc3339_seconds(SystemTime::now()), 73 | record.level(), message)) 74 | } 75 | }) 76 | .level(level.to_log_level_filter()) 77 | .chain(file); 78 | if log_stderr { 79 | disp = disp.chain(io::stderr()) 80 | } 81 | disp.apply() 82 | .map_err(|e| format!("Can't initialize logging: {}", e)) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/timer_queue.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::Ordering; 2 | use std::time::Instant; 3 | use std::collections::BinaryHeap; 4 | 5 | 6 | 7 | struct Item<T> { 8 | pub deadline: Instant, 9 | value: T, 10 | } 11 | 12 | impl<T> PartialEq for Item<T> { 13 | fn eq(&self, other: &Item<T>) -> bool { 14 | return other.deadline.eq(&self.deadline); 15 | } 16 | } 17 | 18 | impl<T> PartialOrd for Item<T> { 19 | fn partial_cmp(&self, other: &Item<T>) -> Option<Ordering> { 20 | // Turning max-heap upside down 21 | return other.deadline.partial_cmp(&self.deadline); 22 | } 23 | } 24 | 25 | impl<T> Eq for Item<T> {} 26 | impl<T> Ord for Item<T> { 27 | fn cmp(&self, other: &Self) -> Ordering { 28 | return other.deadline.cmp(&self.deadline); 29 | } 30 | } 31 | 32 | pub struct Queue<T>(BinaryHeap<Item<T>>); 33 | 34 | pub struct QueueIter<'a, T> where T: 'a { 35 | queue: &'a mut Queue<T>, 36 | max_time: Instant, 37 | } 38 | 39 | impl<'a, T> Iterator for QueueIter<'a, T> { 40 | type Item = T; 41 | fn next(&mut self) -> Option<T> { 42 | if self.queue.peek_time().map(|x| x < self.max_time).unwrap_or(false) { 43 | self.queue.0.pop().map(|x| x.value) 44 | } else { 45 | None 46 | } 47 | } 48 | } 49 | 50 | impl<T> Queue<T> { 51 | pub fn new() -> Queue<T> { 52 | Queue(BinaryHeap::new()) 53 | } 54 | pub fn add(&mut self, deadline: Instant, value: T) { 55 | self.0.push(Item { deadline: deadline, value: value }); 56 | } 57 | pub fn peek_time(&self) -> Option<Instant> { 58 | return self.0.peek().map(|x| x.deadline) 59 | } 60 | pub fn pop_until<'x>(&'x mut self, max_time: Instant) 61 | -> QueueIter<'x, T> 62 | { 63 | QueueIter { queue: self, max_time: max_time } 64 | } 65 | pub fn len(&self) -> usize { 66 | self.0.len() 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/tree_options.rs: -------------------------------------------------------------------------------- 1 | use log; 2 | use std::env; 3 | use std::path::PathBuf; 4 | use std::io::{Write, stdout, stderr}; 5 | use argparse::{ArgumentParser, Parse, StoreOption, StoreTrue, Print}; 6 | 7 | 8 | pub struct Options { 9 | pub config_file: PathBuf, 10 | pub log_stderr: bool, 11 | pub log_level: Option<log::LogLevel>, 12 | } 13 | 14 | impl Options { 15 | pub fn parse_args() -> Result<Options, i32> { 16 | Options::parse_specific_args(env::args().collect(), 17 | &mut stdout(), &mut stderr()) 18 | } 19 | pub fn
parse_specific_args(args: Vec<String>, 20 | stdout: &mut Write, stderr: &mut Write) 21 | -> Result<Options, i32> 22 | { 23 | let mut options = Options { 24 | config_file: PathBuf::from("/etc/lithos/master.yaml"), 25 | log_stderr: false, 26 | log_level: None, 27 | }; 28 | let parse_result = { 29 | let mut ap = ArgumentParser::new(); 30 | ap.set_description("Runs tree of processes"); 31 | ap.refer(&mut options.config_file) 32 | .add_option(&["-C", "--config"], Parse, 33 | "Name of the global configuration file \ 34 | (default /etc/lithos/master.yaml)") 35 | .metavar("FILE"); 36 | ap.refer(&mut options.log_stderr) 37 | .add_option(&["--log-stderr"], StoreTrue, 38 | "Print debugging info to stderr"); 39 | ap.refer(&mut options.log_level) 40 | .add_option(&["--log-level"], StoreOption, 41 | "Set log level (default info for now)"); 42 | ap.add_option(&["--version"], 43 | Print(env!("CARGO_PKG_VERSION").to_string()), 44 | "Show version"); 45 | ap.parse(args, stdout, stderr) 46 | }; 47 | match parse_result { 48 | Ok(()) => Ok(options), 49 | Err(x) => Err(x), 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/utils.rs: -------------------------------------------------------------------------------- 1 | use std::ptr; 2 | use std::io; 3 | use std::fs::{create_dir, remove_dir_all, read_dir, remove_file, remove_dir}; 4 | use std::fs::{metadata}; 5 | use std::path::{Path, PathBuf}; 6 | use std::path::Component::Normal; 7 | use std::io::Error as IoError; 8 | use std::io::ErrorKind::{AlreadyExists, NotFound}; 9 | use std::ffi::CString; 10 | use std::env::current_dir; 11 | 12 | use nix::sys::signal::Signal; 13 | use nix::sys::signal::{SIGQUIT, SIGSEGV, SIGBUS, SIGHUP, SIGILL, SIGABRT}; 14 | use nix::sys::signal::{SIGFPE, SIGUSR1, SIGUSR2}; 15 | use libc::{c_int, c_char, timeval, c_void, mode_t, uid_t, gid_t}; 16 | use libc::{chmod, chdir, chown}; 17 | use signal::trap::Trap; 18 | use range::Range; 19 | 20 | 21 | use super::id_map::IdMap; 22 | 23 | pub type Time = f64; 24 | pub type SigNum = i32; 25 | // TODO(tailhook) signal::Trap might use nix signals instead of i32 26 | pub const ABNORMAL_TERM_SIGNALS: &'static [Signal] = &[ 27 | SIGQUIT, SIGSEGV, SIGBUS, SIGHUP, 28 | SIGILL, SIGABRT, SIGFPE, SIGUSR1, 29 | SIGUSR2, 30 | ]; 31 | 32 | pub struct FsUidGuard(bool); 33 | 34 | extern { 35 | fn chroot(dir: *const c_char) -> c_int; 36 | fn pivot_root(new_root: *const c_char, put_old: *const c_char) -> c_int; 37 | fn gettimeofday(tp: *mut timeval, tzp: *mut c_void) -> c_int; 38 | 39 | // TODO(tailhook) move to libc and nix 40 | fn setfsuid(uid: uid_t) -> c_int; 41 | fn setfsgid(gid: gid_t) -> c_int; 42 | } 43 | 44 | 45 | pub fn temporary_change_root<T, F>(path: &Path, mut fun: F) 46 | -> Result<T, String> 47 | where F: FnMut() -> Result<T, String> 48 | { 49 | // The point is: if we get a fatal signal in the chroot, we have 2 issues: 50 | // 51 | // 1. Process can't actually restart (the binary path is wrong) 52 | // 2. Even if it finds the binary, it will be angry restarting in chroot 53 | // 54 | let _trap = Trap::trap(ABNORMAL_TERM_SIGNALS); 55 | 56 | let cwd = current_dir().map_err(|e| { 57 | format!("Can't determine current dir: {}.
\ 58 | This usually happens if the directory \ 59 | you're in is already deleted", e) 60 | })?; 61 | if unsafe { chdir(CString::new("/").unwrap().as_ptr()) } != 0 { 62 | return Err(format!("Error chdir to root: {}", 63 | IoError::last_os_error())); 64 | } 65 | if unsafe { chroot(cpath(&path).as_ptr()) } != 0 { 66 | return Err(format!("Error chroot to {:?}: {}", 67 | path, IoError::last_os_error())); 68 | } 69 | let res = fun(); 70 | if unsafe { chroot(CString::new(".").unwrap().as_ptr()) } != 0 { 71 | return Err(format!("Error chroot back: {}", 72 | IoError::last_os_error())); 73 | } 74 | if unsafe { chdir(cpath(&cwd).as_ptr()) } != 0 { 75 | return Err(format!("Error chdir to workdir back: {}", 76 | IoError::last_os_error())); 77 | } 78 | return res; 79 | } 80 | 81 | pub fn in_mapping(mapping: &Vec<IdMap>, value: u32) -> bool { 82 | for mp in mapping.iter() { 83 | if value >= mp.inside && value < mp.inside + mp.count { 84 | return true; 85 | } 86 | } 87 | return false; 88 | } 89 | 90 | pub fn check_mapping(ranges: &Vec<Range>, map: &Vec<IdMap>) -> bool { 91 | // TODO(tailhook) do more comprehensive algo 92 | 'map: for item in map.iter() { 93 | for rng in ranges.iter() { 94 | if rng.start <= item.outside && 95 | rng.end >= item.outside + item.count - 1 96 | { 97 | continue 'map; 98 | } 99 | } 100 | return false; 101 | } 102 | return true; 103 | } 104 | 105 | pub fn change_root(new_root: &Path, put_old: &Path) -> Result<(), String> 106 | { 107 | if unsafe { pivot_root( 108 | cpath(new_root).as_ptr(), 109 | cpath(put_old).as_ptr()) } != 0 110 | { 111 | return Err(format!("Error pivot_root to {}: {}", new_root.display(), 112 | IoError::last_os_error())); 113 | } 114 | if unsafe { chdir(CString::new("/").unwrap().as_ptr()) } != 0 115 | { 116 | return Err(format!("Error chdir to root: {}", 117 | IoError::last_os_error())); 118 | } 119 | return Ok(()); 120 | } 121 | 122 | pub fn ensure_dir(dir: &Path) -> Result<(), String> { 123 | if let Ok(dmeta) = metadata(dir) { 124 | if !dmeta.is_dir() { 125 | return Err(format!(concat!("Can't create dir {:?}, ", 126 | "path already exists but not a directory"), dir)); 127 | } 128 | return Ok(()); 129 | } 130 | 131 | match create_dir(dir) { 132 | Ok(()) => return Ok(()), 133 | Err(ref e) if e.kind() == AlreadyExists => { 134 | let dmeta = metadata(dir); 135 | if dmeta.is_ok() && dmeta.unwrap().is_dir() { 136 | return Ok(()); 137 | } else { 138 | return Err(format!(concat!("Can't create dir {:?}, ", 139 | "path already exists but not a directory"), 140 | dir)); 141 | } 142 | } 143 | Err(ref e) => { 144 | return Err(format!("Can't create dir {:?}: {}", 145 | dir, e)); 146 | } 147 | } 148 | } 149 | 150 | pub fn clean_dir(dir: &Path, remove_dir_itself: bool) -> Result<(), String> { 151 | if let Err(e) = metadata(dir) { 152 | if e.kind() == NotFound { 153 | return Ok(()); 154 | } else { 155 | return Err(format!("Can't stat dir {:?}: {}", dir, e)); 156 | } 157 | } 158 | // We temporarily change root, so that symlinks inside the dir 159 | // would do no harm.
But note that dir itself can be a symlink 160 | try!(temporary_change_root(dir, || { 161 | let dirlist = try!(read_dir("/") 162 | .map_err(|e| format!("Can't read directory {:?}: {}", dir, e))) 163 | .filter_map(|x| x.ok()) 164 | .collect::<Vec<_>>(); 165 | for entry in dirlist.into_iter() { 166 | match metadata(entry.path()) { 167 | Ok(ref meta) if meta.is_dir() => { 168 | try!(remove_dir_all(entry.path()) 169 | .map_err(|e| format!("Can't remove directory {:?}{:?}: {}", 170 | dir, entry.path(), e))); 171 | } 172 | Ok(_) => { 173 | try!(remove_file(entry.path()) 174 | .map_err(|e| format!("Can't remove file {:?}{:?}: {}", 175 | dir, entry.path(), e))); 176 | } 177 | Err(_) => { 178 | return Err(format!("Can't stat file {:?}", entry.path())); 179 | } 180 | } 181 | } 182 | Ok(()) 183 | })); 184 | if remove_dir_itself { 185 | try!(remove_dir(dir) 186 | .map_err(|e| format!("Can't remove dir {:?}: {}", dir, e))); 187 | } 188 | return Ok(()); 189 | } 190 | 191 | pub fn join<S1, S2, I>(mut iter: I, sep: S2) -> String 192 | where S1:AsRef<str>, S2:AsRef<str>, I:Iterator<Item=S1> 193 | { 194 | let mut buf = String::new(); 195 | match iter.next() { 196 | Some(x) => buf.push_str(x.as_ref()), 197 | None => {} 198 | } 199 | for i in iter { 200 | buf.push_str(sep.as_ref()); 201 | buf.push_str(i.as_ref()); 202 | } 203 | return buf; 204 | } 205 | 206 | pub fn get_time() -> Time { 207 | let mut tv = timeval { tv_sec: 0, tv_usec: 0 }; 208 | unsafe { gettimeofday(&mut tv, ptr::null_mut()) }; 209 | return tv.tv_sec as f64 + tv.tv_usec as f64 * 0.000001; 210 | } 211 | 212 | pub fn set_file_owner(path: &Path, owner: uid_t, group: gid_t) 213 | -> Result<(), IoError> 214 | { 215 | let cpath = cpath(path); 216 | let rc = unsafe { chown(cpath.as_ptr(), owner, group) }; 217 | if rc < 0 { 218 | return Err(IoError::last_os_error()); 219 | } 220 | return Ok(()); 221 | } 222 | 223 | pub fn set_file_mode(path: &Path, mode: mode_t) -> Result<(), IoError> { 224 | let cpath = cpath(path); 225 | let rc = unsafe { chmod(cpath.as_ptr(), mode) }; 226 | if rc < 0 { 227 | return Err(IoError::last_os_error()); 228 | } 229 | return Ok(()); 230 | } 231 | 232 | pub fn cpath<P: AsRef<Path>>(path: P) -> CString { 233 | CString::new(path.as_ref().to_str().unwrap()).unwrap() 234 | } 235 | 236 | pub fn relative(child: &Path, base: &Path) -> PathBuf { 237 | assert!(child.starts_with(base)); 238 | let mut res = PathBuf::new(); 239 | for cmp in child.components().skip(base.components().count()) { 240 | if let Normal(ref chunk) = cmp { 241 | res.push(chunk); 242 | } else { 243 | panic!("Bad path for relative ({:?} from {:?} against {:?})", 244 | cmp, child, base); 245 | } 246 | } 247 | return res 248 | } 249 | 250 | impl FsUidGuard { 251 | pub fn set(uid: u32, gid: u32) -> FsUidGuard { 252 | if uid != 0 || gid != 0 { 253 | unsafe { setfsuid(uid) }; 254 | if unsafe { setfsuid(uid) } != uid as i32 { 255 | error!("Can't set fs uid to open socket: {}. Ignoring.", 256 | io::Error::last_os_error()); 257 | } 258 | unsafe { setfsgid(gid) }; 259 | if unsafe { setfsgid(gid) } != gid as i32 { 260 | error!("Can't set fs gid to open socket: {}. Ignoring.", 261 | io::Error::last_os_error()); 262 | } 263 | FsUidGuard(true) 264 | } else { 265 | FsUidGuard(false) 266 | } 267 | } 268 | } 269 | 270 | impl Drop for FsUidGuard { 271 | fn drop(&mut self) { 272 | if self.0 { 273 | unsafe { setfsuid(0) }; 274 | if unsafe { setfsuid(0) } != 0 { 275 | let err = io::Error::last_os_error(); 276 | error!("Can't return fs uid back to zero: {}. Aborting.", err); 277 | panic!("Can't return fs uid back to zero: {}.
Aborting.", err); 278 | } 279 | unsafe { setfsgid(0) }; 280 | if unsafe { setfsgid(0) } != 0 { 281 | let err = io::Error::last_os_error(); 282 | error!("Can't return fs gid back to zero: {}. Aborting.", err); 283 | panic!("Can't return fs gid back to zero: {}. Aborting.", err); 284 | } 285 | } 286 | } 287 | } 288 | -------------------------------------------------------------------------------- /systemd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Lithos Supervisor service 3 | 4 | [Service] 5 | Type=simple 6 | Environment="RUST_LOG=warn" 7 | Environment="RUST_BACKTRACE=1" 8 | ExecStart=/usr/bin/lithos_tree 9 | Restart=always 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /upstart.conf: -------------------------------------------------------------------------------- 1 | start on runlevel [234] 2 | respawn 3 | env RUST_LOG=warn 4 | env RUST_BACKTRACE=1 5 | exec /usr/bin/lithos_tree 6 | 7 | -------------------------------------------------------------------------------- /vagga.yaml: -------------------------------------------------------------------------------- 1 | commands: 2 | 3 | make: !Command 4 | description: Build lithos in ubuntu container 5 | container: xenial 6 | run: [cargo, build] 7 | 8 | test: !Command 9 | description: Run cargo tests 10 | container: xenial 11 | run: [cargo, test] 12 | 13 | _static-lithos_check-package: !Command 14 | description: Build static lithos_check package (.tar.gz) 15 | container: rust-musl 16 | run: | 17 | cargo build --target=x86_64-unknown-linux-musl 18 | tar -czf dist/lithos-check-$(git describe).tar.gz \ 19 | -C target/x86_64-unknown-linux-musl/debug lithos_check 20 | 21 | cargo: !Command 22 | description: Run any cargo command 23 | container: xenial 24 | symlink-name: cargo 25 | run: [cargo] 26 | 27 | bulk: !Command 28 | description: Run any bulk command 29 | container: xenial 30 | run: [bulk] 31 | 32 | doc: !Command 33 | description: Build HTML docs 34 | container: docs 35 | work-dir: docs 36 | epilog: | 37 | ---------------------------------------- 38 | xdg-open docs/_build/html/index.html 39 | run: [make, html] 40 | 41 | make-docs: !Command 42 | description: Build 43 | container: docs 44 | work-dir: docs 45 | run: [make] 46 | 47 | _package-trusty: !Command 48 | container: trusty 49 | run: [make, ubuntu-packages] 50 | 51 | _package-xenial: !Command 52 | container: xenial 53 | run: [make, ubuntu-packages] 54 | 55 | _package-bionic: !Command 56 | container: bionic 57 | run: [make, ubuntu-packages] 58 | 59 | _package-lithos_check: !Command 60 | container: rust-musl 61 | run: [make, ubuntu-lithos_check-package] 62 | 63 | packages: !Command 64 | container: xenial 65 | description: Build packages and repostories (mostly for CI) 66 | prerequisites: [_package-trusty, _package-xenial, _package-bionic, 67 | _package-lithos_check] 68 | accepts_arguments: true 69 | run: | 70 | bulk repo-add --config bulk-check.yaml \ 71 | --repository-base dist/repos \ 72 | dist/lithos-check-*.deb "$@" 73 | bulk repo-add --config bulk.yaml --repository-base dist/repos \ 74 | dist/lithos-[0-9]*.deb "$@" 75 | 76 | containers: 77 | 78 | py-example: 79 | # This one is used as a container inside the lithos example 80 | setup: 81 | - !Alpine v3.3 82 | - !Install [python3] 83 | - !EnsureDir /config 84 | - !EnsureDir /code 85 | - !Copy 86 | source: /work/examples/py/code/sock.py 87 | path: /code/sock.py 88 | - !Copy 89 | 
source: /work/examples/py/code/socket.yaml 90 | path: /config/socket.yaml 91 | - !Copy 92 | source: /work/examples/py/code/py.yaml 93 | path: /config/py.yaml 94 | 95 | py_var-example: 96 | # This one is used as a container inside the lithos example 97 | setup: 98 | - !Alpine v3.3 99 | - !Install [python3] 100 | - !EnsureDir /config 101 | - !EnsureDir /code 102 | - !Copy 103 | source: /work/examples/py_var/code/sock.py 104 | path: /code/sock.py 105 | - !Copy 106 | source: /work/examples/py_var/code/socket.yaml 107 | path: /config/socket.yaml 108 | - !Copy 109 | source: /work/examples/py_var/code/py.yaml 110 | path: /config/py.yaml 111 | 112 | py_systemd-example: 113 | # This one is used as a container inside the lithos example 114 | setup: 115 | - !Alpine v3.7 116 | - !Install [python3] 117 | - !EnsureDir /config 118 | - !EnsureDir /code 119 | - !Copy 120 | source: /work/examples/py_systemd/code/sock.py 121 | path: /code/sock.py 122 | - !Copy 123 | source: /work/examples/py_systemd/code/socket.yaml 124 | path: /config/socket.yaml 125 | - !Copy 126 | source: /work/examples/py_systemd/code/py.yaml 127 | path: /config/py.yaml 128 | 129 | multi_level-example: 130 | # This one is used as a container inside the lithos example 131 | setup: 132 | - !Alpine v3.6 133 | - !Install [python3] 134 | - !EnsureDir /config 135 | - !EnsureDir /code 136 | - !Copy 137 | source: /work/examples/multi_level/code/sock.py 138 | path: /code/sock.py 139 | - !Copy 140 | source: /work/examples/multi_level/code/socket.yaml 141 | path: /config/socket.yaml 142 | - !Copy 143 | source: /work/examples/multi_level/code/py.yaml 144 | path: /config/py.yaml 145 | 146 | xenial: 147 | setup: 148 | - !Ubuntu xenial 149 | - &pkgs !Install [make, gcc, libc-dev, ca-certificates, git, lsb-release, vim] 150 | - &rust !TarInstall 151 | url: https://static.rust-lang.org/dist/rust-1.28.0-x86_64-unknown-linux-gnu.tar.gz 152 | script: "./install.sh --prefix=/usr --components=rustc,cargo \ 153 | --components=rustc,rust-std-x86_64-unknown-linux-gnu,cargo" 154 | - &bulk !Tar 155 | url: "https://github.com/tailhook/bulk/releases/download/v0.4.12/bulk-v0.4.12.tar.gz" 156 | sha256: 7deeb4895b3909afea46194ef01bafdeb30ff89fc4a7b6497172ba117734040e 157 | path: / 158 | 159 | environ: &environ 160 | HOME: /work/target 161 | 162 | trusty: 163 | setup: 164 | - !Ubuntu trusty 165 | - *pkgs 166 | - *rust 167 | - *bulk 168 | environ: *environ 169 | 170 | bionic: 171 | setup: 172 | - !Ubuntu bionic 173 | - *pkgs 174 | - *rust 175 | - *bulk 176 | environ: *environ 177 | 178 | 179 | rust-musl: 180 | environ: &rustenv 181 | LD_LIBRARY_PATH: /musl/lib/rustlib/x86_64-unknown-linux-musl/lib 182 | PATH: /musl/bin:/usr/local/bin:/usr/bin:/bin 183 | HOME: /work/target 184 | setup: 185 | - !Ubuntu xenial 186 | - !UbuntuUniverse 187 | - !Install [musl-tools] 188 | - *pkgs 189 | - *rust 190 | - !TarInstall 191 | url: "https://static.rust-lang.org/dist/rust-std-1.28.0-x86_64-unknown-linux-musl.tar.gz" 192 | script: "./install.sh --prefix=/musl \ 193 | --components=rust-std-x86_64-unknown-linux-musl" 194 | - !Sh 'ln -s /musl/lib/rustlib/x86_64-unknown-linux-musl /usr/lib/rustlib/x86_64-unknown-linux-musl' 195 | - *bulk 196 | 197 | docs: 198 | setup: 199 | - !Alpine v3.1 200 | - !Install [py-sphinx, make] 201 | - !Py2Requirements "docs/requirements.txt" 202 | --------------------------------------------------------------------------------
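For quick orientation, here is a minimal usage sketch (not part of the repository) of the timer queue defined in src/timer_queue.rs above. It assumes lithos is pulled in as a library dependency named "lithos", with pub mod timer_queue exposed as declared in src/lib.rs; the main function and the task strings are purely illustrative.

// Hypothetical sketch: exercising lithos::timer_queue::Queue as a library user.
use std::time::{Duration, Instant};

use lithos::timer_queue::Queue;

fn main() {
    let mut queue = Queue::new();
    let now = Instant::now();
    queue.add(now + Duration::from_secs(1), "poll status");
    queue.add(now + Duration::from_secs(5), "restart worker");

    // pop_until() yields only the items whose deadline lies before the
    // given instant; later items stay queued.
    for task in queue.pop_until(now + Duration::from_secs(2)) {
        println!("due now: {}", task);
    }
    assert_eq!(queue.len(), 1); // the 5-second item is still pending
}

The reversed comparison in the Ord implementation for Item turns the standard max-heap BinaryHeap into a min-heap keyed by deadline, which is why pop_until() sees the earliest deadlines first.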