├── tests ├── .gitignore ├── bin │ ├── is-running │ ├── expect-test-image │ ├── expect-test-command │ ├── kill-from-pidfile │ ├── logecho │ ├── talkback │ ├── read_from_port │ ├── get-serial │ ├── envcp │ ├── telchap │ ├── chaperone │ ├── sdnotify │ ├── sdnotify-exec │ ├── expect-lite-command-run │ ├── expect-lite-image-run │ ├── daemon │ ├── proctool │ ├── test-driver │ └── daemonutil.py ├── el-tests │ ├── basic-1 │ │ ├── test-002.elt │ │ ├── test-001.elt │ │ └── chaperone.conf │ ├── simple-1 │ │ ├── test-002.elt │ │ ├── test-001.elt │ │ ├── test-004.elt │ │ ├── test-003.elt │ │ └── chaperone.conf │ ├── fork-1 │ │ ├── test-001.elt │ │ ├── test-001b.elt │ │ ├── test-003.elt │ │ ├── test-004.elt │ │ └── chaperone.conf │ ├── notify-1 │ │ ├── test-001c.elt │ │ ├── test-001d.elt │ │ ├── test-001b.elt │ │ ├── test-001.elt │ │ ├── test-001e.elt │ │ └── chaperone.conf │ ├── cron-1 │ │ ├── test-001.elt │ │ ├── test-008.elt │ │ ├── test-006.elt │ │ ├── test-007.elt │ │ ├── test-005.elt │ │ ├── test-004.elt │ │ ├── simulate-rotate.sh │ │ └── chaperone.conf │ ├── exitkills-1 │ │ ├── test-001.elt │ │ └── chaperone.conf │ ├── simple-2 │ │ ├── test-002.elt │ │ ├── test-001.elt │ │ ├── test-003.elt │ │ ├── test-004.elt │ │ └── chaperone.conf │ └── inetd-1 │ │ ├── chaperone.conf │ │ ├── test-001.elt │ │ └── test-002.elt ├── run-all-tests.sh ├── run-shell.sh ├── prefix.py ├── run-el.sh ├── README.md ├── env_parse.py ├── syslog_spec.py ├── events.py └── service_order.py ├── chaperone ├── __init__.py ├── cutil │ ├── __init__.py │ ├── errors.py │ ├── syslog_info.py │ ├── format.py │ ├── servers.py │ ├── patches.py │ ├── proc.py │ ├── logging.py │ └── events.py ├── exec │ ├── __init__.py │ ├── telchap.py │ ├── sdnotify.py │ └── envcp.py └── cproc │ ├── __init__.py │ ├── pt │ ├── __init__.py │ ├── simple.py │ ├── oneshot.py │ ├── forking.py │ ├── cron.py │ ├── inetd.py │ └── notify.py │ ├── version.py │ ├── client.py │ └── watcher.py ├── doc ├── .gitignore ├── source │ ├── includes │ │ ├── defs.rst │ │ └── incomplete.rst │ ├── guide │ │ ├── chap-using.rst │ │ ├── chap-other.rst │ │ ├── chap-docker.rst │ │ ├── chap-intro.rst │ │ ├── chap-docker-simple.rst │ │ └── chap-docker-smaller.rst │ ├── ref │ │ ├── utilities.rst │ │ ├── index.rst │ │ ├── config.rst │ │ └── config-format.rst │ ├── _static │ │ └── custom.css │ ├── _templates │ │ └── layout.html │ ├── status.rst │ └── index.rst └── docserver │ ├── build │ ├── Dockerfile │ └── install.sh │ ├── README │ ├── build.sh │ ├── run.sh │ ├── etc │ ├── init.sh │ └── apache2.conf │ └── chaperone.d │ ├── 120-apache2.conf │ └── 010-start.conf ├── samples ├── chaperone-devbase │ ├── setup-bin │ ├── chaperone │ │ ├── README │ │ ├── chaperone │ │ └── setup.py │ ├── apps │ │ ├── bin │ │ │ └── README │ │ ├── init.d │ │ │ └── README │ │ ├── etc │ │ │ ├── README │ │ │ └── init.sh │ │ └── chaperone.d │ │ │ └── 010-start.conf │ ├── build-image.sh │ ├── Dockerfile │ └── install.sh ├── chaperone-lamp │ ├── setup-bin │ ├── apps │ │ ├── www │ │ │ ├── default │ │ │ │ └── index.php │ │ │ └── sites.d │ │ │ │ └── default.conf │ │ ├── init.d │ │ │ ├── phpmyadmin.sh │ │ │ └── mysql.sh │ │ ├── chaperone.d │ │ │ ├── 105-mysqld.conf │ │ │ └── 120-apache2.conf │ │ └── etc │ │ │ ├── mysql │ │ │ ├── start_mysql.sh │ │ │ └── my.cnf │ │ │ └── apache2.conf │ ├── Dockerfile │ ├── build-image.sh │ └── install.sh ├── docsample │ ├── README │ ├── chaperone.conf │ └── Dockerfile ├── README └── setup-bin │ ├── ct_setproxy │ ├── dot.bashrc │ └── build ├── sandbox ├── .shinit ├── .gitignore ├── bareimage │ 
├── setup-bin │ ├── Dockerfile │ └── install-bareimage.sh ├── centos.d │ ├── app.conf │ ├── cron.conf │ ├── apache.conf │ └── sys1.conf ├── test.d │ ├── cron.conf │ ├── apache.conf │ └── sys1.conf ├── bin │ ├── cps │ ├── fakeentry │ ├── chaperone │ └── repeat ├── etc │ └── makezombie.conf ├── bash.bashrc ├── testdock ├── testcent ├── bare_startup.sh ├── testbare ├── README ├── distserv │ ├── chaperone.d │ │ ├── 120-apache2.conf │ │ ├── 005-config.conf │ │ └── 010-start.conf │ ├── run.sh │ └── etc │ │ └── apache2.conf ├── testvar ├── testimage └── user.d │ └── sys1.conf ├── .gitignore ├── LICENSE ├── README ├── setup.py └── README.md /tests/.gitignore: -------------------------------------------------------------------------------- 1 | test_logs 2 | -------------------------------------------------------------------------------- /chaperone/__init__.py: -------------------------------------------------------------------------------- 1 | # Placeholder 2 | -------------------------------------------------------------------------------- /chaperone/cutil/__init__.py: -------------------------------------------------------------------------------- 1 | # Placeholder 2 | -------------------------------------------------------------------------------- /chaperone/exec/__init__.py: -------------------------------------------------------------------------------- 1 | # Placeholder 2 | -------------------------------------------------------------------------------- /doc/.gitignore: -------------------------------------------------------------------------------- 1 | build/* 2 | docserver/var 3 | -------------------------------------------------------------------------------- /samples/chaperone-devbase/setup-bin: -------------------------------------------------------------------------------- 1 | ../setup-bin -------------------------------------------------------------------------------- /samples/chaperone-lamp/setup-bin: -------------------------------------------------------------------------------- 1 | ../setup-bin -------------------------------------------------------------------------------- /sandbox/.shinit: -------------------------------------------------------------------------------- 1 | echo THIS IS THE SHELL INIT 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/* 2 | chaperone.egg* 3 | dist/* 4 | -------------------------------------------------------------------------------- /sandbox/.gitignore: -------------------------------------------------------------------------------- 1 | apps-* 2 | var-* 3 | 4 | 5 | -------------------------------------------------------------------------------- /sandbox/bareimage/setup-bin: -------------------------------------------------------------------------------- 1 | ../../samples/setup-bin -------------------------------------------------------------------------------- /samples/chaperone-devbase/chaperone/README: -------------------------------------------------------------------------------- 1 | ../../../README -------------------------------------------------------------------------------- /doc/source/includes/defs.rst: -------------------------------------------------------------------------------- 1 | .. 
|ENV| replace:: :kbd:`$ENV` 2 | -------------------------------------------------------------------------------- /samples/chaperone-devbase/chaperone/chaperone: -------------------------------------------------------------------------------- 1 | ../../../chaperone -------------------------------------------------------------------------------- /samples/chaperone-devbase/chaperone/setup.py: -------------------------------------------------------------------------------- 1 | ../../../setup.py -------------------------------------------------------------------------------- /samples/chaperone-lamp/apps/www/default/index.php: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /sandbox/centos.d/app.conf: -------------------------------------------------------------------------------- 1 | main.logging: { 2 | stderr: false, 3 | } 4 | -------------------------------------------------------------------------------- /tests/bin/is-running: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ps -C $1 >/dev/null && exit 0 4 | exit 1 5 | -------------------------------------------------------------------------------- /chaperone/cproc/__init__.py: -------------------------------------------------------------------------------- 1 | # Placeholder 2 | 3 | from chaperone.cproc.process_manager import TopLevelProcess 4 | -------------------------------------------------------------------------------- /chaperone/cproc/pt/__init__.py: -------------------------------------------------------------------------------- 1 | # Placeholder 2 | 3 | from chaperone.cproc.process_manager import TopLevelProcess 4 | -------------------------------------------------------------------------------- /tests/bin/expect-test-image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export EL_SHELL="expect-lite-image-run" 4 | exec expect-lite $1 5 | -------------------------------------------------------------------------------- /doc/docserver/build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM chapdev/chaperone-lamp:latest 2 | ADD . 
/setup/ 3 | RUN /setup/build/install.sh 4 | -------------------------------------------------------------------------------- /tests/bin/expect-test-command: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export EL_SHELL="expect-lite-command-run" 4 | exec expect-lite $1 5 | -------------------------------------------------------------------------------- /sandbox/bareimage/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | ADD setup-bin/* *.sh /setup-bin/ 4 | RUN /setup-bin/install-bareimage.sh 5 | -------------------------------------------------------------------------------- /sandbox/test.d/cron.conf: -------------------------------------------------------------------------------- 1 | cron.service: { 2 | command: '/usr/sbin/cron -f', 3 | restart: true, 4 | enabled: false, 5 | } 6 | -------------------------------------------------------------------------------- /tests/bin/kill-from-pidfile: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pidfile=$1 4 | 5 | if [ -f $pidfile ]; then 6 | sudo kill `cat $1` 7 | fi 8 | -------------------------------------------------------------------------------- /sandbox/centos.d/cron.conf: -------------------------------------------------------------------------------- 1 | cron.service: { 2 | bin: /usr/sbin/cron, 3 | args: -f, 4 | optional: true, 5 | restart: true, 6 | } 7 | -------------------------------------------------------------------------------- /sandbox/bin/cps: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Shortcut for more relevant PS for containers 3 | 4 | ps --forest -weo 'user,pid,ppid,pgid,sid,%cpu,%mem,stat,command' 5 | -------------------------------------------------------------------------------- /tests/bin/logecho: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$SERVICE_NAME" == "" ]; then 4 | SERVICE_NAME="pid$$" 5 | fi 6 | 7 | logger -p info -t $SERVICE_NAME "$*" 8 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM chapdev/chaperone-base:latest 2 | 3 | ADD *.sh /setup-bin/ 4 | ADD apps/ /apps/ 5 | RUN /setup-bin/install.sh 6 | 7 | EXPOSE 8080 8 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/build-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # the cd trick assures this works even if the current directory is not current. 
4 | cd ${0%/*} 5 | ./setup-bin/build -x 6 | -------------------------------------------------------------------------------- /tests/el-tests/basic-1/test-002.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: Test simplest possible task 3 | 4 | >RUNTASK proctool testing-123 5 | RUNIMAGE 4 | /dev/null 2>&1; then 4 | # nmap.org accepts --version and has different syntax (lovely eh) 5 | nc --recv-only $* 6 | else 7 | # bsd version 8 | nc $* 9 | fi 10 | -------------------------------------------------------------------------------- /tests/el-tests/simple-1/test-002.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: Simple services - all processes disabled 3 | 4 | >RUNIMAGE 5 | $serfile 13 | echo $current 14 | -------------------------------------------------------------------------------- /tests/el-tests/fork-1/test-001.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: Forking service - spawn daemon normally 3 | 4 | >RUNIMAGE -e ENABLE_EXIT1=true 5 | RUNIMAGE -e ENABLE_EXIT1C=true 5 | (sleep 15; echo "K""ILL ME NOW")& 5 | >RUNIMAGE -e ENABLE_CRON1=true 6 | @30 7 | ^C 11 | RUNIMAGE 6 | <: daemon running 7 | RUNIMAGE -e ENABLE_EXIT1D=true 5 | <: daemon running 6 | 7 | RUNIMAGE -e ENABLE_EXIT1=true 5 | RUNIMAGE -e ENABLE_APACHE8=true 6 | RUNIMAGE -e ENABLE_EXIT1B=true 5 | RUNIMAGE -e ENABLE_EXIT1B=true 5 | ' 7 | RUNIMAGE -e ENABLE_EXIT1=true 5 | ' 5 | exit 6 | fi 7 | 8 | export PATH=$PWD/bin:$PATH 9 | export CHTEST_DOCKER_CMD="sdnotify-exec --noproxy --verbose --wait-stop docker run %{SOCKET_ARGS}" 10 | 11 | test-driver --shell el-tests/$1 12 | -------------------------------------------------------------------------------- /tests/el-tests/notify-1/test-001e.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: Notify service - spawn daemon - normal ready notification 3 | 4 | >RUNIMAGE -e ENABLE_EXIT1E=true 5 | <: daemon running 6 | (sleep 5; echo "K""ILL ME NOW")& 5 | >RUNIMAGE 6 | ^C 9 | RUNIMAGE -e ENABLE_APACHE6=true 5 | (sleep 8; echo "K""ILL ME NOW")& 5 | >RUNIMAGE -e ENABLE_APACHE3=true 6 | ^C 11 | RUNIMAGE -e ENABLE_APACHE4=true 5 | (sleep 5; echo "K""ILL ME NOW")& 5 | >RUNIMAGE -e ENABLE_EXIT1=true 6 | ^C 10 | RUNIMAGE -e ENABLE_APACHE4=true 5 | (sleep 8; echo "K""ILL ME NOW")& 5 | >RUNIMAGE -e ENABLE_APACHE3=true 6 | ^C 12 | /etc/apt/apt.conf.d/01proxy 8 | echo ADDED PROXY FOR apt-get on $defhost 9 | else 10 | rm -f /etc/apt/apt.conf.d/01proxy 11 | echo NO PROXY FOR apt-get 12 | fi 13 | -------------------------------------------------------------------------------- /tests/el-tests/cron-1/test-007.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: Cron job killing apache keeps container running (cron scheduled) 3 | 4 | @30 5 | >(sleep 20; echo "K""ILL ME NOW")& 6 | >RUNIMAGE -e ENABLE_APACHE7=true 7 | ^C 12 | (sleep 8; echo "K""ILL ME NOW")& 5 | >RUNIMAGE -e ENABLE_APACHE3=true 6 | ^C 12 | (sleep 8; echo "K""ILL ME NOW")& 5 | >RUNIMAGE -e ENABLE_APACHE4=true 6 | ^C 13 | " 7 | exit 1 8 | fi 9 | prodimage="$1" 10 | if [ ! -f build/Dockerfile ]; then 11 | echo "Expecting to find Dockerfile in ./build ... not found!" 
12 | exit 1 13 | fi 14 | tar czh --exclude '*~' --exclude 'var/*' . | docker build -t $prodimage -f build/Dockerfile - 15 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/apps/www/sites.d/default.conf: -------------------------------------------------------------------------------- 1 | 2 | 3 | # The ServerName directive sets the request scheme, hostname and port that 4 | # the server uses to identify itself. 5 | #ServerName www.example.com 6 | 7 | ServerAdmin webmaster@localhost 8 | DocumentRoot ${APACHE_SITES_DIR}/default 9 | 10 | # Errors go to the syslog so they can be duplicated to the console easily 11 | ErrorLog syslog:local1 12 | CustomLog ${APACHE_LOG_DIR}/default-access.log combined 13 | 14 | 15 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/apps/chaperone.d/105-mysqld.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_set: { 3 | 'MYSQL_HOME': '$(APPS_DIR)/etc/mysql', 4 | 'MYSQL_UNIX_PORT': '$(APPS_DIR)/var/run/mysqld.sock', 5 | }, 6 | } 7 | 8 | mysql1.service: { 9 | type: forking, 10 | command: "/etc/init.d/mysql start", 11 | enabled: false, 12 | uid: root, 13 | service_group: database, 14 | } 15 | 16 | mysql.service: { 17 | type: simple, 18 | command: "$(APPS_DIR)/etc/mysql/start_mysql.sh", 19 | enabled: true, 20 | service_group: database, 21 | } 22 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/apps/etc/mysql/start_mysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # For a general query log, include the following: 4 | # --general-log-file=$APPS_DIR/log/mysqld-query.log 5 | # --general-log=1 6 | 7 | exec /usr/sbin/mysqld \ 8 | --defaults-file=$APPS_DIR/etc/mysql/my.cnf \ 9 | --user ${USER:-mysql} \ 10 | --datadir=$APPS_DIR/var/mysql \ 11 | --socket=$APPS_DIR/var/run/mysqld.sock \ 12 | --pid-file=$APPS_DIR/var/run/mysqld.pid \ 13 | --log-error=$APPS_DIR/var/log/mysqld-error.log \ 14 | --plugin-dir=/usr/lib/var/mysql/plugin 15 | -------------------------------------------------------------------------------- /sandbox/README: -------------------------------------------------------------------------------- 1 | Files in this directory were created ad-hoc by me as a sandbox testing area. Typically, I create a docker 2 | image and point /home to my host's /home, then keep chaperone in a sub-directory where I work on it without 3 | needing to install it each time. I run my docker image with ./testdock and it isolates operation in this 4 | sandbox directory. 5 | 6 | The "testimage" script is especially useful since it lets you work with a standard docker chaperone image 7 | while substituting the current chaperone source instead of using the installed version. 
8 | -------------------------------------------------------------------------------- /tests/el-tests/cron-1/test-005.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: Complex Apache and background restart - process termination with PIDFILE 3 | 4 | @40 5 | >(sleep 25; echo "K""ILL ME NOW")& 6 | >RUNIMAGE -e ENABLE_APACHE5=true 7 | ^C 18 | <(sleep 25; echo "K""ILL ME NOW")& 6 | >RUNIMAGE -e ENABLE_APACHE4=true 7 | ^C 18 | < 6 | (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ 7 | (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), 8 | m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) 9 | })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); 10 | 11 | ga('create', 'UA-59042532-2', 'auto'); 12 | ga('send', 'pageview'); 13 | 14 | 15 | 16 | {% endblock %} 17 | -------------------------------------------------------------------------------- /tests/bin/expect-lite-command-run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function RUNTASK() { 4 | expect-lite-image-run --task $* 5 | } 6 | 7 | function RUNIMAGE() { 8 | export CHTEST_DOCKER_CMD="sdnotify-exec --noproxy --verbose --wait-stop docker run %{SOCKET_ARGS}" 9 | export CHTEST_DOCKER_OPTS=$* 10 | expect-lite-image-run 11 | } 12 | 13 | function RUNIMAGE_READY() { 14 | export CHTEST_DOCKER_CMD="sdnotify-exec --noproxy --verbose --wait-ready docker run %{SOCKET_ARGS}" 15 | export CHTEST_DOCKER_OPTS=$* 16 | expect-lite-image-run 17 | } 18 | 19 | export -f RUNTASK RUNIMAGE RUNIMAGE_READY 20 | bash -i 21 | -------------------------------------------------------------------------------- /sandbox/test.d/apache.conf: -------------------------------------------------------------------------------- 1 | apache1.service: { 2 | command: "/usr/sbin/apache2 -f $(SANDBOX)/etc/apache2.conf", 3 | enabled: true, 4 | restart: false, 5 | optional: true, 6 | env_set: { 7 | APACHE_LOCK_DIR: /tmp, 8 | APACHE_PID_FILE: /tmp/apache2.pid, 9 | APACHE_RUN_USER: www-data, 10 | APACHE_RUN_GROUP: www-data, 11 | APACHE_LOG_DIR: /var/log/apache2, 12 | } 13 | } 14 | 15 | mysql.service: { 16 | command: "/etc/init.d/mysql start", 17 | enabled: false, 18 | } 19 | 20 | apache2.service: { 21 | command: "/etc/init.d/apache2 start", 22 | after: "mysql.service", 23 | enabled: false, 24 | } 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Gary J. Wisniewski 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /tests/el-tests/exitkills-1/chaperone.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_set: { PATH: "$(TESTHOME)/bin:$(PATH)" } 3 | } 4 | 5 | test1-keeper.service: { 6 | command: "bash -c 'logecho lagging task sleeping for 5 minutes... ; sleep 600'", 7 | } 8 | 9 | test1-kills.service: { 10 | type: forking, 11 | enabled: true, 12 | command: "daemon bash -c 'echo $$ >/tmp/kid.pid; logecho daemon running; sleep 10; logecho wait completed: daemon exiting'", 13 | pidfile: "/tmp/kid.pid", 14 | exit_kills: true, 15 | service_groups: IDLE, 16 | } 17 | 18 | # Debugging output for all 19 | 20 | default.logging: { 21 | selector: "*.debug", 22 | stdout: true, 23 | } 24 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Chaperone is a lean, full-featured top-level system manager, similar to init, systemd, and others, 2 | but designed for lean container environments like Docker. It is a single, small program which provides 3 | process clean-up, rudimentary logging, and service management without the overhead of additional 4 | complex configuration. 5 | 6 | ================ ====================================================== 7 | Documentation http://garywiz.github.io/chaperone 8 | chaperone Source http://github.com/garywiz/chaperone 9 | pypi link http://pypi.python.org/pypi/chaperone 10 | ================ ====================================================== 11 | -------------------------------------------------------------------------------- /tests/el-tests/cron-1/simulate-rotate.sh: -------------------------------------------------------------------------------- 1 | echo simulating rotation 2 | echo SIMULATE-ROTATE SERIAL NUMBER: $(get-serial) 3 | service=$1 4 | telchap=$2 5 | $(is-running apache2) && echo apache is running || echo apache is NOT running 6 | ps axf 7 | if [ "$telchap" != "telchap" ]; then 8 | sudo kill `cat /run/apache2/apache2.pid` # chaperone doesn't know this 9 | echo DIRECT KILL of $service 10 | else 11 | echo Use TELCHAP to tell Chaperone to kill $service 12 | fi 13 | telchap reset $service 14 | sleep 1 15 | $(is-running apache2) && echo apache is running || echo apache is NOT running 16 | ps axf 17 | telchap start $service 18 | sleep 1 19 | $(is-running apache2) && echo apache is running || echo apache is NOT running 20 | ps axf 21 | -------------------------------------------------------------------------------- /tests/run-el.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function relpath() { python -c "import os,sys;print(os.path.relpath(*(sys.argv[1:])))" "$@"; } 4 | 5 | export PATH=$PWD/bin:$PATH 6 | 7 | if [ "$1" == '-n' ]; then 8 | counter=$2 9 | shift 2 10 | for (( i=1; $i<=$counter; i++ )); do 11 | export CHTEST_LOGDIR=$PWD/test_logs/n$i 12 | $0 $* & 13 | done 14 | wait 15 | exit 16 | fi 17 | 18 | if [ "$1" != "" ]; then 19 | export CHTEST_ONLY_ENDSWITH=$1 20 | fi 21 | 22 | test-driver el-tests/basic-1 23 | test-driver el-tests/simple-1 24 | test-driver el-tests/simple-2 25 | test-driver el-tests/cron-1 26 | test-driver el-tests/fork-1 27 | test-driver el-tests/inetd-1 28 | test-driver el-tests/notify-1 29 | test-driver el-tests/exitkills-1 30 | -------------------------------------------------------------------------------- 
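The test1-kills.service entry in the exitkills-1 configuration above shows the usual pattern for a forking service: the daemon records its own PID in a pidfile (/tmp/kid.pid) so the supervisor can keep track of it after the launching command exits. The sketch below is illustrative only, not chaperone's implementation; it shows roughly what reading such a pidfile and checking that the recorded process is still alive involves:

    import os

    def read_pidfile(path="/tmp/kid.pid"):
        """Return the PID recorded by the daemon, or None if absent/unreadable."""
        try:
            with open(path) as f:
                return int(f.read().strip())
        except (FileNotFoundError, ValueError):
            return None

    def is_alive(pid):
        """Signal 0 delivers nothing but reports whether the PID still exists."""
        try:
            os.kill(pid, 0)
        except ProcessLookupError:
            return False
        except PermissionError:
            return True   # exists, but owned by another user
        return True

    pid = read_pidfile()
    print("daemon running" if pid and is_alive(pid) else "daemon not running")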
/samples/chaperone-lamp/apps/init.d/mysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | distdir=/var/lib/mysql 4 | appdbdir=$APPS_DIR/var/mysql 5 | 6 | function dolog() { logger -t mysql.sh -p info $*; } 7 | 8 | if [ $CONTAINER_INIT == 1 ]; then 9 | dolog "hiding distribution mysql files in /etc so no clients see them" 10 | su -c "cd /etc; mv my.cnf my.cnf-dist; mv mysql mysql-dist; mv $distdir $distdir-dist" 11 | fi 12 | 13 | if [ $APPS_INIT == 1 ]; then 14 | if [ ! -d $appdbdir ]; then 15 | dolog "copying distribution $distdir to $appdbdir" 16 | su -c "cp -a $distdir-dist $appdbdir; chown -R ${USER:-mysql} $appdbdir" 17 | else 18 | dolog "existing $appdbdir found when initializing $APPS_DIR for the first time, not changed." 19 | fi 20 | fi 21 | -------------------------------------------------------------------------------- /tests/el-tests/inetd-1/chaperone.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_set: { PATH: "$(TESTHOME)/bin:$(PATH)" } 3 | } 4 | 5 | # INETD1: simple test 6 | 7 | inetd1.service: { 8 | enabled: "$(ENABLE_INETD1:-false)", 9 | type: inetd, 10 | port: 8080, 11 | command: "echo hello from port 8080", 12 | } 13 | 14 | # INETD2: disables both this service and inetd1 which will cause container exit 15 | 16 | inetd2.service: { 17 | enabled: "$(ENABLE_INETD2:-false)", 18 | type: inetd, 19 | port: 8443, 20 | command: "bash -c 'telchap stop inetd1 inetd2; echo disabled both'", 21 | after: inetd1.service, # so log entries are in the right order 22 | } 23 | 24 | # Debugging output for all 25 | 26 | default.logging: { 27 | selector: "*.debug", 28 | stdout: true, 29 | } 30 | -------------------------------------------------------------------------------- /samples/chaperone-devbase/apps/init.d/README: -------------------------------------------------------------------------------- 1 | Files in this directory are executed upon container startup by the ../etc/init.sh script. 2 | 3 | There are two modes: 4 | 5 | 1. When the container is first set up, CONTAINER_INIT=="1" and the script can use 'su' without a 6 | password. This is so that any setup activities can be performed which require full access 7 | to the system. 8 | 9 | 2. On subsequent boots (if the container is stopped and started), the same scripts will be 10 | run with CONTAINER_INIT=="0". However, root access is locked down if env var SECURE_ROOT=1. 11 | 12 | Note that SECURE_ROOT is not defined by default. 13 | 14 | In all cases, scripts are run as either root, or the user specified by --user on the 15 | chaperone command line. 16 | -------------------------------------------------------------------------------- /samples/chaperone-devbase/apps/etc/README: -------------------------------------------------------------------------------- 1 | This is a "mini etc" directory which, as much as possible, is where all normal application and service configuration 2 | files are stored. For example, in the chaperone-lamp configuration, all MySQL and Apache configurations are stored 3 | here, but may make reference to other files on the system (such as modules and plugins). However, the normal 4 | startup files in /etc/apache2 and /etc/mysql are not used, as they expect a normal fully-booted system. 5 | 6 | System start-up is controlled by the init.sh script, which reads additional startup files from ../init.d.
7 | 8 | This is not built into chaperone, but rather is a custom configuration defined within chaperone.d. If you want, 9 | you can completely change the way things work and invent new startup schemes. But, this is a good place to start. 10 | -------------------------------------------------------------------------------- /tests/bin/expect-lite-image-run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | options="" 4 | if [ "$CHTEST_CONTAINER_NAME" != "" ]; then 5 | options="--name $CHTEST_CONTAINER_NAME" 6 | fi 7 | 8 | if [[ " $CHTEST_DOCKER_OPTS " != *\ -d* ]]; then 9 | options="$options -i -t --rm" 10 | fi 11 | 12 | if [ "$CHTEST_DOCKER_CMD" == "" ]; then 13 | CHTEST_DOCKER_CMD="docker run" 14 | fi 15 | 16 | SELINUX_FLAG=$(sestatus 2>/dev/null | fgrep -q enabled && echo :z) 17 | 18 | exec $CHTEST_DOCKER_CMD $options \ 19 | -v /home:/home$SELINUX_FLAG \ 20 | -e TESTHOME=$TESTHOME \ 21 | -e TESTDIR=$TESTDIR \ 22 | -e CHTEST_HOME=$CHTEST_HOME \ 23 | $CHTEST_DOCKER_OPTS \ 24 | --entrypoint $TESTHOME/bin/chaperone \ 25 | $CHTEST_IMAGE \ 26 | --create $USER:$TESTHOME \ 27 | --default-home $CHTEST_HOME \ 28 | --config $CHTEST_HOME/../chaperone.conf \ 29 | $* 30 | -------------------------------------------------------------------------------- /chaperone/exec/telchap.py: -------------------------------------------------------------------------------- 1 | """ 2 | Interactive command tool for chaperone 3 | 4 | Usage: 5 | telchap [ ...] 6 | """ 7 | 8 | # perform any patches first 9 | import chaperone.cutil.patches 10 | 11 | # regular code begins 12 | import sys 13 | import os 14 | import asyncio 15 | import shlex 16 | from docopt import docopt 17 | 18 | from chaperone.cproc.client import CommandClient 19 | from chaperone.cproc.version import VERSION_MESSAGE 20 | 21 | def main_entry(): 22 | options = docopt(__doc__, options_first=True, version=VERSION_MESSAGE) 23 | try: 24 | result = CommandClient.sendCommand(options[''] + " " + " ".join([shlex.quote(a) for a in options['']])) 25 | except (ConnectionRefusedError, FileNotFoundError) as ex: 26 | result = "chaperone does not seem to be listening, is it running?\n(Error is: {0})".format(ex) 27 | 28 | print(result) 29 | -------------------------------------------------------------------------------- /doc/docserver/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Created by chaplocal on Wed Jun 10 16:08:42 EST 2015 3 | 4 | cd ${0%/*} # go to directory of this file 5 | APPS=$PWD 6 | cd .. 7 | 8 | options="-t -i -e TERM=$TERM --rm=true" 9 | shellopt="/bin/bash" 10 | if [ "$1" == '-d' ]; then 11 | shift 12 | options="-d" 13 | shellopt="" 14 | fi 15 | 16 | if [ "$1" == "-h" ]; then 17 | echo "Usage: run.sh [-d] [-h] [extra-chaperone-options]" 18 | echo " Run chapdev/chaperone-baseimage:latest as a daemon or interactively (the default)." 19 | exit 20 | fi 21 | 22 | # Extract our local UID/GID 23 | myuid=`id -u` 24 | mygid=`id -g` 25 | 26 | # Run the image with this directory as our local apps dir. 27 | # Create a user with uid=$myuid inside the container so the mountpoint permissions 28 | # are correct. 
29 | 30 | docker run $options -v /home:/home -p 8088:8080 chapdev/chaperone-lamp:latest \ 31 | --create $USER:$myuid --config $APPS/chaperone.d $* $shellopt 32 | -------------------------------------------------------------------------------- /samples/chaperone-devbase/apps/chaperone.d/010-start.conf: -------------------------------------------------------------------------------- 1 | # General environmental settings 2 | 3 | settings: { 4 | env_set: { 5 | 'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin', 6 | 'APPS_DIR': '$(_CHAP_CONFIG_DIR:-/)', 7 | #'SECURE_ROOT': '1', 8 | }, 9 | } 10 | 11 | init.service: { 12 | type: oneshot, 13 | command: '/bin/bash $(APPS_DIR)/etc/init.sh', 14 | before: 'default,database,application', 15 | process_timeout: 20, # init may take longer 16 | service_group: 'init', 17 | } 18 | 19 | chaperone.logging: { 20 | enabled: true, 21 | filter: '[chaperone].*', 22 | file: '$(APPS_DIR)/var/log/chaperone-%d.log', 23 | } 24 | 25 | syslog.logging: { 26 | enabled: true, 27 | filter: '*.info;![chaperone].*', 28 | file: '$(APPS_DIR)/var/log/syslog-%d.log', 29 | } 30 | 31 | console.logging: { 32 | enabled: true, 33 | stdout: true, 34 | filter: '*.warn;authpriv,auth.!*;daemon.!warn', 35 | } 36 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/apps/chaperone.d/120-apache2.conf: -------------------------------------------------------------------------------- 1 | apache2.service: { 2 | command: "/usr/sbin/apache2 -f $(APPS_DIR)/etc/apache2.conf -DFOREGROUND", 3 | enabled: true, 4 | restart: true, 5 | optional: true, 6 | uid: "$(USER:-www-data)", 7 | env_set: { 8 | APACHE_LOCK_DIR: /tmp, 9 | APACHE_PID_FILE: /tmp/apache2.pid, 10 | APACHE_RUN_USER: www-data, 11 | APACHE_RUN_GROUP: www-data, 12 | APACHE_LOG_DIR: "$(APPS_DIR)/var/log/apache2", 13 | APACHE_SITES_DIR: "$(APPS_DIR)/www", 14 | MYSQL_SOCKET: "$(APPS_DIR)/var/run/mysqld.sock", 15 | }, 16 | after: database, 17 | } 18 | 19 | apache2.logging: { 20 | enabled: true, 21 | filter: 'local1.*;*.!err', 22 | file: '$(APPS_DIR)/var/log/apache2/access-%d.log', 23 | uid: "$(USER:-www-data)", 24 | } 25 | 26 | apache2.logging: { 27 | enabled: true, 28 | filter: 'local1.err', 29 | stderr: true, 30 | file: '$(APPS_DIR)/var/log/apache2/error-%d.log', 31 | uid: "$(USER:-www-data)", 32 | } 33 | -------------------------------------------------------------------------------- /tests/el-tests/inetd-1/test-001.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: inetd - simple exit service, keeps running 3 | 4 | # Start the image and capture the container ID and port 5 | # (Note: the initial "echo" below is needed to take care of a timing issue with 'docker run') 6 | 7 | >echo running...; echo CID:`RUNIMAGE_READY -d -P -e ENABLE_INETD1=true` 8 | +$cid=CID:([0-9a-f]{32,}) 9 | >docker port $cid 10 | +$ourport=8080/tcp -> 0.0.0.0:([0-9]+) 11 | 12 | # Fire up an inetd connection inside the container and verify it works (ready assured) 13 | 14 | >read_from_port localhost $ourport 15 | sleep 3 20 | >docker stop $cid 21 | >docker logs $cid 22 | docker rm -v $cid 31 | -------------------------------------------------------------------------------- /doc/source/guide/chap-other.rst: -------------------------------------------------------------------------------- 1 | 2 | .. include:: /includes/incomplete.rst 3 | 4 | .. 
_chap.other: 5 | 6 | Other Uses for Chaperone 7 | ======================== 8 | 9 | Chaperone was designed for container use in scenarios such as Docker containers. However, 10 | it has also been designed to operate as a non-root process manager, though this has not 11 | been tested very well. 12 | 13 | If running as a non-root user, observe the following: 14 | 15 | * The :ref:`--force ` switch will need to be used at startup. 16 | * Chaperone will not create its ``syslog`` service at ``/dev/log``. 17 | * Chaperone will not create the ``telchap`` command socket at ``/dev/chaperone.sock``. 18 | * Process cleanup will not occur if processes are reparented, since they will be 19 | reparented to PID 1. 20 | 21 | Other than these notes, Chaperone *should* work as a process manager within userspace 22 | for managing small groups of related processes. If you find use cases outside 23 | of container management, let me know. 24 | -------------------------------------------------------------------------------- /chaperone/cproc/version.py: -------------------------------------------------------------------------------- 1 | # This file is designed to be used as a package module, but also as a main program runnable 2 | # by Python2 or Python3 which will print the version. Used in setup.py 3 | 4 | VERSION = (0,3,9) 5 | DISPLAY_VERSION = ".".join([str(v) for v in VERSION]) 6 | 7 | LICENSE = "Apache License, Version 2.0" 8 | 9 | MAINTAINER = "Gary Wisniewski " 10 | 11 | LINK_PYPI = "https://pypi.python.org/pypi/chaperone" 12 | LINK_DOC = "http://garywiz.github.io/chaperone" 13 | LINK_SOURCE = "http://github.com/garywiz/chaperone" 14 | LINK_QUICKSTART = "http://github.com/garywiz/chaperone-baseimage" 15 | LINK_LICENSE = "http://www.apache.org/licenses/LICENSE-2.0" 16 | 17 | import sys 18 | import os 19 | 20 | VERSION_MESSAGE = """ 21 | This is '{1}' version {0.DISPLAY_VERSION}. 22 | 23 | Documentation and source are available at {0.LINK_SOURCE}. 24 | Licensed under the {0.LICENSE}. 25 | """.format(sys.modules[__name__], os.path.basename(sys.argv[0])) 26 | 27 | if __name__ == '__main__': 28 | print(DISPLAY_VERSION) 29 | -------------------------------------------------------------------------------- /doc/source/ref/index.rst: -------------------------------------------------------------------------------- 1 | .. chaperone documentation master file, created by 2 | sphinx-quickstart on Mon May 6 17:19:12 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. _reference: 7 | 8 | Chaperone Reference 9 | =================== 10 | 11 | This is the full Chaperone Reference and describes in detail how to run and configure Chaperone. 12 | 13 | However, if you are using Chaperone with Docker, you can save time and see how a pre-built container works 14 | and come back here later when you need more detail. 15 | To get started, we suggest reading the :ref:`intro`, then try out the ``chaperone-lamp`` Docker 16 | image by following the instructions on the `chaperone-docker github page `_ 17 | 18 | Any bugs should be reported as issues at https://github.com/garywiz/chaperone/issues. 19 | 20 | .. toctree:: 21 | :maxdepth: 2 22 | 23 | command-line 24 | config 25 | env 26 | utilities 27 | -------------------------------------------------------------------------------- /samples/setup-bin/dot.bashrc: -------------------------------------------------------------------------------- 1 | # ~/.bashrc: executed by bash(1) for non-login shells.
2 | # This is a simpler, stripped-down version for containers 3 | 4 | # If not running interactively, don't do anything 5 | [ -z "$PS1" ] && return 6 | 7 | # don't put duplicate lines in the history. See bash(1) for more options 8 | # ... or force ignoredups and ignorespace 9 | HISTCONTROL=ignoredups:ignorespace 10 | 11 | # append to the history file, don't overwrite it 12 | shopt -s histappend 13 | 14 | # for setting history length see HISTSIZE and HISTFILESIZE in bash(1) 15 | HISTSIZE=1000 16 | HISTFILESIZE=2000 17 | 18 | # make less more friendly for non-text input files, see lesspipe(1) 19 | [ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 20 | 21 | case "$TERM" in 22 | xterm*|rxvt*) 23 | PS1="\[\u@\h: \w\a\]$PS1" 24 | ;; 25 | *) 26 | ;; 27 | esac 28 | 29 | # some more ls aliases 30 | alias ll='ls -alF' 31 | alias la='ls -A' 32 | alias l='ls -CF' 33 | 34 | # Alias definitions. 35 | if [ -f ~/.bash_aliases ]; then 36 | . ~/.bash_aliases 37 | fi 38 | -------------------------------------------------------------------------------- /doc/source/guide/chap-docker.rst: -------------------------------------------------------------------------------- 1 | .. _chap.docker: 2 | 3 | Using Chaperone with Docker 4 | =========================== 5 | 6 | While Chaperone is a general-purpose program that can be used to manage any small hierarchy of 7 | processes, it was designed specifically to solve problems encountered when creating containers. 8 | 9 | While the goal is to keep containers streamlined and small, ideally containing only one 10 | process, the reality is that in many real-world applications, existing daemons may need 11 | to be exploited for use within a container to save time or provide commonly-available 12 | functionality. Some applications also benefit from greater modularity by breaking up 13 | functionality into multiple processes to better exploit CPU resources. 14 | 15 | The moment a container contains even two cooperating processes, the problem of management 16 | arises, and ``chaperone`` was designed to make multi-process management simple 17 | and well-contained. 18 | 19 | .. toctree:: 20 | :maxdepth: 2 21 | 22 | chap-docker-simple.rst 23 | chap-docker-smaller.rst 24 | -------------------------------------------------------------------------------- /doc/source/ref/config.rst: -------------------------------------------------------------------------------- 1 | .. chaperone documentation 2 | configuration directives 3 | 4 | .. _config: 5 | 6 | Chaperone Configuration 7 | ======================= 8 | 9 | Chaperone has a versatile configuration language that can be quick and easy to use, or can comprise many services 10 | and dependencies. For example, the following user application plus MySQL database server along with syslog 11 | redirection could be defined simply in just a few lines:: 12 | 13 | mysql.service: { command: "/etc/init.d/mysql start", 14 | type: forking } 15 | myapp.service: { command: "/opt/apps/bin/my_application", 16 | restart: true, after: mysql.service } 17 | syslog.logging: { filter: "*.info", stdout: true } 18 | 19 | Configurations can be as sophisticated as desired, including cron-type scheduling, multiple types of jobs, and 20 | complex job trees. These sections provide a complete reference to the chaperone configuration directives. 21 | 22 | ..
toctree:: 23 | 24 | config-format.rst 25 | config-global.rst 26 | config-service.rst 27 | config-logging.rst 28 | -------------------------------------------------------------------------------- /sandbox/distserv/chaperone.d/120-apache2.conf: -------------------------------------------------------------------------------- 1 | # 120-apache2.conf 2 | # 3 | # Start up apache. This is a "simple" service, so chaperone will monitor Apache and restart 4 | # it if necessary. Note that apache2.conf refers to MYSQL_UNIX_PORT (set by 105-mysql.conf) 5 | # to tell PHP where MySQL is running. 6 | # 7 | # In the case where no USER variable is specified, we run as the www-data user. 8 | 9 | settings: { 10 | env_set: { 11 | HTTPD_SERVER_NAME: apache, 12 | } 13 | } 14 | 15 | apache2.service: { 16 | command: "/usr/sbin/apache2 -f $(APPS_DIR)/etc/apache2.conf -DFOREGROUND", 17 | restart: true, 18 | stdout: inherit, stderr: inherit, 19 | uid: "$(USER:-www-data)", 20 | env_set: { 21 | APACHE_LOCK_DIR: /tmp, 22 | APACHE_PID_FILE: /tmp/apache2.pid, 23 | APACHE_RUN_USER: www-data, 24 | APACHE_RUN_GROUP: www-data, 25 | APACHE_RUN_DIR: "/tmp", 26 | APACHE_LOG_DIR: "/tmp", 27 | }, 28 | # If Apache2 does not require a database, you can leave this out. 29 | after: database, 30 | } 31 | 32 | apache2.logging: { 33 | enabled: true, 34 | selector: 'local1.*;*.!err', 35 | stderr: true, 36 | } 37 | -------------------------------------------------------------------------------- /chaperone/cproc/pt/simple.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from chaperone.cproc.subproc import SubProcess 3 | 4 | class SimpleProcess(SubProcess): 5 | 6 | _fut_monitor = None 7 | 8 | @asyncio.coroutine 9 | def process_started_co(self): 10 | if self._fut_monitor and not self._fut_monitor.cancelled(): 11 | self._fut_monitor.cancel() 12 | self._fut_monitor = None 13 | 14 | # We wait a short time just to see if the process errors out immediately. This avoids a retry loop 15 | # and catches any immediate failures now. 16 | 17 | yield from self.do_startup_pause() 18 | 19 | # If there is a pidfile, sit here and wait for a bit 20 | yield from self.wait_for_pidfile() 21 | 22 | # We have a successful start. Monitor this service. 
23 | 24 | self._fut_monitor = asyncio.async(self._monitor_service()) 25 | self.add_pending(self._fut_monitor) 26 | 27 | @asyncio.coroutine 28 | def _monitor_service(self): 29 | result = yield from self.wait() 30 | if isinstance(result, int) and result > 0: 31 | yield from self._abnormal_exit(result) 32 | -------------------------------------------------------------------------------- /chaperone/cproc/client.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | class CommandClient(asyncio.Protocol): 4 | 5 | @classmethod 6 | def sendCommand(cls, cmd): 7 | loop = asyncio.get_event_loop() 8 | coro = loop.create_unix_connection(lambda: CommandClient(cmd, loop), path = "/dev/chaperone.sock") 9 | (transport, protocol) = loop.run_until_complete(coro) 10 | loop.run_forever() 11 | loop.close() 12 | return protocol.result 13 | 14 | def __init__(self, message, loop): 15 | self.message = message 16 | self.loop = loop 17 | self.result = None 18 | 19 | def connection_made(self, transport): 20 | transport.write(self.message.encode()) 21 | 22 | def data_received(self, data): 23 | msg = data.decode() 24 | lines = msg.split("\n") 25 | error = None 26 | 27 | if lines[0] in {'COMMAND-ERROR', 'RESULT'}: 28 | self.result = "\n".join(lines[1:]) 29 | else: 30 | error = "Unexpected response from chaperone: " + str(msg) 31 | 32 | if error: 33 | raise Exception(error) 34 | 35 | def connection_lost(self, exc): 36 | self.loop.stop() 37 | 38 | -------------------------------------------------------------------------------- /sandbox/bin/repeat: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | Repeat utility for testing 5 | 6 | Usage: repeat [--nosignals] [-n=] [-i=] [-e] 7 | 8 | Options: 9 | -n= Specify number of repetitions, or infinite if absent 10 | -i= Specify interval, or 1 second if absent 11 | --nosignals Ignore all signals if present 12 | -e Output to stderr instead of stdout. 
13 | """ 14 | 15 | import signal 16 | from time import sleep, strftime, localtime 17 | from docopt import docopt 18 | import sys 19 | 20 | opt = docopt(__doc__) 21 | 22 | if opt['--nosignals']: 23 | signal.signal(signal.SIGTERM, lambda signum, frame: print("ignoring SIGTERM")) 24 | signal.signal(signal.SIGHUP, lambda signum, frame: print("ignoring SIGHUP")) 25 | signal.signal(signal.SIGINT, lambda signum, frame: print("ignoring SIGINT")) 26 | 27 | reps = iter(int,1) if not opt['-n'] else range(int(opt['-n'])) 28 | delay = 1 if not opt['-i'] else int(opt['-i']) 29 | handle = sys.stderr if opt['-e'] else sys.stdout 30 | msg = " " + opt[''] + "\n" 31 | 32 | for n in reps: 33 | handle.write(strftime("%M:%S", localtime()) + msg) 34 | handle.flush() 35 | sleep(delay) 36 | -------------------------------------------------------------------------------- /tests/el-tests/inetd-1/test-002.elt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect-test-command 2 | #TITLE: inetd - second service disables both 3 | 4 | # Start the image and capture the container ID and port 5 | # (Note: the initial "echo" below is needed to take care of a timing issue with 'docker run') 6 | 7 | >echo running...; echo CID:`RUNIMAGE_READY -d -P -e ENABLE_INETD1=true -e ENABLE_INETD2=true` 8 | +$cid=CID:([0-9a-f]{32,}) 9 | >docker port $cid 10 | +$ourport=8080/tcp -> 0.0.0.0:([0-9]+) 11 | >docker port $cid 12 | +$otherport=8443/tcp -> 0.0.0.0:([0-9]+) 13 | 14 | # Fire up an inetd connection inside the container and verify it works (ready assured) 15 | 16 | >read_from_port localhost $ourport 17 | sleep 2 21 | >read_from_port localhost $otherport 22 | 23 | # Kill and inspect logs 24 | 25 | >sleep 3 26 | >docker logs $cid 27 | docker rm -v $cid 38 | -------------------------------------------------------------------------------- /sandbox/testvar: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Used to create an apps directory here in the sandbox which runs a 3 | # standard docker image, however uses the local chaperone sources. 4 | # Creates a data-only "var" directory instead of a full apps directory 5 | # to test things like --default-home 6 | 7 | if [ $# == 0 ]; then 8 | echo "usage: testvar image-suffix" 9 | exit 1 10 | fi 11 | 12 | # the cd trick assures this works even if the current directory is not current. 
13 | cd ${0%/*} 14 | 15 | SUFFIX=$1 16 | shift # remaining arguments are for chaperone 17 | 18 | IMAGE=chapdev/chaperone-$SUFFIX 19 | SANDBOX=$PWD 20 | VARDIR=$SANDBOX/var-$SUFFIX 21 | 22 | bashcmd="/bin/bash --rcfile $SANDBOX/bash.bashrc" 23 | if [ "$1" == "-" ]; then 24 | bashcmd="" 25 | shift 26 | fi 27 | 28 | myuid=`id -u` 29 | mygid=`id -g` 30 | 31 | # Run the lamp image using our local copy of chaperone as well as the local var-only directory 32 | 33 | mkdir -p $VARDIR 34 | 35 | docker run -t -i -e "TERM=$TERM" -e "EMACS=$EMACS" --rm=true -v /home:/sandbox \ 36 | -v $VARDIR:/apps/var \ 37 | --name run-$SUFFIX \ 38 | --entrypoint /sandbox${SANDBOX#/home}/bin/chaperone $IMAGE \ 39 | --create $USER/$myuid \ 40 | --default-home / \ 41 | $* $bashcmd 42 | -------------------------------------------------------------------------------- /chaperone/cproc/pt/oneshot.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from chaperone.cproc.subproc import SubProcess 3 | from chaperone.cutil.errors import ChProcessError 4 | 5 | class OneshotProcess(SubProcess): 6 | 7 | process_timeout = 60.0 # default for a oneshot is 60 seconds 8 | 9 | @asyncio.coroutine 10 | def process_started_co(self): 11 | result = yield from self.timed_wait(self.process_timeout, self._exit_timeout) 12 | if result is not None and not result.normal_exit: 13 | if self.ignore_failures: 14 | self.logwarn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result)) 15 | else: 16 | raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result) 17 | 18 | def _exit_timeout(self): 19 | service = self.service 20 | message = "oneshot service '{1}' did not exit after {2} second(s), {3}".format( 21 | service.type, 22 | service.name, self.process_timeout, 23 | "proceeding due to 'ignore_failures=True'" if service.ignore_failures else 24 | "terminating due to 'ignore_failures=False'") 25 | if not service.ignore_failures: 26 | self.terminate() 27 | raise Exception(message) 28 | -------------------------------------------------------------------------------- /tests/el-tests/notify-1/chaperone.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_set: { PATH: "$(TESTHOME)/bin:$(PATH)", SERVICE_NAME: "$(_CHAP_SERVICE)" }, 3 | process_timeout: 5, 4 | } 5 | 6 | test1-exit1.service: { 7 | type: notify, 8 | enabled: "$(ENABLE_EXIT1:-false)", 9 | command: "daemon bash -c 'logecho daemon running; sleep 3; logecho daemon exiting'", 10 | } 11 | 12 | test1-exit1b.service: { 13 | type: notify, 14 | enabled: "$(ENABLE_EXIT1B:-false)", 15 | command: "daemon --exit 3 bash -c 'logecho daemon running; sleep 3; logecho daemon exiting'", 16 | } 17 | 18 | test1-exit1c.service: { 19 | type: notify, 20 | enabled: "$(ENABLE_EXIT1C:-false)", 21 | command: "daemon --exit 3 --wait 8 bash -c 'logecho daemon running; sleep 3; logecho daemon exiting'", 22 | } 23 | 24 | test1-exit1d.service: { 25 | type: notify, 26 | enabled: "$(ENABLE_EXIT1D:-false)", 27 | command: "daemon bash -c 'logecho daemon running; sleep 3; sdnotify ERRNO=55'", 28 | } 29 | 30 | test1-exit1e.service: { 31 | type: notify, 32 | enabled: "$(ENABLE_EXIT1E:-false)", 33 | process_timeout: 15, 34 | command: "daemon bash -c 'logecho daemon running; sleep 3; sdnotify --ready --pid $$; sleep 2'", 35 | } 36 | 37 | # Debugging output for all 38 | 39 | default.logging: { 40 | selector: "*.debug", 41 | stdout: true, 42 | } 43 |
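The notify-type services in the configuration above rely on the systemd-style readiness protocol: the started program (here via the bundled sdnotify helper) sends datagrams such as READY=1 or MAINPID=1234 to the Unix socket named by the NOTIFY_SOCKET environment variable. The following sketch is only an illustration of that protocol, not chaperone's own sdnotify implementation:

    import os
    import socket

    def sd_notify(state):
        """Send one sd_notify-style datagram (e.g. 'READY=1') to $NOTIFY_SOCKET."""
        addr = os.environ.get("NOTIFY_SOCKET")
        if not addr:
            return False                      # no supervisor is listening
        if addr.startswith("@"):              # abstract-namespace socket
            addr = "\0" + addr[1:]
        with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as sock:
            sock.sendto(state.encode(), addr)
        return True

    if __name__ == "__main__":
        sd_notify("READY=1")
        sd_notify("MAINPID={0}".format(os.getpid()))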
-------------------------------------------------------------------------------- /chaperone/cproc/pt/forking.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from chaperone.cproc.subproc import SubProcess 3 | from chaperone.cutil.errors import ChProcessError 4 | 5 | class ForkingProcess(SubProcess): 6 | 7 | defer_exit_kills = True 8 | 9 | @asyncio.coroutine 10 | def process_started_co(self): 11 | result = yield from self.timed_wait(self.process_timeout, self._exit_timeout) 12 | if result is not None and not result.normal_exit: 13 | if self.ignore_failures: 14 | self.logwarn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, result)) 15 | else: 16 | raise ChProcessError("{0} failed on start-up with result '{1}'".format(self.name, result), resultcode = result) 17 | yield from self.wait_for_pidfile() 18 | 19 | def _exit_timeout(self): 20 | service = self.service 21 | message = "forking service '{1}' did not exit after {2} second(s), {3}".format( 22 | service.type, 23 | service.name, self.process_timeout, 24 | "proceeding due to 'ignore_failures=True'" if service.ignore_failures else 25 | "terminating due to 'ignore_failures=False'") 26 | if not service.ignore_failures: 27 | self.terminate() 28 | raise Exception(message) 29 | -------------------------------------------------------------------------------- /tests/el-tests/simple-1/chaperone.conf: -------------------------------------------------------------------------------- 1 | test1-exit1.service: { 2 | type: simple, 3 | enabled: "$(ENABLE_EXIT1:-false)", 4 | command: "echo exit immediately", 5 | } 6 | 7 | # The test3 apache service is simple, but forks, so Chaperone is technically unaware of 8 | # its children. 9 | 10 | test3-apache.service: { 11 | type: simple, 12 | enabled: "$(ENABLE_APACHE3:-false)", 13 | command: "service apache2 start", 14 | uid: root, 15 | } 16 | 17 | test3-apache-verify.service: { 18 | type: oneshot, 19 | enabled: "$(ENABLE_APACHE3:-false)", 20 | command: "bash -c 'sleep 2; telchap stop test3-apache; ps ax'", 21 | service_groups: "IDLE", 22 | } 23 | 24 | # The test4 apache service uses a pidfile when it forks so Chaperone is aware of its pid. 25 | 26 | test4-apache.service: { 27 | type: simple, 28 | enabled: "$(ENABLE_APACHE4:-false)", 29 | pidfile: /run/apache2/apache2.pid, 30 | command: "service apache2 start", 31 | uid: root, 32 | } 33 | 34 | test4-apache-verify.service: { 35 | type: oneshot, 36 | enabled: "$(ENABLE_APACHE4:-false)", 37 | command: "bash -c 'sleep 2; telchap stop test4-apache; sleep 1; ps ax | ps -C apache2 || echo apache not running'", 38 | service_groups: "IDLE", 39 | } 40 | 41 | # Debugging output for all 42 | 43 | default.logging: { 44 | selector: "*.debug", 45 | stdout: true, 46 | } 47 | -------------------------------------------------------------------------------- /doc/source/status.rst: -------------------------------------------------------------------------------- 1 | .. _status: 2 | 3 | Chaperone Project Status 4 | ======================================================================== 5 | 6 | The Chaperone process manager is ready for public testing, and no 7 | longer in pre-release. It is relatively stable. 8 | 9 | Issues for Chaperone itself should be submitted on 10 | the `chaperone github issues page `_ 11 | 12 | Documentation status: 13 | 14 | * `Reference Section `_: Complete. 15 | Will be updated always to reflect feature changes and clarifications. 16 | * Usage Section: In progress. 
Will contain samples and best practices. 17 | * Tools Section: Not started. Command-line tools like ``telchap``, ``envcp``, and 18 | ``sdnotify`` which are bundled with Chaperone need documentation pages of 19 | their own. 20 | * Appendices: Documentation for chaperone-based images (such as those at 21 | `chaperone-docker `_ will 22 | be located as appendices of the Chaperone reference. 23 | 24 | There are several production-quality images which we are building both for 25 | our own use, and as samples of various Chaperone use-cases. 26 | 27 | These are separately maintained and have their own read-me pages at 28 | `chaperone-docker `_. 29 | 30 | Help is always appreciated. 31 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | This directory contains both Chaperone unit tests as well as more complex integration tests. The `run-all-tests.sh` script runs them all. 2 | 3 | However, integration tests in this directory have several requirements. They will run both on Ubuntu as well as RHEL. Docker 1.8.1 is required, since socket mount permissions have problems with SELinux for earlier versions. 4 | 5 | For both, you'll need everything Chaperone itself requires, and may need to install them manually since Chaperone may not be installed on the development system: 6 | 7 | pip3 install docopt 8 | pip3 install PyYAML 9 | pip3 install voluptuous 10 | pip3 install croniter 11 | 12 | You will also need a working `chapdev/chaperone-lamp` image. This is the image that is used for all of the tests in this directory and you can simply pull it if it isn't already available. 13 | 14 | Wait, there's more. 15 | 16 | For Ubuntu, you'll then need: 17 | 18 | apt-get install expect-lite 19 | apt-get install nc # should already be there 20 | 21 | For CentOS/RHEL, it is a bit more complicated. You'll need: 22 | 23 | yum install expect 24 | yum install nc 25 | 26 | and then you'll need to manually install `expect-lite` using the instructions [on the developer website](http://expect-lite.sourceforge.net/expect-lite_install.html). (It's pretty easy actually, and foolproof). 27 | -------------------------------------------------------------------------------- /tests/el-tests/simple-2/chaperone.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | detect_exit: false, 3 | } 4 | 5 | test1-exit1.service: { 6 | type: simple, 7 | enabled: "$(ENABLE_EXIT1:-false)", 8 | command: "echo exit immediately", 9 | } 10 | 11 | # The test3 apache service is simple, but forks, so Chaperone is technically unaware of 12 | # its children. 13 | 14 | test3-apache.service: { 15 | type: simple, 16 | enabled: "$(ENABLE_APACHE3:-false)", 17 | command: "service apache2 start", 18 | uid: root, 19 | } 20 | 21 | test3-apache-verify.service: { 22 | type: oneshot, 23 | enabled: "$(ENABLE_APACHE3:-false)", 24 | command: "bash -c 'sleep 2; telchap stop test3-apache; ps ax'", 25 | service_groups: "IDLE", 26 | } 27 | 28 | # The test4 apache service uses a pidfile when it forks so Chaperone is aware of its pid. 
29 | 30 | test4-apache.service: { 31 | type: simple, 32 | enabled: "$(ENABLE_APACHE4:-false)", 33 | pidfile: /run/apache2/apache2.pid, 34 | command: "service apache2 start", 35 | uid: root, 36 | } 37 | 38 | test4-apache-verify.service: { 39 | type: oneshot, 40 | enabled: "$(ENABLE_APACHE4:-false)", 41 | command: "bash -c 'sleep 2; telchap stop test4-apache; sleep 1; ps ax | ps -C apache2 || echo apache not running'", 42 | service_groups: "IDLE", 43 | } 44 | 45 | # Debugging output for all 46 | 47 | default.logging: { 48 | selector: "*.debug", 49 | stdout: true, 50 | } 51 | -------------------------------------------------------------------------------- /chaperone/cutil/errors.py: -------------------------------------------------------------------------------- 1 | import errno 2 | 3 | class ChError(Exception): 4 | 5 | # Named the same as OSError so that exception code can detect the presence 6 | # of an errno for reporting purposes 7 | errno = None 8 | annotation = None 9 | 10 | def annotate(self, text): 11 | if self.annotation: 12 | self.annotation += ' ' + text 13 | else: 14 | self.annotation = text 15 | 16 | def __str__(self): 17 | supmsg = super().__str__() 18 | if self.annotation: 19 | supmsg += ' ' + self.annotation 20 | return supmsg 21 | 22 | def __init__(self, message = None, errno = None): 23 | super().__init__(message) 24 | if errno is not None: 25 | self.errno = errno 26 | 27 | class ChParameterError(ChError): 28 | errno = errno.EINVAL 29 | 30 | class ChNotFoundError(ChError): 31 | errno = errno.ENOENT 32 | 33 | class ChSystemError(ChError): 34 | pass 35 | 36 | class ChProcessError(ChError): 37 | 38 | def __init__(Self, message = None, errno = None, resultcode = None): 39 | if resultcode is not None and errno is None: 40 | errno = resultcode.errno 41 | super().__init__(message, errno) 42 | 43 | class ChVariableError(ChError): 44 | pass 45 | 46 | def get_errno_from_exception(ex): 47 | try: 48 | return ex.errno 49 | except AttributeError: 50 | return None 51 | -------------------------------------------------------------------------------- /samples/chaperone-devbase/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Assumes there is an "optional" apt-get proxy running on our HOST 4 | # on port 3142. You can run one by looking here: https://github.com/sameersbn/docker-apt-cacher-ng 5 | # Does no harm if nothing is running on that port. 6 | /setup-bin/ct_setproxy 7 | 8 | # see https://github.com/docker/docker/issues/1724 9 | apt-get update 10 | 11 | # Normal install steps 12 | apt-get -y install python3-pip 13 | 14 | # We install from the local directory rather than pip so we can test and develop. 15 | cd /setup-bin/chaperone 16 | python3 setup.py install 17 | 18 | # Now, just so there is no confusion, create a new, empty /var/log directory so that any logs 19 | # written will obviously be written by the current container software. Keep the old one so 20 | # it's there for reference so we can see what the distribution did. 21 | cd /var 22 | mv log log-dist 23 | mkdir log 24 | chmod 775 log 25 | chown root.syslog log 26 | 27 | # Customize some system files 28 | cp /setup-bin/dot.bashrc /root/.bashrc 29 | 30 | # Allow unfettered root access by users. This is done so that apps/init.d scripts can 31 | # have unfettered access to root on their first startup to configure userspace files 32 | # if needed (see mysql in chaperone-lamp for an example). 
At the end of the first startup 33 | # this is then locked down by apps/etc/init.sh. 34 | passwd -d root 35 | sed -i 's/nullok_secure/nullok/' /etc/pam.d/common-auth 36 | -------------------------------------------------------------------------------- /doc/docserver/etc/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # A quick script to initialize the system 3 | 4 | # We publish two variables for use in startup scripts: 5 | # 6 | # CONTAINER_INIT=1 if we are initializing the container for the first time 7 | # APPS_INIT=1 if we are initializing the $APPS_DIR for the first time 8 | # 9 | # Both may be relevant, since it's possible that the $APPS_DIR may be on a mount point 10 | # so it can be reused when starting up containers which refer to it. 11 | 12 | function dolog() { logger -t init.sh -p info $*; } 13 | 14 | apps_init_file="$APPS_DIR/var/run/apps_init.done" 15 | cont_init_file="/container_init.done" 16 | 17 | export CONTAINER_INIT=0 18 | export APPS_INIT=0 19 | 20 | if [ ! -f $cont_init_file ]; then 21 | dolog "initializing container for the first time" 22 | CONTAINER_INIT=1 23 | su -c "date >$cont_init_file" 24 | fi 25 | 26 | if [ ! -f $apps_init_file ]; then 27 | dolog "initializing $APPS_DIR for the first time" 28 | APPS_INIT=1 29 | mkdir -p $APPS_DIR/var/run $APPS_DIR/var/log 30 | chmod 777 $APPS_DIR/var/run $APPS_DIR/var/log 31 | date >$apps_init_file 32 | fi 33 | 34 | if [ -d $APPS_DIR/init.d ]; then 35 | for initf in $( find $APPS_DIR/init.d -type f -executable \! -name '*~' ); do 36 | dolog "running $initf..." 37 | $initf 38 | done 39 | fi 40 | 41 | if [ "$SECURE_ROOT" == "1" -a $CONTAINER_INIT == 1 ]; then 42 | dolog locking down root account 43 | su -c 'passwd -l root' 44 | fi 45 | -------------------------------------------------------------------------------- /chaperone/cutil/syslog_info.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from logging.handlers import SysLogHandler 3 | 4 | # Copy all syslog levels 5 | for k,v in SysLogHandler.__dict__.items(): 6 | if k.startswith('LOG_'): 7 | globals()[k] = v 8 | 9 | FACILITY = ('kern', 'user', 'mail', 'daemon', 'auth', 'syslog', 'lpr', 'news', 'uucp', 'cron', 'authpriv', 10 | 'ftp', 'ntp', 'audit', 'alert', 'altcron', 'local0', 'local1', 'local2', 'local3', 'local4', 11 | 'local5', 'local6', 'local7') 12 | FACILITY_DICT = {FACILITY[i]:i for i in range(len(FACILITY))} 13 | 14 | PRIORITY = ('emerg', 'alert', 'crit', 'err', 'warn', 'notice', 'info', 'debug') 15 | PRIORITY_DICT = {PRIORITY[i]:i for i in range(len(PRIORITY))} 16 | 17 | PRIORITY_DICT['warning'] = PRIORITY_DICT['warn'] 18 | PRIORITY_DICT['error'] = PRIORITY_DICT['err'] 19 | 20 | # Python equivalent for PRIORITY settings 21 | PRIORITY_PYTHON = (logging.CRITICAL, logging.CRITICAL, logging.CRITICAL, logging.ERROR, 22 | logging.WARNING, logging.INFO, logging.INFO, logging.DEBUG) 23 | 24 | def get_syslog_info(facility, priority): 25 | try: 26 | f = FACILITY[facility] 27 | except IndexError: 28 | f = '?' 29 | try: 30 | return f + '.' + PRIORITY[priority] 31 | except IndexError: 32 | return f + '.?' 
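# Illustrative example (a sketch, not part of the original module): a raw syslog
# <PRI> value encodes facility * 8 + priority, so the tables above can decode and
# label it directly:
#
#     pri = 165                                      # e.g. from a "<165>..." message
#     label = get_syslog_info(pri >> 3, pri & 0x7)   # -> 'local4.notice'
#     pylev = syslog_to_python_lev(pri & 0x7)        # -> logging.INFO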
33 | 34 | 35 | def syslog_to_python_lev(lev): 36 | if lev < 0 or lev >= len(PRIORITY): 37 | return logging.DEBUG 38 | return PRIORITY_PYTHON[lev] 39 | -------------------------------------------------------------------------------- /samples/chaperone-devbase/apps/etc/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # A quick script to initialize the system 3 | 4 | # We publish two variables for use in startup scripts: 5 | # 6 | # CONTAINER_INIT=1 if we are initializing the container for the first time 7 | # APPS_INIT=1 if we are initializing the $APPS_DIR for the first time 8 | # 9 | # Both may be relevant, since it's possible that the $APPS_DIR may be on a mount point 10 | # so it can be reused when starting up containers which refer to it. 11 | 12 | function dolog() { logger -t init.sh -p info $*; } 13 | 14 | apps_init_file="$APPS_DIR/var/run/apps_init.done" 15 | cont_init_file="/container_init.done" 16 | 17 | export CONTAINER_INIT=0 18 | export APPS_INIT=0 19 | 20 | if [ ! -f $cont_init_file ]; then 21 | dolog "initializing container for the first time" 22 | CONTAINER_INIT=1 23 | su -c "date >$cont_init_file" 24 | fi 25 | 26 | if [ ! -f $apps_init_file ]; then 27 | dolog "initializing $APPS_DIR for the first time" 28 | APPS_INIT=1 29 | mkdir -p $APPS_DIR/var/run $APPS_DIR/var/log 30 | chmod 777 $APPS_DIR/var/run $APPS_DIR/var/log 31 | date >$apps_init_file 32 | fi 33 | 34 | if [ -d $APPS_DIR/init.d ]; then 35 | for initf in $( find $APPS_DIR/init.d -type f -executable \! -name '*~' ); do 36 | dolog "running $initf..." 37 | $initf 38 | done 39 | fi 40 | 41 | if [ "$SECURE_ROOT" == "1" -a $CONTAINER_INIT == 1 ]; then 42 | dolog locking down root account 43 | su -c 'passwd -l root' 44 | fi 45 | -------------------------------------------------------------------------------- /tests/bin/daemon: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | Forks a process in a daemon-like fashion for testing. 5 | 6 | Usage: 7 | daemon [--wait=seconds] [--ignore-signals] [--exit=code] COMMAND [ARGS ...]
8 | """ 9 | 10 | import signal 11 | import sys 12 | import subprocess 13 | from time import sleep 14 | from docopt import docopt 15 | 16 | import os 17 | 18 | from daemonutil import Daemon 19 | 20 | options = docopt(__doc__, options_first=True) 21 | 22 | if options['--ignore-signals']: 23 | signal.signal(signal.SIGTERM, lambda signum, frame: print("ignoring SIGTERM")) 24 | signal.signal(signal.SIGHUP, lambda signum, frame: print("ignoring SIGHUP")) 25 | signal.signal(signal.SIGINT, lambda signum, frame: print("ignoring SIGINT")) 26 | else: 27 | signal.signal(signal.SIGTERM, lambda signum, frame: not print("received SIGTERM")) 28 | signal.signal(signal.SIGHUP, lambda signum, frame: not print("received SIGHUP")) 29 | signal.signal(signal.SIGINT, lambda signum, frame: not print("received SIGINT")) 30 | 31 | if options['--wait']: 32 | print("Waiting {0} ...".format(options['--wait'])) 33 | sleep(float(options['--wait'])) 34 | 35 | args = [options['COMMAND']] + options['ARGS'] 36 | 37 | print("{1}:Launching {0} ...".format(args, os.getpid())) 38 | 39 | class mydaemon(Daemon): 40 | 41 | def run(self): 42 | subprocess.Popen(args, start_new_session=True) 43 | 44 | d = mydaemon() 45 | 46 | if options['--exit']: 47 | d.start(int(options['--exit'])) 48 | else: 49 | d.start() 50 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MYSQL_ROOT_PW='ChangeMe' 4 | 5 | # Assumes there is an "optional" apt-get proxy running on our HOST 6 | # on port 3142. You can run one by looking here: https://github.com/sameersbn/docker-apt-cacher-ng 7 | # Does no harm if nothing is running on that port. 8 | /setup-bin/ct_setproxy 9 | 10 | # Normal install steps 11 | apt-get install -y apache2 12 | 13 | debconf-set-selections <<< "debconf debconf/frontend select Noninteractive" 14 | 15 | debconf-set-selections <<< "mysql-server mysql-server/root_password password $MYSQL_ROOT_PW" 16 | debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $MYSQL_ROOT_PW" 17 | debconf-set-selections <<< "phpmyadmin phpmyadmin/dbconfig-install boolean true" 18 | debconf-set-selections <<< "phpmyadmin phpmyadmin/app-password password $MYSQL_ROOT_PW" 19 | debconf-set-selections <<< "phpmyadmin phpmyadmin/app-password-confirm password $MYSQL_ROOT_PW" 20 | debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/app-pass password $MYSQL_ROOT_PW" 21 | debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/admin-pass password $MYSQL_ROOT_PW" 22 | debconf-set-selections <<< "phpmyadmin phpmyadmin/reconfigure-webserver multiselect apache2" 23 | 24 | apt-get install -y mysql-server 25 | /usr/bin/mysqld_safe & 26 | 27 | # Install phpmyadmin. Actual setup occurs at first boot, since it depends on what user we run the container 28 | # as. 29 | apt-get install -y phpmyadmin 30 | php5enmod mcrypt 31 | 32 | apt-get install -y php-pear 33 | -------------------------------------------------------------------------------- /sandbox/distserv/chaperone.d/005-config.conf: -------------------------------------------------------------------------------- 1 | # 005-config.conf 2 | # 3 | # Put container configuration variables here. This should strictly be for configuration 4 | # variables that are passed into the container. 100% of container configuraiton should 5 | # be possible by setting these variables here or on the 'docker run' command line. 
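# As an illustration of how these are normally supplied (the exact flags live in
# the companion run.sh launcher), the variables below can be set directly on the
# docker command line, e.g.:
#
#   docker run -e CONFIG_EXT_HOSTNAME=www.example.com \
#              -e CONFIG_EXT_HTTP_PORT=9980 ... chapdev/chaperone-apache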
6 | 7 | settings: { 8 | 9 | env_set: { 10 | 11 | # This is the hostname of the host machine. Generally, this is only needed 12 | # by certain applications (such as those supporting SSL certiifcates, but is common 13 | # enough to include as a standard option. 14 | 15 | CONFIG_EXT_HOSTNAME: "$(CONFIG_EXT_HOSTNAME:-localhost)", 16 | 17 | # HTTP ports of exported ports. These are good policy to define in your "docker run" 18 | # command so that internal applications know what ports the public interfaces are 19 | # visible on. Sometimes this is necessary, such as when appliations push their 20 | # endpoints via API's or when webservers do redirects. The default launchers 21 | # for Chaperone containers handle this for you automatically. 22 | 23 | CONFIG_EXT_HTTP_PORT: "$(CONFIG_EXT_HTTP_PORT:-8080)", 24 | CONFIG_EXT_HTTPS_PORT: "$(CONFIG_EXT_HTTPS_PORT:-8443)", 25 | 26 | # Configure this to enable SSL and generate snakeoil keys for the given domain 27 | CONFIG_EXT_SSL_HOSTNAME: "$(CONFIG_EXT_SSL_HOSTNAME:-)", 28 | 29 | # Create additional configuration variables here. Start them with "CONFIG_" 30 | # so they can be easily identified... 31 | 32 | } 33 | 34 | } 35 | -------------------------------------------------------------------------------- /tests/bin/proctool: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | Tool to create processes for various purposes. 5 | 6 | Usage: 7 | proctool [--dump] [--hang] [--wait=seconds] [--ignore-signals] [--exit=code] [--notify=CMD] [MESSAGE] 8 | """ 9 | 10 | import signal 11 | import sys 12 | from time import sleep 13 | from docopt import docopt 14 | 15 | import os 16 | 17 | options = docopt(__doc__) 18 | 19 | if options['MESSAGE']: 20 | sys.stdout.write('proctool says: ' + options['MESSAGE'] + "\n") 21 | sys.stdout.flush() 22 | 23 | if options['--notify']: 24 | cmd = options['--notify'] 25 | os.system('sdnotify ' + cmd) 26 | 27 | if options['--dump']: 28 | print("UID:{0} GID:{1} PID:{2} Environment:".format(os.getuid(), os.getgid(), os.getpid())) 29 | for k,v in os.environ.items(): 30 | print(" {0}={1}".format(k,v)) 31 | 32 | if options['--ignore-signals']: 33 | signal.signal(signal.SIGTERM, lambda signum, frame: print("ignoring SIGTERM")) 34 | signal.signal(signal.SIGHUP, lambda signum, frame: print("ignoring SIGHUP")) 35 | signal.signal(signal.SIGINT, lambda signum, frame: print("ignoring SIGINT")) 36 | else: 37 | signal.signal(signal.SIGTERM, lambda signum, frame: print("received SIGTERM") or exit() ) 38 | signal.signal(signal.SIGHUP, lambda signum, frame: not print("received SIGHUP")) 39 | signal.signal(signal.SIGINT, lambda signum, frame: not print("received SIGINT")) 40 | 41 | if options['--wait']: 42 | sleep(float(options['--wait'])) 43 | 44 | if options['--hang']: 45 | while True: 46 | sleep(100) 47 | 48 | if options['--exit']: 49 | exit(int(options['--exit'])) 50 | -------------------------------------------------------------------------------- /tests/el-tests/fork-1/chaperone.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_set: { PATH: "$(TESTHOME)/bin:$(PATH)" } 3 | } 4 | 5 | test1-exit1.service: { 6 | type: forking, 7 | enabled: "$(ENABLE_EXIT1:-false)", 8 | command: "daemon bash -c 'logecho daemon running; sleep 5; logecho daemon exiting'", 9 | } 10 | 11 | test1-exit1b.service: { 12 | type: forking, 13 | enabled: "$(ENABLE_EXIT1B:-false)", 14 | command: "daemon --exit 3 bash -c 'logecho daemon running; sleep 5; 
logecho daemon exiting'", 15 | } 16 | 17 | # The test3 apache service is simple, but forks, so Chaperone is technically unaware of 18 | # its children. 19 | 20 | test3-apache.service: { 21 | type: forking, 22 | enabled: "$(ENABLE_APACHE3:-false)", 23 | command: "service apache2 start", 24 | uid: root, 25 | } 26 | 27 | test3-apache-verify.service: { 28 | type: oneshot, 29 | enabled: "$(ENABLE_APACHE3:-false)", 30 | command: "bash -c 'sleep 2; telchap stop test3-apache; ps ax'", 31 | service_groups: "IDLE", 32 | } 33 | 34 | # The test4 apache service uses a pidfile when it forks so Chaperone is aware of its pid. 35 | 36 | test4-apache.service: { 37 | type: forking, 38 | enabled: "$(ENABLE_APACHE4:-false)", 39 | pidfile: /run/apache2/apache2.pid, 40 | command: "service apache2 start", 41 | uid: root, 42 | } 43 | 44 | test4-apache-verify.service: { 45 | type: oneshot, 46 | enabled: "$(ENABLE_APACHE4:-false)", 47 | command: "bash -c 'sleep 2; telchap stop test4-apache; sleep 1; ps ax | ps -C apache2 || echo apache not running'", 48 | service_groups: "IDLE", 49 | } 50 | 51 | # Debugging output for all 52 | 53 | default.logging: { 54 | selector: "*.debug", 55 | stdout: true, 56 | } 57 | -------------------------------------------------------------------------------- /chaperone/cutil/format.py: -------------------------------------------------------------------------------- 1 | def fstr(s): 2 | if s is None: 3 | return '-' 4 | if isinstance(s, bool): 5 | return str(s).lower() 6 | return str(s) 7 | 8 | class TableFormatter(list): 9 | 10 | """ 11 | A quick formatting class which allows you to build a table, then output it 12 | neatly with columns and headings. 13 | """ 14 | 15 | attributes = None 16 | headings = None 17 | _sortfield = None 18 | 19 | def __init__(self, *args, sort=None): 20 | self.attributes = tuple(isinstance(a, tuple) and a[1] or a for a in args) 21 | self.headings = tuple(isinstance(a, tuple) and a[0] or a for a in args) 22 | self._hsize = list(len(h) for h in self.headings) 23 | if sort in self.attributes: 24 | self._sortfield = self.attributes.index(sort) 25 | 26 | def add_rows(self, rows): 27 | for r in rows: 28 | row = tuple(getattr(r, attr, None) for attr in self.attributes) 29 | for i in range(len(row)): 30 | self._hsize[i] = max(self._hsize[i], len(fstr(row[i]))) 31 | self.append(row) 32 | 33 | def get_formatted_data(self): 34 | if self._sortfield is not None: 35 | rows = sorted(self, key=lambda r: r[self._sortfield]) 36 | else: 37 | rows = self 38 | 39 | hz = self._hsize 40 | fieldcount = range(len(hz)) 41 | sep = " " 42 | dividers = tuple("-" * hz[i] for i in fieldcount) 43 | 44 | return "\n".join(sep.join(fstr(row[i]).ljust(hz[i]) 45 | for i in fieldcount) 46 | for row in [self.headings] + [dividers] + rows) 47 | -------------------------------------------------------------------------------- /chaperone/cutil/servers.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from functools import partial 3 | from chaperone.cutil.events import EventSource 4 | 5 | class ServerProtocol(asyncio.Protocol): 6 | 7 | @classmethod 8 | def buildProtocol(cls, owner, **kwargs): 9 | return partial(cls, owner, **kwargs) 10 | 11 | def __init__(self, owner, **kwargs): 12 | """ 13 | Copy keywords directly into attributes when each protocol is created. 14 | This creates flexibility so that various servers can pass information to protocols. 
15 | """ 16 | 17 | super().__init__() 18 | 19 | self.owner = owner 20 | self.events = self.owner.events 21 | 22 | for k,v in kwargs.items(): 23 | setattr(self, k, v) 24 | 25 | def connection_made(self, transport): 26 | self.transport = transport 27 | self.events.onConnection(self.owner) 28 | 29 | def error_received(self, exc): 30 | self.events.onError(self.owner, exc) 31 | self.events.onClose(self.owner, exc) 32 | 33 | def connection_lost(self, exc): 34 | self.events.onClose(self.owner, exc) 35 | 36 | class Server: 37 | 38 | server = None 39 | 40 | def __init__(self, **kwargs): 41 | self.events = EventSource(**kwargs) 42 | 43 | @asyncio.coroutine 44 | def run(self): 45 | self.loop = asyncio.get_event_loop() 46 | self.server = yield from self._create_server() 47 | yield from self.server_running() 48 | 49 | @asyncio.coroutine 50 | def server_running(self): 51 | pass 52 | 53 | def close(self): 54 | s = self.server 55 | if s: 56 | if isinstance(s, tuple): 57 | s = s[0] 58 | s.close() 59 | -------------------------------------------------------------------------------- /sandbox/testimage: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Used to create an apps directory here in the sandbox which runs a 3 | # standard docker image, however uses the local chaperone sources 4 | # and creates an app directory here in the sandbox. This is for 5 | # development of chaperone itself, and allows you to duplicate the 6 | # environment of an image. Especially useful for reproducing problems 7 | # and troubleshooting images. 8 | 9 | if [ $# == 0 ]; then 10 | echo "usage: testimage image-suffix" 11 | exit 1 12 | fi 13 | 14 | # the cd trick assures this works even if the current directory is not current. 15 | cd ${0%/*} 16 | 17 | SUFFIX=$1 18 | shift # remaining arguments are for chaperone 19 | 20 | # Try with chaperone- prefix first 21 | IMAGE=chapdev/chaperone-$SUFFIX 22 | if ! docker inspect $IMAGE >/dev/null 2>&1; then 23 | IMAGE=chapdev/$SUFFIX 24 | fi 25 | 26 | SANDBOX=$PWD 27 | APPSDIR=$SANDBOX/apps-$SUFFIX 28 | 29 | bashcmd="/bin/bash --rcfile $SANDBOX/bash.bashrc" 30 | if [ "$1" == "-" ]; then 31 | bashcmd="" 32 | shift 33 | fi 34 | 35 | myuid=`id -u` 36 | mygid=`id -g` 37 | 38 | # Copy the apps into this sandbox directory so we can work on it. 39 | 40 | if [ ! -d $APPSDIR ]; then 41 | docker run -i --rm=true -v /home:/home $IMAGE --disable --exitkills --log err --user root \ 42 | /bin/bash -c "cp -a /apps $APPSDIR; chown -R $myuid:$mygid $APPSDIR" 43 | fi 44 | 45 | # Run the lamp image using our local copy of chaperone as well as the local apps directory 46 | 47 | docker run -t -i -e "TERM=$TERM" -e "EMACS=$EMACS" --rm=true -v /home:/home \ 48 | --name run-$SUFFIX \ 49 | --entrypoint $SANDBOX/bin/chaperone $IMAGE \ 50 | --create $USER:$myuid \ 51 | --default-home / \ 52 | --config $APPSDIR/chaperone.d $* $bashcmd 53 | -------------------------------------------------------------------------------- /doc/docserver/chaperone.d/120-apache2.conf: -------------------------------------------------------------------------------- 1 | # 120-apache2.conf 2 | # 3 | # Start up apache. This is a "simple" service, so chaperone will monitor Apache and restart 4 | # it if necessary. Note that apache2.conf refers to MYSQL_UNIX_PORT (set by 105-mysql.conf) 5 | # to tell PHP where MySQL is running. 6 | # 7 | # In the case where no USER variable is specified, we run as the www-data user. 
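# (Illustrative note) The "$(USER:-www-data)" expressions below follow the same
# $(NAME:-default) expansion convention used throughout these configurations: if
# USER is set in the container's environment its value is used for the uid,
# otherwise www-data is the fallback. Passing something like "-e USER=appuser"
# on the docker run command line would then run apache2, and write its logs,
# as that user instead.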
8 | 9 | apache2.service: { 10 | command: "/usr/sbin/apache2 -f $(APPS_DIR)/etc/apache2.conf -DFOREGROUND", 11 | restart: true, 12 | uid: "$(USER:-www-data)", 13 | env_set: { 14 | APACHE_LOCK_DIR: /tmp, 15 | APACHE_PID_FILE: /tmp/apache2.pid, 16 | APACHE_RUN_USER: www-data, 17 | APACHE_RUN_GROUP: www-data, 18 | APACHE_LOG_DIR: "$(APPS_DIR)/var/log/apache2", 19 | APACHE_SITES_DIR: "$(APPS_DIR)/www", 20 | MYSQL_SOCKET: "$(APPS_DIR)/var/run/mysqld.sock", 21 | }, 22 | # If Apache2 does not require a database, you can leave this out. 23 | after: database, 24 | } 25 | 26 | # Use daily logging (the %d) so that log rotation isn't so important. Logs 27 | # will be created automatically for each day where they are requied. 28 | # See 300-logrotate.conf if you want to enable log rotation as a periodic 29 | # job. Note that chaperone watches for logs which are rotated and will 30 | # automatically open a new file if the old one is rotated. 31 | # 32 | # Write logs either as the USER= user, or as www-data. 33 | 34 | apache2.logging: { 35 | enabled: true, 36 | selector: 'local1.*;*.!err', 37 | file: '$(APPS_DIR)/var/log/apache2/apache-%d.log', 38 | uid: "$(USER:-www-data)", 39 | } 40 | 41 | apache2.logging: { 42 | enabled: true, 43 | selector: 'local1.err', 44 | stderr: true, 45 | file: '$(APPS_DIR)/var/log/apache2/error-%d.log', 46 | uid: "$(USER:-www-data)", 47 | } 48 | -------------------------------------------------------------------------------- /chaperone/cutil/patches.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import importlib 3 | 4 | # This module contains patches to Python. A patch wouldn't appear here if it didn't have major impact, 5 | # and they are constructed and researched carefully. Avoid if possible, please. 6 | 7 | # Patch routine for patching classes. Ignore ALL exceptions, since there could be any number of 8 | # reasons why a distribution may not allow such patching (though most do). Exact code is compared, 9 | # so there is little chance of an error in deciding if the patch is relevant. 10 | 11 | def PATCH_CLASS(module, clsname, member, oldstr, newfunc): 12 | try: 13 | cls = getattr(importlib.import_module(module), clsname) 14 | should_be = ''.join(inspect.getsourcelines(getattr(cls, member))[0]) 15 | if should_be == oldstr: 16 | setattr(cls, member, newfunc) 17 | except Exception: 18 | pass 19 | 20 | 21 | # PATCH for Issue23140: https://bugs.python.org/issue23140 22 | # WHERE asyncio 23 | # IMPACT Eliminates exceptions during process termination 24 | # WHY There is no workround except upgrading to Python 3.4.3, which dramatically affects 25 | # distro compatibility. Mostly, this benefits Ubuntu 14.04LTS. 
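# HOW The stock source of process_exited() is captured verbatim below so that
# PATCH_CLASS only applies the replacement when the installed library still
# matches it exactly; the replacement differs only by skipping waiters that
# have already been cancelled, which would otherwise raise InvalidStateError
# from set_result().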
26 | 27 | OLD_process_exited = """ def process_exited(self): 28 | # wake up futures waiting for wait() 29 | returncode = self._transport.get_returncode() 30 | while self._waiters: 31 | waiter = self._waiters.popleft() 32 | waiter.set_result(returncode) 33 | """ 34 | 35 | def NEW_process_exited(self): 36 | # wake up futures waiting for wait() 37 | returncode = self._transport.get_returncode() 38 | while self._waiters: 39 | waiter = self._waiters.popleft() 40 | if not waiter.cancelled(): 41 | waiter.set_result(returncode) 42 | 43 | PATCH_CLASS('asyncio.subprocess', 'SubprocessStreamProtocol', 'process_exited', OLD_process_exited, NEW_process_exited) 44 | -------------------------------------------------------------------------------- /samples/setup-bin/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This is a great little program to make it easy to share basic build components across 4 | # a set of docker files. Basically, you do this: 5 | # cd sandbox/someimage 6 | # ln -s ../setup-bin #if needed 7 | # ./setup-bin/build 8 | # 9 | 10 | helpmsg=" 11 | usage: setup/build\n 12 | \n 13 | -n name the image (else directoryname is used)\n 14 | -x disable the cache\n 15 | -y ask no questions and do the default\n 16 | -p ? specify prefix to use for build tag (default chapdev/)\n 17 | \n 18 | If you have additional arguments to docker, then include them after a --\n 19 | " 20 | 21 | if [ "$0" != './setup-bin/build' ] ; then 22 | echo 'Sorry, I only work if executed as "./setup-bin/build"' 23 | exit 1 24 | fi 25 | 26 | if [ ! -f Dockerfile ]; then 27 | echo 'Hey, where is your ./Dockerfile?' 28 | exit 1 29 | fi 30 | 31 | ipfx='chapdev/' 32 | buildargs=(-t ${PWD##*/}) 33 | noquestions='' 34 | 35 | while getopts "n:hxy" opt; do 36 | case $opt in 37 | n) 38 | buildargs[1]=$OPTARG 39 | ;; 40 | h) 41 | echo -e $helpmsg 42 | exit 0 43 | ;; 44 | y) 45 | noquestions='true' 46 | yn='y' 47 | ;; 48 | p) 49 | ipfx=$OPTARG 50 | ;; 51 | x) 52 | buildargs+=(--no-cache) 53 | ;; 54 | \?) 55 | exit 1 56 | ;; 57 | esac 58 | done 59 | 60 | shift $((OPTIND-1)) 61 | 62 | buildargs[1]=$ipfx${buildargs[1]} 63 | imagename=${buildargs[1]} 64 | echo Building image: $imagename 65 | 66 | oldimage=`docker images -q $imagename` 67 | 68 | echo docker build ${buildargs[*]} $* - 69 | tar czh . | docker build ${buildargs[*]} $* - 70 | 71 | newimage=`docker images -q $imagename` 72 | 73 | if [ "$oldimage" -a "$oldimage" != "$newimage" ]; then 74 | if [ ! "$noquestions" ]; then 75 | read -p "Delete old image $oldimage? (y/n) " yn 76 | fi 77 | if [ "$yn" = "y" ]; then 78 | docker rmi $oldimage 79 | echo $oldimage removed 80 | fi 81 | fi 82 | -------------------------------------------------------------------------------- /sandbox/distserv/chaperone.d/010-start.conf: -------------------------------------------------------------------------------- 1 | # 010-start.conf 2 | # 3 | # This is the first start-up file for the chaperone base images. Note that start-up files 4 | # are processed in order alphabetically, so settings in later files can override those in 5 | # earlier files. 6 | 7 | # General environmental settings. These settings apply to all services and logging entries. 8 | # There should be only one "settings" directive in each configuration file. But, any 9 | # settings encountered in subsequent configuration files can override or augment these. 10 | # Note that variables are expanded as late as possile. 
So, there can be variables 11 | # defined here which depend upon variables which will be defined later (such as _CHAP_SERVICE), 12 | # which is defined implicitly for each service. 13 | 14 | settings: { 15 | 16 | env_set: { 17 | 18 | 'LANG': 'en_US.UTF-8', 19 | 'LC_CTYPE': '$(LANG)', 20 | 'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin', 21 | 'RANDFILE': '/tmp/openssl.rnd', 22 | 23 | # Uncomment the below to tell startup.sh to lock-down the root account after the first 24 | # successful start. 25 | #'SECURE_ROOT': '1', 26 | 27 | # Variables starting with _CHAP are internal and won't be exported to services, 28 | # so we derive public environment variables if needed... 29 | 'APPS_DIR': '$(_CHAP_CONFIG_DIR:-/)', 30 | 'CHAP_SERVICE_NAME': '$(_CHAP_SERVICE:-)', 31 | 'CHAP_TASK_MODE': '$(_CHAP_TASK_MODE:-)', 32 | 33 | # The best use-cases will want to move $(VAR_DIR) out of the container to keep 34 | # the container emphemeral, so all references to var should always use this 35 | # environment variable. 36 | 'VAR_DIR': '$(APPS_DIR)/var', 37 | 38 | CHAPERONE_ROOT: "`bash -c 'cd $(APPS_DIR)/../..; echo $PWD'`" 39 | }, 40 | 41 | } 42 | 43 | # For the console, we include everything which is a warning except authentication 44 | # messages and daemon messages which are not errors. 45 | 46 | console.logging: { 47 | enabled: true, 48 | stdout: true, 49 | selector: '*.warn;authpriv,auth.!*;daemon.!warn', 50 | } 51 | -------------------------------------------------------------------------------- /sandbox/distserv/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #Developer's startup script 3 | #Created by chaplocal on Thu Oct 15 03:47:31 UTC 2015 4 | 5 | IMAGE="chapdev/chaperone-apache" 6 | INTERACTIVE_SHELL="/bin/bash" 7 | 8 | # You can specify the external host and ports for your webserver here. These variables 9 | # are also passed into the container so that any application code which does redirects 10 | # can use these if need be. 11 | 12 | EXT_HOSTNAME=localhost 13 | EXT_HTTP_PORT=9980 14 | EXT_HTTPS_PORT=9943 15 | 16 | # Uncomment to enable SSL and specify the certificate hostname 17 | #EXT_SSL_HOSTNAME=secure.example.com 18 | 19 | PORTOPT="-p $EXT_HTTP_PORT:8080 -e CONFIG_EXT_HTTP_PORT=$EXT_HTTP_PORT \ 20 | -p $EXT_HTTPS_PORT:8443 -e CONFIG_EXT_HTTPS_PORT=$EXT_HTTPS_PORT" 21 | 22 | usage() { 23 | echo "Usage: run.sh [-d] [-p port#] [-h] [extra-chaperone-options]" 24 | echo " Run $IMAGE as a daemon or interactively (the default)." 25 | echo " First available port will be remapped to $EXT_HOSTNAME if possible." 26 | exit 27 | } 28 | 29 | if [ "$CHAP_SERVICE_NAME" != "" ]; then 30 | echo run.sh should be executed on your docker host, not inside a container. 31 | exit 32 | fi 33 | 34 | cd ${0%/*} # go to directory of this file 35 | APPS=$PWD 36 | cd .. 37 | 38 | options="-t -i -e TERM=$TERM --rm=true" 39 | shellopt="/bin/bash" 40 | 41 | while getopts ":-dp:n:" o; do 42 | case "$o" in 43 | d) 44 | options="-d" 45 | shellopt="" 46 | ;; 47 | n) 48 | options="$options --name $OPTARG" 49 | ;; 50 | p) 51 | PORTOPT="-p $OPTARG" 52 | ;; 53 | -) # first long option terminates 54 | break 55 | ;; 56 | *) 57 | usage 58 | ;; 59 | esac 60 | done 61 | shift $((OPTIND-1)) 62 | 63 | # Run the image with this directory as our local apps dir. 64 | # Create a user with a uid/gid based upon the file permissions of the chaperone.d 65 | # directory. 
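# (Illustrative) The expansion below keeps only the first path component of the
# current directory, e.g. PWD=/home/alice/project yields MOUNT=/home, which is
# what gets bind-mounted into the container.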
66 | 67 | MOUNT=${PWD#/}; MOUNT=/${MOUNT%%/*} # extract user mountpoint 68 | SELINUX_FLAG=$(sestatus 2>/dev/null | fgrep -q enabled && echo :z) 69 | 70 | docker run --name distserv $options -v $MOUNT:$MOUNT$SELINUX_FLAG $PORTOPT \ 71 | -e CONFIG_EXT_HOSTNAME="$EXT_HOSTNAME" \ 72 | -e CONFIG_EXT_SSL_HOSTNAME="$EXT_SSL_HOSTNAME" \ 73 | $IMAGE \ 74 | --create $USER:$APPS/chaperone.d --config $APPS/chaperone.d $* $shellopt 75 | -------------------------------------------------------------------------------- /tests/env_parse.py: -------------------------------------------------------------------------------- 1 | from prefix import * 2 | 3 | from chaperone.cutil.env import EnvScanner 4 | 5 | TEST1 = ( 6 | ('Nothing',), 7 | ('A normal $(expansion) is here',), 8 | ('An unterminated $(expansion is here',), 9 | ('Two $(expansions) are $(also) here',), 10 | ('Nested $(expansions are $(also) here) too.',), 11 | ('Nested $(expansions are "$(also" here) too.',), 12 | ('Nested $(expansions are ["$(also" here),$(next)] finally) too.',), 13 | ('Ignore $(stuff))) like this.',), 14 | ('escape \\$(stuff) like this.',), 15 | ('exp $(stuff) but \$(do not $(except [{$(foo)}] this) but \${not} like this.',), 16 | ('Nested ${expansions are ["$(also" here),$(next)] finally} too.',), 17 | ) 18 | 19 | TEST1 = ( 20 | ('Nothing', 'Nothing'), 21 | ('A normal $(expansion) is here', 'A normal is here'), 22 | ('An unterminated $(expansion is here', 'An unterminated $(expansion is here'), 23 | ('Two $(expansions) are $(also) here', 'Two are here'), 24 | ('Nested $(expansions are $(also) here) too.', 'Nested too.'), 25 | ('Nested $(expansions are "$(also" here) too.', 'Nested too.'), 26 | ('Nested $(expansions are ["$(also" here),$(next)] finally) too.', 'Nested too.'), 27 | ('Ignore $(stuff))) like this.', 'Ignore )) like this.'), 28 | ('escape \$(stuff) like this.', 'escape $(stuff) like this.'), 29 | ('exp $(stuff) but \$(do not $(except [{$(foo)}] this) but \${not} like this.', 'exp but $(do not but ${not} like this.'), 30 | ('Nested ${expansions are ["$(also" here),$(next)] finally} too.', 'Nested too.'), 31 | ) 32 | 33 | class ScanTester: 34 | 35 | def __init__(self, test): 36 | self._test = test 37 | self._scanner = EnvScanner() 38 | 39 | def run(self, tc): 40 | for t in self._test: 41 | r = self._scanner.parse(t[0], self.callback) 42 | #print(" ('{0}', '{1}'),".format(t[0], r)) 43 | tc.assertEqual(t[1], r) 44 | 45 | def callback(self, buf, whole): 46 | return "<"+buf+">" 47 | 48 | 49 | class TestScanner(unittest.TestCase): 50 | 51 | def test_parse1(self): 52 | t = ScanTester(TEST1) 53 | t.run(self) 54 | 55 | if __name__ == '__main__': 56 | unittest.main() 57 | -------------------------------------------------------------------------------- /doc/source/guide/chap-intro.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _intro: 3 | 4 | Introduction to Chaperone 5 | ========================= 6 | 7 | Overview 8 | -------- 9 | 10 | Container technologies like Docker and Rocket have changed dramatically the way 11 | we bundle and distribute applications. While many containers are built with 12 | a single contained process in mind, other applications require a small suite 13 | of processes bundled into the "black box" that containers provide. 
When this 14 | happens, the need arises for a container control system, but the available 15 | technologies such as ``systemd`` or ``upstart`` are both too modular and 16 | too heavy, resulting in "fat containers" which introduce the very kinds of 17 | overhead container technologies are designed to eliminate. 18 | 19 | Chaperone is designed to solve this problem by providing a single, self-contained 20 | "caretaker" process which provides the following capabilities within the container: 21 | 22 | * Dependency-based parallel start-up of services. 23 | * A robust process manager with forking, oneshot, simple, 24 | and notify service types modelled after systemd. 25 | * Port-triggered services inside the container using the inetd service type. 26 | * A "cron" service type to schedule periodic tasks. 27 | * A built-in highly configurable syslog service which can direct syslog 28 | messages to multiple output files and duplicate selected streams or severities 29 | to the container stdout as well. 30 | * Control capabilities so that services can be stopped, started, or restarted easily 31 | at the command line or within application programs. 32 | * Emulation of systemd's ``sd_notify`` capability, allocating notify sockets 33 | for each service so that cgroups and other privileges are not needed 34 | within the container. Chaperone also recognizes a passed-in ``NOTIFY_SOCKET`` 35 | and will inform the host systemd of final container readiness and status. 36 | * Features to support the creation of "mini-systems" within a single directory 37 | so that system services can run in userspace, or be mounted on host shares 38 | to keep development processes and production processes as close to identical 39 | as possible (see ``chaperone-lamp`` for an example of how this can be realized). 40 | 41 | In addition, many incidental features are present, such as process monitoring and 42 | zombie clean-up, clean shutdown and container restarts, and interactive console 43 | process detection so that applications know when they are being run interactively.
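To make this concrete, here is a sketch (the service names and commands are
purely illustrative) of how two of the service types above are declared using
the configuration syntax found in the sample images in this repository::

    myapp.service: {
        type: notify,
        command: "/usr/local/bin/myapp",
        restart: true,
    }

    cleanup.service: {
        type: cron,
        interval: '0 * * * *',
        command: "bash -c 'rm -rf /tmp/myapp-cache'",
    }

The reference section documents the full set of service and logging directives.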
44 | 45 | -------------------------------------------------------------------------------- /sandbox/centos.d/sys1.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_inherit: ['SANDBOX', '_*'], 3 | env_set: {'TERM': 'xpath-revisited', 4 | 'QUESTIONER': 'the-law', 5 | 'WITHIN-HOME': '$(HOME)/inside-home', 6 | 'INTERACTIVE': '$(_CHAPERONE_INTERACTIVE)', 7 | 'CONFIG_DIR': '$(_CHAPERONE_CONFIG_DIR)', 8 | 'PROCTOOL': '$(SANDBOX)/proctool', 9 | 'ENV': '$(SANDBOX)/.shinit', 10 | 'PATH': '$(SANDBOX):/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin', 11 | }, 12 | uid: 0, 13 | idle_delay: 1, 14 | debug: true, 15 | } 16 | 17 | notify.service: { 18 | type: notify, 19 | command: "$(PROCTOOL) --wait 20 --dump --notify '--ready' 'notify process'", 20 | stdout: inherit, 21 | enabled: true, 22 | } 23 | 24 | fake1.service: { 25 | command: "$(PROCTOOL) --hang 'fake1 process'", 26 | enabled: false, 27 | } 28 | 29 | fake2.service: { 30 | command: "$(PROCTOOL) --hang 'fake2 service'", 31 | enabled: false, 32 | stdout: inherit, 33 | uid: 1000, 34 | env_inherit: ['Q*'], 35 | } 36 | 37 | fake3.service: { 38 | command: "$(PROCTOOL) 'oneshot service'", 39 | enabled: false, 40 | type: oneshot, 41 | stdout: inherit, 42 | ignore_failures: true, 43 | uid: garyw, 44 | service_group: 'earlystuff', 45 | before: "default", 46 | } 47 | 48 | exittest.service: { 49 | enabled: true, 50 | restart: true, 51 | restart_limit: 5, 52 | ignore_failures: true, 53 | command: "$(PROCTOOL) --exit 20 'Exiting with 20'", 54 | } 55 | 56 | repeat.service: { 57 | command: "$(SANDBOX)/repeat -i4 'Repeat to stdout'", 58 | enabled: false, 59 | } 60 | 61 | repeat_err.service: { 62 | command: "$(SANDBOX)/repeat -i4 -e 'Repeat to stderr'", 63 | enabled: false, 64 | } 65 | 66 | beforemain.service: { 67 | type: "oneshot", 68 | enabled: false, 69 | command: "sh -c 'echo START IDLE TASK; sleep 2; echo ENDING IDLE TASK'", 70 | stdout: inherit, 71 | before: "MAIN", 72 | service_group: "IDLE", 73 | } 74 | 75 | main.logging: { 76 | filter: "[chaperone].*", 77 | file: /var/log/chaperone-%d.log, 78 | enabled: true, 79 | } 80 | 81 | console.logging: { 82 | stdout: true, 83 | filter: '*.warn;![debian-start].*;authpriv,auth.!*;!/Repeat to std/.*', 84 | extended: true, 85 | enabled: true, 86 | } 87 | 88 | debian.logging: { 89 | filter: '[debian-start].*', 90 | file: /var/log/debian-start.log, 91 | enabled: true, 92 | } 93 | 94 | syslog.logging: { 95 | filter: '*.info;![debian-start].*;![chaperone].*', 96 | file: '/var/log/syslog-%d-%H%M', 97 | enabled: true, 98 | } 99 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import subprocess 4 | from setuptools import setup, find_packages 5 | 6 | if sys.version_info < (3,): 7 | print("You must run setup.py with Python 3 only. 
Python 2 distributions are not supported.") 8 | exit(1) 9 | 10 | ourdir = os.path.dirname(__file__) 11 | 12 | def read(fname): 13 | return open(os.path.join(ourdir, fname)).read() 14 | 15 | def get_version(): 16 | return subprocess.check_output([sys.executable, os.path.join("chaperone/cproc/version.py")]).decode().strip() 17 | 18 | def which(program): 19 | def is_exe(fpath): 20 | return os.path.isfile(fpath) and os.access(fpath, os.X_OK) 21 | 22 | fpath, fname = os.path.split(program) 23 | if fpath: 24 | if is_exe(program): 25 | return program 26 | else: 27 | for path in os.environ["PATH"].split(os.pathsep): 28 | path = path.strip('"') 29 | exe_file = os.path.join(path, program) 30 | if is_exe(exe_file): 31 | return exe_file 32 | return None 33 | 34 | requires_list = ['docopt>=0.6.2', 'PyYAML>=3.1.1', 'voluptuous>=0.8.7', 'aiocron>=0.3'] 35 | 36 | if which('gcc'): 37 | requires_list += ["setproctitle>=1.1.8"] 38 | 39 | setup( 40 | name = "chaperone", 41 | version = get_version(), 42 | description = 'Simple system init daemon for Docker-like environments', 43 | long_description = read('README'), 44 | packages = find_packages(), 45 | #test_suite = "pyt_tests.tests.test_all", 46 | entry_points={ 47 | 'console_scripts': [ 48 | 'chaperone = chaperone.exec.chaperone:main_entry', 49 | 'telchap = chaperone.exec.telchap:main_entry', 50 | 'envcp = chaperone.exec.envcp:main_entry', 51 | 'sdnotify = chaperone.exec.sdnotify:main_entry', 52 | 'sdnotify-exec = chaperone.exec.sdnotify_exec:main_entry', 53 | ], 54 | }, 55 | license = "Apache Software License", 56 | author = "Gary Wisniewski", 57 | author_email = "garyw@blueseastech.com", 58 | url = "http://github.com/garywiz/chaperone", 59 | keywords = "docker init systemd syslog", 60 | 61 | install_requires = requires_list, 62 | 63 | classifiers = [ 64 | "Development Status :: 5 - Production/Stable", 65 | "Intended Audience :: Developers", 66 | "License :: OSI Approved :: Apache Software License", 67 | "Natural Language :: English", 68 | "Operating System :: POSIX :: Linux", 69 | "Programming Language :: Python :: 3", 70 | "Topic :: System :: Logging", 71 | "Topic :: System :: Boot :: Init", 72 | ] 73 | ) 74 | -------------------------------------------------------------------------------- /tests/syslog_spec.py: -------------------------------------------------------------------------------- 1 | from prefix import * 2 | 3 | from chaperone.cutil.syslog import _syslog_spec_matcher 4 | 5 | SPECS = ( 6 | ('*.*', '(True)'), 7 | ('[crond].*', '((g and "crond" == g.lower()))'), 8 | ('.*', 'Invalid log spec syntax: .*'), 9 | ('kern.*;kern.!=crit', '((not (f==0) or not p==2)) and (((f==0)))'), 10 | ('KERN.*;kern.!crit', '((not (f==0) or not p<=2)) and (((f==0)))'), 11 | ('kern.crit', '((f==0) and p<=2)'), 12 | ('*.=emerg;*.=crit', '(p==0) or (p==2)'), 13 | ('/not and\/or able/.*', '(bool(s._regexes[0].search(buf)))'), 14 | ('*.*;![debian-start].*;authpriv,auth.!*', '(not ((g and "debian-start" == g.lower())) and (not (f==10 or f==4)))'), 15 | ('*.*;![debian-start].*;!authpriv,auth.*', '(not ((g and "debian-start" == g.lower())) and not ((f==10 or f==4)))'), 16 | ('*.*;![debian-start].*;!authpriv,auth.!crit', '(not ((g and "debian-start" == g.lower())) and (not (f==10 or f==4) and not p<=2))'), 17 | ('kern.*', '((f==0))'), 18 | ('*.*;*.!*', '((False))'), 19 | ('*.*;![chaperone].*', '(not ((g and "chaperone" == g.lower())))'), 20 | ('kern.*;!auth,authpriv.*', '(not ((f==4 or f==10))) and (((f==0)))'), 21 | ('[cron].*;[daemon-tools].crit;/password/.!err', '((not 
bool(s._regexes[0].search(buf)) or not p<=3)) and (((g and "cron" == g.lower())) or ((g and "daemon-tools" == g.lower()) and p<=2))'), 22 | ('kern.*;![cron].!err', '((not (g and "cron" == g.lower()) and not p<=3)) and (((f==0)))'), 23 | ('[chaperone].err;[logrotate].err;!kern.*', '(not ((f==0))) and (((g and "chaperone" == g.lower()) and p<=3) or ((g and "logrotate" == g.lower()) and p<=3))'), 24 | ('/panic/.*;/segfault/.*;*.!=debug', '((not p==7)) and ((bool(s._regexes[0].search(buf))) or (bool(s._regexes[1].search(buf))))'), 25 | ) 26 | 27 | 28 | class TestSyslogSpec(unittest.TestCase): 29 | 30 | def test_specs(self): 31 | for s in SPECS: 32 | try: 33 | sm = _syslog_spec_matcher(s[0]).debugexpr 34 | except Exception as ex: 35 | sm = ex 36 | if 'unexpected' in str(sm): 37 | raise 38 | #Uncomment to generate the test table, but CHECK IT carefully! 39 | #print("('{0:40} '{1}'),".format(s[0]+"',", sm)) 40 | self.assertEqual(str(sm), s[1]) 41 | 42 | if __name__ == '__main__': 43 | unittest.main() 44 | -------------------------------------------------------------------------------- /tests/events.py: -------------------------------------------------------------------------------- 1 | from prefix import * 2 | 3 | from chaperone.cutil.events import EventSource 4 | 5 | class handlers: 6 | 7 | def __init__(self): 8 | self.results = list() 9 | 10 | def handler1(self, val): 11 | self.results.append("handler1:" + val) 12 | 13 | def handler2(self, val): 14 | self.results.append("handler2:" + val) 15 | 16 | def handler3(self, val): 17 | self.results.append("handler3:" + val) 18 | 19 | class TestEvents(unittest.TestCase): 20 | 21 | def setUp(self): 22 | self.h = handlers() 23 | self.e = EventSource() 24 | 25 | def test_event1(self): 26 | self.e.add(onH1 = self.h.handler1) 27 | self.e.add(onH1 = self.h.handler1) 28 | self.e.onH1("First trigger") 29 | self.e.onH1("Second trigger") 30 | self.assertEqual(self.h.results, 31 | ['handler1:First trigger', 'handler1:First trigger', 'handler1:Second trigger', 'handler1:Second trigger']) 32 | self.e.remove(onH1 = self.h.handler1) 33 | self.e.onH1("Third trigger") 34 | self.e.remove(onH1 = self.h.handler1) 35 | self.e.onH1("Fourth trigger") 36 | self.assertEqual(self.h.results, 37 | ['handler1:First trigger', 'handler1:First trigger', 'handler1:Second trigger', 'handler1:Second trigger', 'handler1:Third trigger']) 38 | 39 | def test_event2(self): 40 | self.e.add(onH1 = self.h.handler1) 41 | self.assertRaisesRegex(TypeError, 'but 3 were given', lambda: self.e.onH1("arg1", "arg2")) 42 | 43 | def test_event3(self): 44 | self.e.add(onMulti = self.h.handler1) 45 | self.e.add(onMulti = self.h.handler2) 46 | self.e.onMulti("TWO") 47 | self.e.add(onMulti = self.h.handler3) 48 | self.e.onMulti("THREE") 49 | self.assertEqual(self.h.results, 50 | ['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE']) 51 | self.e.remove(onMulti = self.h.handler2) 52 | self.e.onMulti("AFTER-REMOVE") 53 | self.assertEqual(self.h.results, 54 | ['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE', 'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE']) 55 | self.e.remove(onMulti = self.h.handler1) 56 | self.e.remove(onMulti = self.h.handler2) 57 | self.e.remove(onMulti = self.h.handler3) 58 | self.e.onMulti("EMPTY") 59 | self.assertEqual(self.h.results, 60 | ['handler1:TWO', 'handler2:TWO', 'handler1:THREE', 'handler2:THREE', 'handler3:THREE', 'handler1:AFTER-REMOVE', 'handler3:AFTER-REMOVE']) 61 | 62 | if __name__ == '__main__': 63 | 
unittest.main() 64 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. chaperone documentation master file, created by 2 | sphinx-quickstart on Mon May 6 17:19:12 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Chaperone: A lightweight, all-in-one process manager for lean containers 7 | ======================================================================== 8 | 9 | Chaperone is a lightweight alternative to process environment managers 10 | like ``systemd`` or ``upstart``. While chaperone provides an extensive 11 | feature set, including dependency-based startup, syslog logging, zombie harvesting, 12 | and job scheduling, it does all of this in a single self-contained process that can 13 | run as a "system init" daemon or can run in userspace. 14 | 15 | This makes Chaperone an ideal tool for managing "small" process spaces like Docker 16 | containers while still providing the system services many daemons expect. 17 | 18 | If you are using Chaperone with Docker, we suggest reading the :ref:`intro`, then try out 19 | the ``chaperone-lamp`` Docker image by 20 | `chaperone-docker github page `_ 21 | 22 | Any bugs should be reported as issues at https://github.com/garywiz/chaperone/issues. 23 | 24 | Current status of Chaperone and related repositories is located on the 25 | :ref:`Project Status ` page. 26 | 27 | Contents 28 | -------- 29 | 30 | .. toctree:: 31 | :maxdepth: 2 32 | 33 | guide/chap-intro.rst 34 | guide/chap-using.rst 35 | ref/index.rst 36 | 37 | Downloading and Installing 38 | -------------------------- 39 | 40 | The easiest way to install ``chaperone`` is using ``pip`` from the https://pypi.python.org/pypi/chaperone package:: 41 | 42 | # Ubuntu or debian prerequisites... 43 | apt-get install python3-pip 44 | 45 | # chaperone (may be all you need) 46 | pip3 install chaperone 47 | 48 | If you're interested in the source code, or contributing, you can find the ``chaperone`` source code 49 | at https://github.com/garywiz/chaperone. 50 | 51 | 52 | License 53 | ------- 54 | 55 | Copyright (c) 2015, Gary J. Wisniewski 56 | 57 | Licensed under the Apache License, Version 2.0 (the "License"); 58 | you may not use this file except in compliance with the License. 59 | You may obtain a copy of the License at 60 | 61 | http://www.apache.org/licenses/LICENSE-2.0 62 | 63 | Unless required by applicable law or agreed to in writing, software 64 | distributed under the License is distributed on an "AS IS" BASIS, 65 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 66 | See the License for the specific language governing permissions and 67 | limitations under the License. 
68 | -------------------------------------------------------------------------------- /tests/service_order.py: -------------------------------------------------------------------------------- 1 | from prefix import * 2 | 3 | from chaperone.cutil.config import ServiceDict 4 | 5 | OT1 = { 6 | 'one.service': { }, 7 | 'two.service': { 'service_groups': 'foobar', 'after': 'default' }, 8 | 'three.service': { 'service_groups': 'system', 'before': 'four.service' }, 9 | 'four.service': { 'service_groups': 'system', 'before': 'default' }, 10 | 'five.service': { }, 11 | 'six.service': { 'after': 'seven.service' }, 12 | 'seven.service': { }, 13 | 'eight.service': { 'service_groups': 'system', 'before': 'default' }, 14 | } 15 | 16 | OT2 = { 17 | 'one.service': { }, 18 | 'two.service': { 'service_groups': 'foobar', 'after': 'default' }, 19 | 'three.service': { 'service_groups': 'system', 'before': 'two.service' }, 20 | 'four.service': { 'service_groups': 'system', 'before': 'three.service' }, 21 | 'five.service': { }, 22 | 'six.service': { }, 23 | 'seven.service': { } 24 | } 25 | 26 | OT3 = { 27 | 'one.service': { }, 28 | 'two.service': { 'before': 'default' }, 29 | 'three.service': { 'service_groups': 'system', 'before': 'four.service' }, 30 | 'four.service': { 'service_groups': 'system', 'before': 'default' }, 31 | 'five.service': { 'before': 'two.service' }, 32 | 'six.service': { 'after': 'seven.service' }, 33 | 'seven.service': { } 34 | } 35 | 36 | def printlist(title, d): 37 | return 38 | print(title) 39 | for item in d: 40 | print(" ", item) 41 | 42 | def checkorder(result, *series): 43 | """ 44 | Checks to be sure that the items listed in 'series' are in order in the result set. 45 | """ 46 | results = [r.name for r in result] 47 | indexes = list(map(lambda item: results.index(item+".service"), series)) 48 | for n in range(len(indexes)-1): 49 | if indexes[n] > indexes[n+1]: 50 | return False 51 | return True 52 | 53 | class TestServiceOrder(unittest.TestCase): 54 | 55 | def test_order1(self): 56 | sc = ServiceDict(OT1.items()) 57 | slist = sc.get_startup_list() 58 | printlist("startup list: ", slist) 59 | self.assertTrue(checkorder(slist, 'three', 'four', 'seven', 'six', 'two')) 60 | self.assertTrue(checkorder(slist, 'three', 'one', 'two')) 61 | self.assertTrue(checkorder(slist, 'eight', 'one', 'two')) 62 | 63 | def test_order2(self): 64 | sc = ServiceDict(OT2.items()) 65 | slist = sc.get_startup_list() 66 | printlist("startup list: ", slist) 67 | self.assertTrue(checkorder(slist, 'four', 'three', 'two')) 68 | 69 | def test_order3(self): 70 | sc = ServiceDict(OT3.items()) 71 | self.assertRaisesRegex(Exception, '^circular', lambda: sc.get_startup_list()) 72 | 73 | if __name__ == '__main__': 74 | unittest.main() 75 | -------------------------------------------------------------------------------- /chaperone/cutil/proc.py: -------------------------------------------------------------------------------- 1 | import os 2 | from chaperone.cutil.misc import get_signal_name 3 | 4 | class ProcStatus(int): 5 | 6 | _other_error = None 7 | _errno = None 8 | 9 | def __new__(cls, val): 10 | try: 11 | intval = int(val) 12 | except ValueError: 13 | rval = int.__new__(cls, 0) 14 | rval._other_error = str(val) 15 | return rval 16 | 17 | return int.__new__(cls, intval) 18 | 19 | @property 20 | def exited(self): 21 | return os.WIFEXITED(self) 22 | 23 | @property 24 | def signaled(self): 25 | return os.WIFSIGNALED(self) 26 | 27 | @property 28 | def stopped(self): 29 | return os.WIFSTOPPED(self) 30 | 31 | @property 32 
| def continued(self): 33 | return os.WIFCONTINUED(self) 34 | 35 | @property 36 | def exit_status(self): 37 | status = (os.WIFEXITED(self) or None) and os.WEXITSTATUS(self) 38 | if not status and self._errno: 39 | return 1 # default to exit_status = 1 in the case of an errno value 40 | return status 41 | 42 | @property 43 | def normal_exit(self): 44 | return self.exit_status == 0 and not self._other_error 45 | 46 | @property 47 | def errno(self): 48 | "Map situation to an errno, even if contrived, unless one was provided." 49 | if self._errno is not None: 50 | return self._errno 51 | if self.signal: 52 | return 4 #EINTR 53 | return 8 #ENOEXEC 54 | @errno.setter 55 | def errno(self, val): 56 | self._errno = val 57 | 58 | @property 59 | def exit_message(self): 60 | es = self.exit_status 61 | if es is not None: 62 | return os.strerror(es) 63 | return None 64 | 65 | @property 66 | def signal(self): 67 | if os.WIFSTOPPED(self): 68 | return os.WSTOPSIG(self) 69 | if os.WIFSIGNALED(self): 70 | return os.WTERMSIG(self) 71 | return None 72 | 73 | @property 74 | def briefly(self): 75 | if self.signaled or self.stopped: 76 | return get_signal_name(self.signal) 77 | if self.exited: 78 | return "exit({0})".format(self.exit_status) 79 | return '?' 80 | 81 | def __format__(self, spec): 82 | if spec: 83 | return int.__format__(self, spec) 84 | msg = "" 94 | -------------------------------------------------------------------------------- /sandbox/user.d/sys1.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_inherit: ['SANDBOX', '_*'], 3 | env_set: {'TERM': 'xpath-revisited', 4 | 'QUESTIONER': 'the-law', 5 | 'WITHIN-HOME': '$(HOME)/inside-home', 6 | 'INTERACTIVE': '$(_CHAP_INTERACTIVE)', 7 | 'CONFIG_DIR': '$(_CHAP_CONFIG_DIR)', 8 | 'PROCTOOL': '$(SANDBOX)/proctool', 9 | 'ENV': '$(SANDBOX)/.shinit', 10 | 'PATH': '$(SANDBOX):/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin', 11 | 'APPS_PATH': '$(HOME:-)/apps', 12 | }, 13 | #uid: 0, 14 | idle_delay: 1, 15 | debug: true, 16 | } 17 | cron1.service: { 18 | type: cron, 19 | stdout: inherit, stderr: inherit, 20 | interval: '* * * * *', 21 | command: "$(PROCTOOL) --wait 20 'running cron1.service'" 22 | } 23 | hometest.service: { 24 | type: oneshot, 25 | command: "$(PROCTOOL) my.$(APPS_PATH).apps-path", 26 | } 27 | 28 | fake1.service: { 29 | command: "$(PROCTOOL) --hang 'fake1 process'", 30 | enabled: false, 31 | } 32 | 33 | fake2.service: { 34 | command: "$(PROCTOOL) --hang 'fake2 service'", 35 | enabled: true, 36 | stdout: inherit, 37 | uid: 1000, 38 | env_inherit: ['Q*', 'SANDBOX', 'PROCTOOL'], 39 | } 40 | 41 | fake3.service: { 42 | command: "$(PROCTOOL) 'oneshot service'", 43 | enabled: false, 44 | type: oneshot, 45 | stdout: inherit, 46 | ignore_failures: true, 47 | uid: garyw, 48 | service_groups: 'earlystuff', 49 | before: "default", 50 | } 51 | 52 | exittest.service: { 53 | enabled: false, 54 | restart: true, 55 | restart_limit: 5, 56 | ignore_failures: true, 57 | command: "$(PROCTOOL) --exit 20 'Exiting with 20'", 58 | } 59 | 60 | repeat.service: { 61 | command: "$(SANDBOX)/repeat -i4 'Repeat to stdout'", 62 | enabled: false, 63 | } 64 | 65 | repeat_err.service: { 66 | command: "$(SANDBOX)/repeat -i4 -e 'Repeat to stderr'", 67 | enabled: false, 68 | } 69 | 70 | beforemain.service: { 71 | type: "oneshot", 72 | enabled: false, 73 | command: "sh -c 'echo START IDLE TASK; sleep 2; echo ENDING IDLE TASK'", 74 | stdout: inherit, 75 | before: "MAIN", 76 | service_groups: "IDLE", 77 | } 78 | 79 | 
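# The filter/selector strings in the logging sections below use the syslog-style
# matching syntax exercised in tests/syslog_spec.py: "facility.priority" terms,
# "[program]" tags, "/regex/" text matches, comma-separated facility lists, and
# "!" negation, joined together with ";".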
main.logging: { 80 | filter: "[chaperone].*", 81 | file: "$(HOME)/tmp/chaperone-%d.log", 82 | enabled: true, 83 | } 84 | 85 | console.logging: { 86 | stdout: true, 87 | filter: '*.warn;![debian-start].*;authpriv,auth.!*;!/Repeat to std/.*', 88 | extended: true, 89 | enabled: true, 90 | } 91 | 92 | debian.logging: { 93 | filter: '[debian-start].*', 94 | file: "$(HOME)/tmp/debian-start.log", 95 | enabled: true, 96 | } 97 | 98 | syslog.logging: { 99 | filter: '*.info;![debian-start].*;![chaperone].*', 100 | file: '$(HOME)/tmp/syslog-%d-%H%M', 101 | enabled: true, 102 | } 103 | -------------------------------------------------------------------------------- /tests/bin/test-driver: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Assumes the current directory contains executable files and runs them all. 3 | 4 | function relpath() { python -c "import os,sys;print(os.path.relpath(*(sys.argv[1:])))" "$@"; } 5 | 6 | function extract_title() { 7 | script=$1 8 | title=`sed -n 's/^#TITLE: *//p' $script` 9 | [ "$title" == "" ] && title=$script 10 | echo $title 11 | } 12 | 13 | export CHTEST_CONTAINER_NAME=CHAP-TEST-CONTAINER-$$ 14 | 15 | function kill_test_container() { 16 | sleep 1 # Sometimes it takes docker a while to actually kill the container. 17 | if docker inspect $CHTEST_CONTAINER_NAME >/dev/null 2>&1; then 18 | echo Container still running: Forcing removal 19 | docker kill $CHTEST_CONTAINER_NAME >/dev/null 20 | docker rm -v $CHTEST_CONTAINER_NAME >/dev/null 21 | fi 22 | } 23 | 24 | shellmode=0 25 | if [ "$1" == '--shell' ]; then 26 | shellmode=1 27 | shift 28 | fi 29 | 30 | export TESTDIR=$(readlink -f $1) 31 | export TESTHOME=$PWD 32 | export CHTEST_HOME=$TESTDIR/_temp-$$_ 33 | 34 | if [ "$CHTEST_LOGDIR" == "" ]; then 35 | export CHTEST_LOGDIR=$TESTHOME/test_logs 36 | fi 37 | 38 | if [ "$2" == "" ]; then 39 | IMAGE_NAME=chapdev/chaperone-lamp 40 | else 41 | IMAGE_NAME=$2 42 | fi 43 | 44 | export CHTEST_IMAGE=$IMAGE_NAME 45 | 46 | if [ ! -d $TESTDIR ]; then 47 | exit 48 | fi 49 | 50 | if [ -e $CHTEST_HOME ]; then 51 | echo "Can't continue... $CHTEST_HOME already exists." 52 | exit 1 53 | fi 54 | 55 | if [ "`which expect-lite`" == "" ]; then 56 | echo "expect-lite must be installed for tests to run" 57 | exit 1 58 | fi 59 | 60 | mkdir -p $CHTEST_LOGDIR 61 | 62 | if [ $shellmode == 1 ]; then 63 | mkdir $CHTEST_HOME 64 | expect-lite-image-run --disable-services /bin/bash 65 | rm -rf $CHTEST_HOME 66 | exit 67 | fi 68 | 69 | ( 70 | exitcode=0 71 | for sf in $( find $TESTDIR -type f -executable \! -name '*~' ); do 72 | if [ "$CHTEST_ONLY_ENDSWITH" != "" -a "${sf%*/$CHTEST_ONLY_ENDSWITH}" == "$sf" ]; then 73 | continue 74 | fi 75 | mkdir $CHTEST_HOME; cd $CHTEST_HOME 76 | logfile=$CHTEST_LOGDIR/$(basename $TESTDIR)_${sf/*\//}.log 77 | rm -f $logfile.err 78 | title=$(extract_title $sf) 79 | echo "RUNNING TEST: $title" 80 | echo "" >>$logfile.err 81 | echo "##" >>$logfile.err 82 | echo "## RUNNING TEST: $title" >>$logfile.err 83 | echo "## $sf" >>$logfile.err 84 | echo "##" >>$logfile.err 85 | if ! $sf >>$logfile.err 2>&1; then 86 | echo "TEST FAILED: $sf (see $(relpath $logfile.err $TESTHOME))" 87 | exitcode=2 88 | else 89 | mv $logfile.err $logfile 90 | fi 91 | kill_test_container 92 | cd $TESTDIR; [ ! 
-f keep.tempdir ] && rm -rf $CHTEST_HOME 93 | done 94 | if [ $exitcode != 0 ]; then 95 | echo "Some tests failed in: $TESTDIR" 96 | fi 97 | exit $exitcode 98 | ) 99 | -------------------------------------------------------------------------------- /doc/docserver/chaperone.d/010-start.conf: -------------------------------------------------------------------------------- 1 | # 010-start.conf 2 | # 3 | # This is the first start-up file for the chaperone base images. Note that start-up files 4 | # are processed in order alphabetically, so settings in later files can override those in 5 | # earlier files. 6 | 7 | # General environmental settings. These settings apply to all services and logging entries. 8 | # There should be only one "settings" directive in each configuration file. But, any 9 | # settings encountered in subsequent configuration files can override or augment these. 10 | # Note that variables are expanded as late as possile. So, there can be variables 11 | # defined here which depend upon variables which will be defined later (such as _CHAP_SERVICE), 12 | # which is defined implicitly for each service. 13 | 14 | settings: { 15 | 16 | env_set: { 17 | 18 | 'LANG': 'en_US.UTF-8', 19 | 'LC_CTYPE': '$(LANG)', 20 | 'PATH': '$(APPS_DIR)/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/local/sbin', 21 | 22 | # Uncomment the below to tell init.sh to lock-down the root account after the first 23 | # successful start. 24 | #'SECURE_ROOT': '1', 25 | 26 | # Variables starting with _CHAP are internal and won't be exported to services, 27 | # so we derive public environment variables if needed... 28 | 'APPS_DIR': '$(_CHAP_CONFIG_DIR:-/)', 29 | 'CHAP_SERVICE_NAME': '$(_CHAP_SERVICE:-)', 30 | 'CHAP_TASK_MODE': '$(_CHAP_TASK_MODE:-)', 31 | }, 32 | 33 | } 34 | 35 | # This is the startup script which manages the contents of $(APPS_DIR)/init.d. It will 36 | # run each of the init.d scripts in sequence. Because this is part of the special "INIT" 37 | # group, it will be run before any other service which is not in the group. This makes 38 | # it unnecessary to worry about 'before:' and 'after:' settings for init scripts. 39 | 40 | init.service: { 41 | type: oneshot, 42 | command: '/bin/bash $(APPS_DIR)/etc/init.sh', 43 | before: 'default,database,application', 44 | service_groups: 'INIT', 45 | } 46 | 47 | # We select all messages from the "chaperone" program itself, which will include 48 | # all messages which originate from the chaperone daemon. We put these in a single 49 | # log file which will be appended to on each run, so that if these log files 50 | # are on a mounted user volume, they will accumulate for historical purposes. 51 | 52 | chaperone.logging: { 53 | enabled: true, 54 | selector: '[chaperone].*', 55 | file: '$(APPS_DIR)/var/log/chaperone.log', 56 | } 57 | 58 | # The rest, except for chaperone, goes to the syslog 59 | 60 | syslog.logging: { 61 | enabled: true, 62 | selector: '*.info;![chaperone].*', 63 | file: '$(APPS_DIR)/var/log/syslog.log', 64 | } 65 | 66 | # For the console, we include everything which is a warning except authentication 67 | # messages and daemon messages which are not errors. 
68 | 69 | console.logging: { 70 | enabled: true, 71 | stdout: true, 72 | selector: '*.warn;authpriv,auth.!*;daemon.!warn', 73 | } 74 | -------------------------------------------------------------------------------- /sandbox/test.d/sys1.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_inherit: ['SANDBOX', '_*'], 3 | env_set: {'TERM': 'xpath-revisited', 4 | 'QUESTIONER': 'the-law', 5 | 'WITHIN-HOME': '$(HOME)/inside-home', 6 | 'INTERACTIVE': '$(_CHAP_INTERACTIVE)', 7 | 'CONFIG_DIR': '$(_CHAP_CONFIG_DIR)', 8 | 'PROCTOOL': '$(SANDBOX)/proctool', 9 | 'ENV': '$(SANDBOX)/.shinit', 10 | 'PATH': '$(SANDBOX):/usr/local/sbin:/usr/local/bin:/services/$(_CHAP_SERVICE)/bin:/usr/sbin:/usr/bin:/bin', 11 | 'APPS_PATH': '$(HOME:-)/apps', 12 | }, 13 | uid: 0, 14 | idle_delay: 1, 15 | debug: true, 16 | } 17 | cron1.service: { 18 | type: cron, 19 | stdout: inherit, stderr: inherit, 20 | interval: "*/2 * * * *", 21 | command: "proctool --wait 2 'running cron1.service'" 22 | } 23 | hometest.service: { 24 | type: oneshot, 25 | command: "$(PROCTOOL) my.$(APPS_PATH).apps-path", 26 | } 27 | 28 | fake1.service: { 29 | command: "$(PROCTOOL) --dump --hang 'fake1 process'", 30 | env_set: { 'PATH': '/binno/proctool:$(PATH)' }, 31 | env_unset: [ '*HOME*', 'APPS_PATH' ], 32 | stdout: inherit, 33 | enabled: true, 34 | debug: true, 35 | } 36 | 37 | fake2.service: { 38 | command: "$(PROCTOOL) --hang 'fake2 service'", 39 | enabled: false, 40 | stdout: inherit, 41 | uid: 1000, 42 | env_inherit: ['Q*', 'SANDBOX', 'PROCTOOL'], 43 | } 44 | 45 | fake3.service: { 46 | command: "$(PROCTOOL) 'oneshot service'", 47 | enabled: false, 48 | type: oneshot, 49 | stdout: inherit, 50 | ignore_failures: true, 51 | uid: garyw, 52 | service_groups: 'earlystuff', 53 | before: "default", 54 | } 55 | 56 | exittest.service: { 57 | enabled: true, 58 | restart: true, 59 | restart_limit: 3, 60 | ignore_failures: true, 61 | command: "$(PROCTOOL) --exit 20 'Exiting with 20'", 62 | } 63 | 64 | repeat.service: { 65 | command: "$(SANDBOX)/repeat -i4 'Repeat to stdout'", 66 | enabled: true, 67 | } 68 | 69 | repeat_err.service: { 70 | command: "$(SANDBOX)/repeat -i4 -e 'Repeat to stderr'", 71 | enabled: false, 72 | } 73 | 74 | beforemain.service: { 75 | type: "oneshot", 76 | enabled: false, 77 | command: "sh -c 'echo START IDLE TASK; sleep 2; echo ENDING IDLE TASK'", 78 | stdout: inherit, 79 | before: "MAIN", 80 | service_groups: "IDLE", 81 | } 82 | 83 | main.logging: { 84 | selector: "[chaperone].*", 85 | file: /var/log/chaperone-%d.log, 86 | enabled: true, 87 | } 88 | 89 | console.logging: { 90 | stdout: true, 91 | selector: '*.warn;![debian-start].*;authpriv,auth.!*;!/Repeat to std/.*', 92 | extended: true, 93 | enabled: true, 94 | } 95 | 96 | debian.logging: { 97 | selector: '[debian-start].*', 98 | file: /var/log/debian-start.log, 99 | enabled: true, 100 | } 101 | 102 | syslog.logging: { 103 | selector: '*.info;![debian-start].*;![chaperone].*', 104 | file: '/var/log/syslog-%d-%H%M', 105 | enabled: true, 106 | } 107 | -------------------------------------------------------------------------------- /chaperone/exec/sdnotify.py: -------------------------------------------------------------------------------- 1 | """ 2 | Systemd notify tool (compatible with systemd-notify) 3 | 4 | Usage: 5 | sdnotify [options] [VARIABLE=VALUE ...] 
6 | 7 | Options: 8 | --pid PID Inform chaperone/systemd of MAINPID 9 | (must say --pid=self if you want the programs PID) 10 | --status=STATUS Inform chaperone/systemd of status information 11 | --ready Send the ready signal (READY=1) 12 | --booted Indicate whether we were booted with systemd. 13 | (Note: Always indicates 'no', exit status 1.) 14 | --ignore Silently ignore inability to send notifications. 15 | (Always ignored if NOTIFY_SOCKET is not set.) 16 | 17 | All of the above specified will be sent in the order given above, then 18 | any VARIABLE=VALUE pairs will be sent. 19 | 20 | This is provided by Chaperone as an alternative to systemd-notify for distros 21 | which may not have one. 22 | """ 23 | 24 | # perform any patches first 25 | import chaperone.cutil.patches 26 | 27 | # regular code begins 28 | import sys 29 | import os 30 | import socket 31 | from docopt import docopt 32 | 33 | from chaperone.cproc.version import VERSION_MESSAGE 34 | 35 | def _mkabstract(socket_name): 36 | if socket_name.startswith('@'): 37 | socket_name = '\0%s' % socket_name[1:] 38 | return socket_name 39 | 40 | 41 | def do_notify(msg): 42 | notify_socket = os.getenv('NOTIFY_SOCKET') 43 | if notify_socket: 44 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) 45 | try: 46 | sock.connect(_mkabstract(notify_socket)) 47 | sock.sendall(msg.encode()) 48 | except EnvironmentError as ex: 49 | raise Exception("Systemd notification failed: " + str(ex)) 50 | finally: 51 | sock.close() 52 | 53 | def main_entry(): 54 | options = docopt(__doc__, version=VERSION_MESSAGE) 55 | 56 | mlist = list() 57 | 58 | if options['--pid']: 59 | pid = options['--pid'] 60 | if pid == 'self': 61 | mlist.append("MAINPID="+str(os.getpid())) 62 | else: 63 | try: 64 | pidval = int(pid) 65 | except ValueError: 66 | print("error: not a valid PID '{0}'".format(pid)) 67 | exit(1) 68 | mlist.append("MAINPID="+str(pid)) 69 | 70 | if options['--status']: 71 | mlist.append("STATUS=" + options['--status']) 72 | 73 | if options['--ready']: 74 | mlist.append("READY=1") 75 | 76 | for vv in options['VARIABLE=VALUE']: 77 | vvs = vv.split('=') 78 | if len(vvs) != 2: 79 | print("error: not a valid format for VARIABLE=VALUE, '{0}'".format(vv)) 80 | exit(1) 81 | mlist.append("{0}={1}".format(vvs[0].upper(), vvs[1])) 82 | 83 | for msg in mlist: 84 | try: 85 | do_notify(msg) 86 | except Exception as ex: 87 | if not options['--ignore']: 88 | print("error: could not send sd_notify message, " + str(ex)) 89 | exit(1) 90 | 91 | if options['--booted']: 92 | exit(1) 93 | -------------------------------------------------------------------------------- /chaperone/cutil/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sys 4 | import traceback 5 | from time import strftime 6 | 7 | from logging.handlers import SysLogHandler 8 | from functools import partial 9 | 10 | import chaperone.cutil.syslog_info as syslog_info 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | _root_logger = logging.getLogger(None) 15 | _stderr_handler = logging.StreamHandler() 16 | _cur_level = logging.NOTSET 17 | 18 | _format = logging.Formatter() 19 | _stderr_handler.setFormatter(_format) 20 | 21 | _root_logger.addHandler(_stderr_handler) 22 | 23 | 24 | def set_log_level(lev): 25 | global _cur_level 26 | 27 | _cur_level = syslog_info.syslog_to_python_lev(lev) 28 | logger.setLevel(_cur_level) 29 | 30 | 31 | def set_custom_handler(handler, enable = True): 32 | if enable: 33 | 
_root_logger.addHandler(handler) 34 | _root_logger.removeHandler(_stderr_handler) 35 | logger.setLevel(logging.DEBUG) 36 | else: 37 | _root_logger.removeHandler(handler) 38 | _root_logger.addHandler(_stderr_handler) 39 | logger.setLevel(_cur_level) 40 | 41 | 42 | def _versatile_logprint(delegate, fmt, *args, 43 | facility=None, exceptions=False, 44 | program=None, pid=None, **kwargs): 45 | """ 46 | In addition to standard log formatting, the following two special cases are 47 | covered: 48 | 1. If there are no formatting characters (%), then simply concatenate repr() of *args 49 | 2. If there are '{' formatting arguments, then apply new-style .format using arguments 50 | provided. 51 | 52 | Additionally, you can pass an exception as the first argument: 53 | 1. If no other arguments are provided, then the exception message will be the 54 | log item. 55 | 2. A traceback will be printed in the case where the logger priority level is set to debug. 56 | """ 57 | 58 | if isinstance(fmt, Exception): 59 | ex = fmt 60 | args = list(args) 61 | if len(args) == 0: 62 | fmt = [str(ex)] 63 | else: 64 | fmt = args.pop(0) 65 | else: 66 | ex = None 67 | 68 | if facility is not None or program or pid: 69 | extra = kwargs['extra'] = {} 70 | if facility: 71 | extra['_facility'] = facility 72 | if program: 73 | extra['program_name'] = str(program) 74 | if pid: 75 | extra['program_pid'] = str(pid) 76 | 77 | 78 | if ex and (exceptions or logger.level == logging.DEBUG): # use python level here 79 | trace = "\n" + traceback.format_exc() 80 | else: 81 | trace = "" 82 | 83 | if not len(args): 84 | delegate(fmt, **kwargs) 85 | elif '%' not in fmt: 86 | if '{' in fmt: 87 | delegate('%s', fmt.format(*args) + trace, **kwargs) 88 | else: 89 | delegate('%s', " ".join([repr(a) for a in args]) + trace, **kwargs) 90 | else: 91 | delegate(fmt, *args, **kwargs) 92 | 93 | warn = partial(_versatile_logprint, logger.warning) 94 | info = partial(_versatile_logprint, logger.info) 95 | debug = partial(_versatile_logprint, logger.debug, exceptions=True) 96 | error = partial(_versatile_logprint, logger.error) 97 | -------------------------------------------------------------------------------- /doc/source/ref/config-format.rst: -------------------------------------------------------------------------------- 1 | .. chaperone documentation 2 | configuration directives 3 | 4 | .. _config.file-format: 5 | 6 | Configuration File Format 7 | ========================= 8 | 9 | Chaperone's configuration is contained either in a single file, or a directory of configuration. 10 | You specify the configuration with the :ref:`--config ` switch on the command line. 11 | If none is specified, the default `/etc/chaperone.d` is used. If a directory is chosen, then only the 12 | top-level of the directory will be searched, and only files ending in ``.conf`` or ``.yaml`` will be 13 | recognized and read in alphabetic order. 14 | 15 | Configuration files are written using `YAML Version 2 `_. For example, you can 16 | define two chaperone services like this:: 17 | 18 | mysql.service: 19 | command: "/etc/init.d/mysql start" 20 | 21 | apache2.service: 22 | command: "/etc/init.d/apache2 start" 23 | after: mysql.service 24 | 25 | While the above works perfectly fine, we prefer to use the `YAML "flow style" `_ which 26 | looks very similar to JSON. 
In flow format, the above looks like this:: 27 | 28 | mysql.service: { 29 | command: "/etc/init.d/mysql start" 30 | } 31 | 32 | apache2.service: { 33 | command: "/etc/init.d/apache2 start", 34 | after: mysql.service, 35 | } 36 | 37 | The flow style is both easy to read, and works better when configurations become more complex. So, throughout 38 | the chaperone documentation, we'll stick to the flow format. 39 | 40 | Comments can be included both between lines and at the end of lines using the hash symbol (``#``). Here is a complete well-commented 41 | configuration section for a sample service that's included with the ``chaperone-baseimage`` docker image:: 42 | 43 | # This is a sample oneshot service that runs at IDLE time, just before 44 | # the console app, if present. It will output something so at least 45 | # something appears on the screen. 46 | 47 | sample.service: { 48 | # This is a oneshot service, but most likely a real applicaton will be another type 49 | # such as 'simple', or 'forking'. 50 | type: oneshot, 51 | enabled: true, # CHANGE TO 'false' so this app doesn't run any more 52 | 53 | # Command output goes directly to stdout instead of to the syslog. 54 | # Note that you normally want to have services output to the syslog, because 55 | # chaperone's logging directives allow you to echo syslog data to stdout. That's 56 | # a better place to control things (see 010-start.conf). 57 | command: "$(APPS_DIR)/bin/sample_app", 58 | stdout: inherit, 59 | 60 | # Because we're in the IDLE group, we will run only after all system services have 61 | # started. However, if there is a command line program, like /bin/bash, we want to 62 | # run before that one. All upper-case group names have special meanings. However, 63 | # You can define your own service groups, then use them to declare startup 64 | # dependencies. 65 | service_groups: "IDLE", 66 | before: "CONSOLE", 67 | 68 | # These environment variables will be added only for your service 69 | env_set: { 70 | 'INTERACTIVE': '$(_CHAP_INTERACTIVE)', 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /tests/el-tests/cron-1/chaperone.conf: -------------------------------------------------------------------------------- 1 | settings: { 2 | env_set: { PATH: "$(TESTHOME)/bin:$(PATH)" } 3 | } 4 | 5 | cron1-echo.service: { 6 | type: cron, 7 | enabled: "$(ENABLE_CRON1:-false)", 8 | interval: "* * * * * */10", 9 | command: "echo from cron1", 10 | } 11 | 12 | # ENABLE_APACHE4: Complex timing. Apache running in the foreground, but has untracked 13 | # processes. Cron job simulates what logrotate does, stopping and starting it. The key 14 | # here is that Chaperone shouldn't shut the system down inadvertently as processes terminate 15 | # and restart. 
16 | 17 | test4-apache.service: { 18 | type: simple, 19 | enabled: "$(ENABLE_APACHE4:-false)", 20 | command: "service apache2 start", 21 | uid: root, 22 | } 23 | 24 | test4-simrotate.service: { 25 | type: cron, 26 | command: "bash $(_CHAP_CONFIG_DIR)/simulate-rotate.sh test4-apache", 27 | enabled: "$(ENABLE_APACHE4:-false)", 28 | interval: "* * * * * */10", 29 | service_groups: IDLE, 30 | } 31 | 32 | # ENABLE_APACHE5: Same deal, but this time Chaperone knows about apache and telchap can do its job 33 | 34 | test5-apache.service: { 35 | type: simple, 36 | enabled: "$(ENABLE_APACHE5:-false)", 37 | command: "service apache2 start", 38 | pidfile: "/run/apache2/apache2.pid", 39 | uid: root, 40 | } 41 | 42 | test5-simrotate.service: { 43 | type: cron, 44 | command: "bash $(_CHAP_CONFIG_DIR)/simulate-rotate.sh test5-apache telchap", 45 | enabled: "$(ENABLE_APACHE5:-false)", 46 | interval: "* * * * * */10", 47 | service_groups: IDLE, 48 | } 49 | 50 | # ENABLE_APACHE6: Just a test to be sure we can kill Apache AND chaperone with a non-scheduled background job. 51 | 52 | test6-apache.service: { 53 | type: simple, 54 | enabled: "$(ENABLE_APACHE6:-false)", 55 | command: "service apache2 start", 56 | uid: root, 57 | } 58 | 59 | test6-simrotate.service: { 60 | type: oneshot, 61 | command: "bash -c 'sleep 5; kill-from-pidfile /run/apache2/apache2.pid'", 62 | enabled: "$(ENABLE_APACHE6:-false)", 63 | interval: "* * * * * */10", 64 | service_groups: IDLE, 65 | } 66 | 67 | # ENABLE_APACHE7: Just a test to be sure we can kill Apache AND chaperone with a non-scheduled background job. 68 | 69 | test7-apache.service: { 70 | type: simple, 71 | enabled: "$(ENABLE_APACHE7:-false)", 72 | command: "service apache2 start", 73 | uid: root, 74 | } 75 | 76 | test7-simrotate.service: { 77 | type: cron, 78 | command: "bash -c 'sleep 5; kill-from-pidfile /run/apache2/apache2.pid'", 79 | enabled: "$(ENABLE_APACHE7:-false)", 80 | interval: "* * * * * */8", 81 | service_groups: IDLE, 82 | } 83 | 84 | # ENABLE_APACHE8: Cron job kills apache and disables self, container should die 85 | 86 | test8-apache.service: { 87 | type: simple, 88 | enabled: "$(ENABLE_APACHE8:-false)", 89 | command: "service apache2 start", 90 | uid: root, 91 | } 92 | 93 | test8-simrotate.service: { 94 | type: cron, 95 | command: "bash -c 'sleep 5; kill-from-pidfile /run/apache2/apache2.pid; telchap stop test8-simrotate'", 96 | enabled: "$(ENABLE_APACHE8:-false)", 97 | interval: "* * * * * */8", 98 | service_groups: IDLE, 99 | } 100 | 101 | # Debugging output for all 102 | 103 | default.logging: { 104 | selector: "*.debug", 105 | stdout: true, 106 | } 107 | -------------------------------------------------------------------------------- /chaperone/cutil/events.py: -------------------------------------------------------------------------------- 1 | 2 | IS_EVENT = lambda e: e.startswith('on') and len(e) > 2 and e[2:3].isupper() 3 | 4 | def SWALLOW_EVENT(*args, **kwargs): 5 | pass 6 | 7 | 8 | class EventSource: 9 | """ 10 | This is a elegant generic class to set up and handle events. 11 | 12 | Events are always identified by keyword arguments of the format 13 | onXxxxx. 
14 | 15 | def __init__(self, **kwargs): 16 | events = EventSource() 17 | kwargs = events.add(**kwargs) 18 | 19 | def foo(self): 20 | self.events.onMiscEvent() 21 | 22 | 23 | """ 24 | 25 | __events = None 26 | 27 | def __init__(self, **kwargs): 28 | self.__events = dict() 29 | if kwargs: 30 | self._exec_kwargs(self._do_add, kwargs) 31 | 32 | def __getattribute__(self, key): 33 | if IS_EVENT(key): 34 | return self.__events.get(key, SWALLOW_EVENT) 35 | 36 | return object.__getattribute__(self, key) 37 | 38 | def _exec_kwargs(self, oper, kwargs): 39 | events = [e for e in kwargs.keys() if IS_EVENT(e)] 40 | if not events: 41 | return kwargs 42 | 43 | for e in events: 44 | oper(e, kwargs[e]) 45 | del kwargs[e] 46 | 47 | return kwargs 48 | 49 | def clear(self): 50 | "Removes all event handlers." 51 | self.__events.clear() 52 | 53 | def reset(self, **kwargs): 54 | "Removes all event handlers and sets new ones." 55 | self.__events.clear() 56 | return self._exec_kwargs(self._do_add, kwargs) 57 | 58 | def add(self, **kwargs): 59 | """ 60 | Adds one or more events: 61 | add(onError = handler, onExit = handler) 62 | 63 | Returns the kwargs not processed. 64 | """ 65 | return self._exec_kwargs(self._do_add, kwargs) 66 | 67 | def remove(self, **kwargs): 68 | """ 69 | Removes one or more events: 70 | remove(onError = handler, onExit = handler) 71 | 72 | Returns the kwargs not processed. 73 | """ 74 | return self._exec_kwargs(self._do_remove, kwargs) 75 | 76 | def _do_add(self, name, value): 77 | assert callable(value) 78 | 79 | e = self.__events.get(name) 80 | 81 | # No such event, add a singleton 82 | if not e: 83 | self.__events[name] = value 84 | return 85 | 86 | # Add to multi-event dispatcher 87 | try: 88 | e.__eventlist.append(value) 89 | return 90 | except AttributeError: 91 | pass 92 | 93 | # Create multi-event dispatcher 94 | 95 | displist = [e, value] 96 | def dispatcher(*args, _displist = displist, **kwargs): 97 | for edisp in _displist: 98 | edisp(*args, **kwargs) 99 | dispatcher.__eventlist = displist 100 | 101 | self.__events[name] = dispatcher 102 | 103 | def _do_remove(self, name, value): 104 | e = self.__events.get(name) 105 | 106 | if not name: 107 | return 108 | 109 | try: 110 | e.__eventlist.remove(value) 111 | except ValueError: 112 | return # not in list, ignore 113 | except AttributeError: 114 | try: 115 | del self.__events[name] # singleton 116 | except KeyError: 117 | return # no singleton, ignore 118 | -------------------------------------------------------------------------------- /chaperone/cproc/pt/cron.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from aiocron import crontab 3 | from chaperone.cutil.logging import error, warn, debug, info 4 | from chaperone.cutil.syslog_info import LOG_CRON 5 | from chaperone.cproc.subproc import SubProcess 6 | from chaperone.cutil.errors import ChParameterError 7 | 8 | _CRON_SPECIALS = { 9 | '@yearly': '0 0 1 1 *', 10 | '@annually': '0 0 1 1 *', 11 | '@monthly': '0 0 1 * *', 12 | '@weekly': '0 0 * * 0', 13 | '@daily': '0 0 * * *', 14 | '@hourly': '0 * * * *', 15 | } 16 | 17 | class CronProcess(SubProcess): 18 | 19 | syslog_facility = LOG_CRON 20 | 21 | _cron = None 22 | _fut_monitor = None 23 | 24 | def __init__(self, service, family=None): 25 | super().__init__(service, family) 26 | if not self.interval: 27 | raise ChParameterError("interval= property missing, required for cron service '{0}'".format(self.name)) 28 | 29 | # Support specials with or without the @ 30 | real_interval 
= _CRON_SPECIALS.get(self.interval) or _CRON_SPECIALS.get('@'+self.interval) or self.interval 31 | 32 | # make a status note 33 | self.note = "{0} ({1})".format(self.interval, real_interval) if self.interval != real_interval else real_interval 34 | 35 | self._cron = crontab(real_interval, func=self._cron_hit, start=False) 36 | 37 | def default_status(self): 38 | if self._cron.handle: 39 | return 'waiting' 40 | return None 41 | 42 | @property 43 | def scheduled(self): 44 | return self._cron and self._cron.handle 45 | 46 | @asyncio.coroutine 47 | def start(self): 48 | """ 49 | Takes over startup and sets up our cron loop to handle starts instead. 50 | """ 51 | if not self.enabled or self._cron.handle: 52 | return 53 | 54 | self.start_attempted = True 55 | 56 | # Start up cron 57 | try: 58 | self._cron.start() 59 | except Exception: 60 | raise ChParameterError("not a valid cron interval specification, '{0}'".format(self.interval)) 61 | 62 | self.loginfo("cron service {0} scheduled using interval spec '{1}'".format(self.name, self.interval)) 63 | 64 | @asyncio.coroutine 65 | def _cron_hit(self): 66 | if self.enabled: 67 | if not self.family.system_alive: 68 | return 69 | if self.running: 70 | self.logwarn("cron service {0} is still running when next interval expired, will not run again", self.name) 71 | else: 72 | self.loginfo("cron service {0} running CMD ( {1} )", self.name, self.command) 73 | try: 74 | yield from super().start() 75 | except Exception as ex: 76 | self.logerror(ex, "cron service {0} failed to start: {1}", self.name, ex) 77 | yield from self.reset(); 78 | 79 | @property 80 | def stoppable(self): 81 | return self.scheduled 82 | 83 | @asyncio.coroutine 84 | def stop(self): 85 | self._cron.stop() 86 | yield from super().stop() 87 | 88 | @asyncio.coroutine 89 | def process_started_co(self): 90 | if self._fut_monitor and not self._fut_monitor.cancelled(): 91 | self._fut_monitor.cancel() 92 | self._fut_monitor = None 93 | 94 | # We have a successful start. Monitor this service. 95 | 96 | self._fut_monitor = asyncio.async(self._monitor_service()) 97 | self.add_pending(self._fut_monitor) 98 | 99 | @asyncio.coroutine 100 | def _monitor_service(self): 101 | result = yield from self.wait() 102 | if isinstance(result, int) and result > 0: 103 | yield from self._abnormal_exit(result) 104 | else: 105 | yield from self.reset() 106 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/apps/etc/mysql/my.cnf: -------------------------------------------------------------------------------- 1 | # 2 | # The MySQL database server configuration file. 3 | # 4 | # You can copy this to one of: 5 | # - "/etc/mysql/my.cnf" to set global options, 6 | # - "~/.my.cnf" to set user-specific options. 7 | # 8 | # One can use all long options that the program supports. 9 | # Run program with --help to get a list of available options and with 10 | # --print-defaults to see which it would actually understand and use. 11 | # 12 | # For explanations see 13 | # http://dev.mysql.com/doc/mysql/en/server-system-variables.html 14 | 15 | # This will be passed to all mysql clients 16 | # It has been reported that passwords should be enclosed with ticks/quotes 17 | # escpecially if they contain "#" chars... 18 | # Remember to edit /etc/mysql/debian.cnf when changing the socket location. 
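Back to chaperone/cproc/pt/cron.py above: the constructor normalizes cron "specials" so they are accepted with or without the leading '@' before handing the result to aiocron. A standalone sketch of that lookup chain, using an abridged copy of the same table (illustrative only):

    _CRON_SPECIALS = {
        '@yearly':  '0 0 1 1 *',
        '@monthly': '0 0 1 * *',
        '@weekly':  '0 0 * * 0',
        '@daily':   '0 0 * * *',
        '@hourly':  '0 * * * *',
    }

    def normalize_interval(interval):
        # Same fallback chain as CronProcess.__init__: exact special,
        # '@'-prefixed special, otherwise pass the spec through unchanged.
        return _CRON_SPECIALS.get(interval) or _CRON_SPECIALS.get('@' + interval) or interval

    assert normalize_interval('hourly') == '0 * * * *'
    assert normalize_interval('@daily') == '0 0 * * *'
    assert normalize_interval('*/2 * * * *') == '*/2 * * * *'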
19 | [client] 20 | port = 3306 21 | #socket = /var/run/mysqld/mysqld.sock 22 | 23 | # Here is entries for some specific programs 24 | # The following values assume you have at least 32M ram 25 | 26 | # This was formally known as [safe_mysqld]. Both versions are currently parsed. 27 | [mysqld_safe] 28 | #socket = /var/run/mysqld/mysqld.sock 29 | nice = 0 30 | 31 | [mysqld] 32 | # 33 | # * Basic Settings 34 | # 35 | #pid-file = /var/run/mysqld/mysqld.pid 36 | #socket = /var/run/mysqld/mysqld.sock 37 | port = 3306 38 | basedir = /usr 39 | #datadir = /var/lib/mysql 40 | tmpdir = /tmp 41 | lc-messages-dir = /usr/share/mysql 42 | skip-external-locking 43 | # 44 | # Instead of skip-networking the default is now to listen only on 45 | # localhost which is more compatible and is not less secure. 46 | bind-address = 127.0.0.1 47 | # 48 | # * Fine Tuning 49 | # 50 | key_buffer = 16M 51 | max_allowed_packet = 16M 52 | thread_stack = 192K 53 | thread_cache_size = 8 54 | # This replaces the startup script and checks MyISAM tables if needed 55 | # the first time they are touched 56 | myisam-recover = BACKUP 57 | #max_connections = 100 58 | #table_cache = 64 59 | #thread_concurrency = 10 60 | # 61 | # * Query Cache Configuration 62 | # 63 | query_cache_limit = 1M 64 | query_cache_size = 16M 65 | # 66 | # * Logging and Replication 67 | # 68 | # Both location gets rotated by the cronjob. 69 | # Be aware that this log type is a performance killer. 70 | # As of 5.1 you can enable the log at runtime! 71 | #general_log_file = /var/log/mysql/mysql.log 72 | #general_log = 1 73 | # 74 | # Error log - should be very few entries. 75 | # 76 | #log_error = /var/log/mysql/error.log 77 | # 78 | # Here you can see queries with especially long duration 79 | #log_slow_queries = /var/log/mysql/mysql-slow.log 80 | #long_query_time = 2 81 | #log-queries-not-using-indexes 82 | # 83 | # The following can be used as easy to replay backup logs or for replication. 84 | # note: if you are setting up a replication slave, see README.Debian about 85 | # other settings you may need to change. 86 | #server-id = 1 87 | #log_bin = /var/log/mysql/mysql-bin.log 88 | expire_logs_days = 10 89 | max_binlog_size = 100M 90 | #binlog_do_db = include_database_name 91 | #binlog_ignore_db = include_database_name 92 | # 93 | # * InnoDB 94 | # 95 | # InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. 96 | # Read the manual for more InnoDB related options. There are many! 97 | # 98 | # * Security Features 99 | # 100 | # Read the manual, too, if you want chroot! 101 | # chroot = /var/lib/mysql/ 102 | # 103 | # For generating SSL certificates I recommend the OpenSSL GUI "tinyca". 104 | # 105 | # ssl-ca=/etc/mysql/cacert.pem 106 | # ssl-cert=/etc/mysql/server-cert.pem 107 | # ssl-key=/etc/mysql/server-key.pem 108 | 109 | 110 | 111 | [mysqldump] 112 | quick 113 | quote-names 114 | max_allowed_packet = 16M 115 | 116 | [mysql] 117 | #no-auto-rehash # faster start of mysql but no tab completition 118 | 119 | [isamchk] 120 | key_buffer = 16M 121 | 122 | # 123 | # * IMPORTANT: Additional settings that can override those from this file! 124 | # The files must end with '.cnf', otherwise they'll be ignored. 
125 | # 126 | #!includedir /etc/mysql/conf.d/ 127 | -------------------------------------------------------------------------------- /chaperone/cproc/watcher.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | import threading 4 | 5 | from functools import partial 6 | from asyncio.unix_events import BaseChildWatcher 7 | 8 | from chaperone.cutil.logging import warn, info, debug 9 | from chaperone.cutil.proc import ProcStatus 10 | from chaperone.cutil.misc import get_signal_name 11 | from chaperone.cutil.events import EventSource 12 | 13 | class InitChildWatcher(BaseChildWatcher): 14 | """An init-responsible child watcher. 15 | 16 | Plugs into the asyncio child watcher framework to allow harvesting of both known and unknown 17 | child processes. 18 | """ 19 | def __init__(self, **kwargs): 20 | super().__init__() 21 | self.events = EventSource(**kwargs) 22 | self._callbacks = {} 23 | self._lock = threading.Lock() 24 | self._zombies = {} 25 | self._forks = 0 26 | self._no_processes = None 27 | self._had_children = False 28 | 29 | def close(self): 30 | self._callbacks.clear() 31 | self._zombies.clear() 32 | super().close() 33 | 34 | def __enter__(self): 35 | with self._lock: 36 | self._forks += 1 37 | 38 | return self 39 | 40 | def __exit__(self, a, b, c): 41 | with self._lock: 42 | self._forks -= 1 43 | 44 | if self._forks or not self._zombies: 45 | return 46 | 47 | collateral_victims = str(self._zombies) 48 | self._zombies.clear() 49 | 50 | info( 51 | "Caught subprocesses termination from unknown pids: %s", 52 | collateral_victims) 53 | 54 | @property 55 | def number_of_waiters(self): 56 | return len(self._callbacks) 57 | 58 | def add_child_handler(self, pid, callback, *args): 59 | assert self._forks, "Must use the context manager" 60 | with self._lock: 61 | try: 62 | returncode = self._zombies.pop(pid) 63 | except KeyError: 64 | # The child is running. 65 | self._callbacks[pid] = callback, args 66 | return 67 | 68 | # The child is dead already. We can fire the callback. 69 | callback(pid, returncode, *args) 70 | 71 | def remove_child_handler(self, pid): 72 | try: 73 | del self._callbacks[pid] 74 | return True 75 | except KeyError: 76 | return False 77 | 78 | def check_processes(self): 79 | # Checks to see if any processes terminated, and triggers onNoProcesses 80 | self._do_waitpid_all() 81 | 82 | def _do_waitpid_all(self): 83 | # Because of signal coalescing, we must keep calling waitpid() as 84 | # long as we're able to reap a child. 85 | while True: 86 | try: 87 | pid, status = os.waitpid(-1, os.WNOHANG) 88 | debug("REAP pid={0},status={1}".format(pid,status)) 89 | except ChildProcessError: 90 | # No more child processes exist. 91 | if self._had_children: 92 | debug("no child processes present") 93 | self.events.onNoProcesses() 94 | return 95 | else: 96 | self._had_children = True 97 | if pid == 0: 98 | # A child process is still alive. 99 | return 100 | 101 | returncode = ProcStatus(status) 102 | 103 | with self._lock: 104 | try: 105 | callback, args = self._callbacks.pop(pid) 106 | except KeyError: 107 | # unknown child 108 | if self._forks: 109 | # It may not be registered yet. 
110 | self._zombies[pid] = returncode 111 | continue 112 | callback = None 113 | 114 | if callback is None: 115 | info( 116 | "Caught subprocess termination from unknown pid: " 117 | "%d -> %d", pid, returncode) 118 | else: 119 | callback(pid, returncode, *args) 120 | -------------------------------------------------------------------------------- /sandbox/distserv/etc/apache2.conf: -------------------------------------------------------------------------------- 1 | # This is the main Apache server configuration file. It contains the 2 | # configuration directives that give the server its instructions. 3 | # See http://httpd.apache.org/docs/2.4/ for detailed information about 4 | # the directives and /usr/share/doc/apache2/README.Debian about Debian specific 5 | # hints. 6 | 7 | # This is a CHAPERONE-specific configuration designed to keep things lean. It is based loosely 8 | # on Ubuntu 14.04 /etc/apache2/apache2.conf, and every attempt has been made to assure that 9 | # system-installed modules and configurations will work. 10 | 11 | # The chaperone configuration is designed to work within a self-contained application directory 12 | # defined by APPS_DIR. Note that it may be a user directory, and thus chaperone allows 13 | # Apache to run entirely under any user account, along with a MySQL server that is also 14 | # sequestered in the same way. This means that you can have containers "point" to apps 15 | # directories on your host server and manage per-container resources consistently in 16 | # those directories during development, until you move the entire apps directory into 17 | # a production container environment or image. 18 | 19 | # 20 | # The accept serialization lock file MUST BE STORED ON A LOCAL DISK. 21 | # 22 | Mutex file:${APACHE_LOCK_DIR} default 23 | 24 | PidFile ${APACHE_PID_FILE} 25 | 26 | # Timeout: The number of seconds before receives and sends time out. 27 | Timeout 300 28 | KeepAlive On 29 | MaxKeepAliveRequests 100 30 | KeepAliveTimeout 5 31 | 32 | # Note that the user and group are defined in chaperone.d/120-apache.conf 33 | #User ${APACHE_RUN_USER} 34 | #Group ${APACHE_RUN_GROUP} 35 | 36 | # The default is off because it'd be overall better for the net if people 37 | # had to knowingly turn this feature on, since enabling it means that 38 | # each client request will result in AT LEAST one lookup request to the 39 | # nameserver. 40 | HostnameLookups Off 41 | 42 | # ErrorLog: The location of the error log file. 43 | # We dump errors to syslog so that we can easily duplicate it to the container stderr if we want. 44 | ErrorLog syslog:local1 45 | 46 | # Available values: trace8, ..., trace1, debug, info, notice, warn, 47 | # error, crit, alert, emerg. 48 | LogLevel warn 49 | 50 | # Include standard Debian/Ubuntu module configuration: 51 | Include /etc/apache2/mods-enabled/*.load 52 | Include /etc/apache2/mods-enabled/*.conf 53 | 54 | # CHAPERONE: Override to listen on 8080 and 8443 55 | Listen 8080 56 | 57 | 58 | Listen 8443 59 | 60 | 61 | Listen 8443 62 | 63 | 64 | # Sets the default security model of the Apache2 HTTPD server. It does 65 | # not allow access to the root filesystem outside of /usr/share and /var/www. 66 | # The former is used by web applications packaged in Debian, 67 | # the latter may be used for local directories served by the web server. If 68 | # your system is serving content from a sub-directory in /srv you must allow 69 | # access here, or in any related virtual host. 
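Back to chaperone/cproc/watcher.py above: because signals coalesce, the watcher must keep calling waitpid() until nothing more can be reaped. A stripped-down, standalone sketch of that reaping loop (illustrative only, not the InitChildWatcher class itself):

    import os

    def reap_exited_children():
        """Collect every child that has already exited; return {pid: raw_status}."""
        reaped = {}
        while True:
            try:
                pid, status = os.waitpid(-1, os.WNOHANG)
            except ChildProcessError:
                break                # no child processes exist at all
            if pid == 0:
                break                # children exist, but none have exited yet
            reaped[pid] = status
        return reaped

In the real watcher each reaped status is wrapped in ProcStatus and dispatched to the callback registered for that pid, or stashed as a zombie entry if the pid is not yet known.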
70 | 71 | Options FollowSymLinks 72 | AllowOverride None 73 | Require all denied 74 | 75 | 76 | 77 | AllowOverride None 78 | Require all granted 79 | 80 | 81 | DocumentRoot ${CHAPERONE_ROOT} 82 | 83 | 84 | Options Indexes FollowSymLinks 85 | AllowOverride None 86 | Require all granted 87 | 88 | 89 | AccessFileName .htaccess 90 | 91 | # The following lines prevent .htaccess and .htpasswd files from being 92 | # viewed by Web clients. 93 | 94 | Require all denied 95 | 96 | 97 | 98 | # The following directives define some format nicknames for use with 99 | # a CustomLog directive. 100 | LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined 101 | LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined 102 | LogFormat "%h %l %u %t \"%r\" %>s %O" common 103 | LogFormat "%{Referer}i -> %U" referer 104 | LogFormat "%{User-agent}i" agent 105 | 106 | # Include of directories ignores editors' and dpkg's backup files, 107 | # see README.Debian for details. 108 | 109 | # Include generic snippets of statements 110 | IncludeOptional /etc/apache2/conf-enabled/*.conf 111 | -------------------------------------------------------------------------------- /tests/bin/daemonutil.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Generic linux daemon base class for python 3.x. 4 | 5 | From: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/ 6 | 7 | Thank you! 8 | 9 | """ 10 | 11 | import sys, os, time, atexit, signal 12 | 13 | class Daemon: 14 | """A generic daemon class. 15 | 16 | Usage: subclass the daemon class and override the run() method.""" 17 | 18 | def __init__(self, pidfile = None): 19 | self.pidfile = pidfile 20 | 21 | def daemonize(self, exitwith = 0): 22 | """Deamonize class. UNIX double fork mechanism.""" 23 | 24 | sys.stdout.flush() 25 | sys.stderr.flush() 26 | 27 | try: 28 | pid = os.fork() 29 | if pid > 0: 30 | # exit first parent 31 | sys.exit(exitwith) 32 | except OSError as err: 33 | sys.stderr.write('fork #1 failed: {0}\n'.format(err)) 34 | sys.exit(1) 35 | 36 | # decouple from parent environment 37 | os.chdir('/') 38 | os.setsid() 39 | os.umask(0) 40 | 41 | # do second fork 42 | try: 43 | pid = os.fork() 44 | if pid > 0: 45 | 46 | # exit from second parent 47 | sys.exit(0) 48 | except OSError as err: 49 | sys.stderr.write('fork #2 failed: {0}\n'.format(err)) 50 | sys.exit(1) 51 | 52 | # redirect standard file descriptors 53 | sys.stdout.flush() 54 | sys.stderr.flush() 55 | si = open(os.devnull, 'r') 56 | so = open(os.devnull, 'a+') 57 | se = open(os.devnull, 'a+') 58 | 59 | os.dup2(si.fileno(), sys.stdin.fileno()) 60 | os.dup2(so.fileno(), sys.stdout.fileno()) 61 | os.dup2(se.fileno(), sys.stderr.fileno()) 62 | 63 | # write pidfile 64 | if self.pidfile: 65 | atexit.register(self.delpid) 66 | 67 | pid = str(os.getpid()) 68 | with open(self.pidfile,'w+') as f: 69 | f.write(pid + '\n') 70 | 71 | def delpid(self): 72 | os.remove(self.pidfile) 73 | 74 | def start(self, exitwith = 0): 75 | """Start the daemon.""" 76 | 77 | # Check for a pidfile to see if the daemon already runs 78 | if self.pidfile: 79 | try: 80 | with open(self.pidfile,'r') as pf: 81 | 82 | pid = int(pf.read().strip()) 83 | except IOError: 84 | pid = None 85 | 86 | if pid: 87 | message = "pidfile {0} already exist. 
" + \ 88 | "Daemon already running?\n" 89 | sys.stderr.write(message.format(self.pidfile)) 90 | sys.exit(1) 91 | 92 | # Start the daemon 93 | self.daemonize(exitwith) 94 | self.run() 95 | 96 | def stop(self): 97 | """Stop the daemon.""" 98 | 99 | assert self.pidfile, "Requires pidfile to use stop()" 100 | 101 | # Get the pid from the pidfile 102 | try: 103 | with open(self.pidfile,'r') as pf: 104 | pid = int(pf.read().strip()) 105 | except IOError: 106 | pid = None 107 | 108 | if not pid: 109 | message = "pidfile {0} does not exist. " + \ 110 | "Daemon not running?\n" 111 | sys.stderr.write(message.format(self.pidfile)) 112 | return # not an error in a restart 113 | 114 | # Try killing the daemon process 115 | try: 116 | while 1: 117 | os.kill(pid, signal.SIGTERM) 118 | time.sleep(0.1) 119 | except OSError as err: 120 | e = str(err.args) 121 | if e.find("No such process") > 0: 122 | if os.path.exists(self.pidfile): 123 | os.remove(self.pidfile) 124 | else: 125 | print (str(err.args)) 126 | sys.exit(1) 127 | 128 | def restart(self): 129 | """Restart the daemon.""" 130 | self.stop() 131 | self.start() 132 | 133 | def run(self): 134 | """You should override this method when you subclass Daemon. 135 | 136 | It will be called after the process has been daemonized by 137 | start() or restart().""" 138 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # ![](https://s.gravatar.com/avatar/62c4c783c4d7233c73f3a114578df650.jpg?s=50) Chaperone 3 | 4 | [![Gitter](https://badges.gitter.im/Join_Chat.svg)](https://gitter.im/garywiz/chaperone?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![PyPI version](https://badge.fury.io/py/chaperone.svg)](https://badge.fury.io/py/chaperone) 5 | 6 | Chaperone is a lean init-style startup manager for Docker-like containers. It runs as a single lightweight full-featured process which runs at the root of a docker container tree and provides all of the following functionality, plus much more: 7 | 8 | * Monitoring for all processes in the container, automatically shutting down the 9 | container when the last process exits. 10 | * A complete, configurable syslog facility built in and provided on /dev/log 11 | so daemons and other services can have output captured. Configurable 12 | to handle log-file rotation, duplication to stdout/stderr, and full Linux 13 | logging facility, severity support. No syslog daemon is required in your 14 | container. 15 | * The ability to start up system services in dependency order, with options 16 | for per-service environment variables, restart options, and stdout/stderr capture either 17 | to the log service or stdout. 18 | * A built-in cron scheduling service. 19 | * Emulation of systemd notifications (sd_notify) so services can post 20 | ready and status notifications to chaperone. 21 | * Process monitoring and zombie elimination, along with organized system 22 | shutdown to assure all daemons shut-down gracefully. 23 | * The ability to have an optional controlling process, specified on the 24 | docker command line, to simplify creating containers which have development 25 | mode vs. production mode. 26 | * Complete configuration using a ``chaperone.d`` directory which can be located 27 | in various places, and even allows different configurations 28 | within the container, triggered based upon which user is selected at start-up. 
29 | * Default behavior designed out-of-the-box to work with simple Docker containers 30 | for quick start-up for lean containers. 31 | * More... 32 | 33 | If you want to try it out quickly, the best place to start is on the 34 | [chaperone-docker](https://github.com/garywiz/chaperone-docker) repository 35 | page. There is a quick section called "Try it out" that uses images 36 | available now on Docker Hub. 37 | 38 | For full details of features 39 | and usage: [see the documentation](http://garywiz.github.io/chaperone/index.html). 40 | 41 | There is some debate about whether docker containers should be transformed into 42 | complete systems (so-called "fat containers"). However, it is clear that many 43 | containers contain one or more services to provide a single "composite feature", 44 | but that such containers need a special, more streamlined approach to managing 45 | a number of common daemons. 46 | 47 | Chaperone is the best answer I've come up with so far, and was inspired by 48 | The [Phusion baseimage-docker](http://phusion.github.io/baseimage-docker/) approach. 49 | However, unlike the Phusion image, it does not require adding daemons for logging, 50 | system services (such as runit). Chaperone is designed to be self-contained. 51 | 52 | Status 53 | ------ 54 | 55 | Chaperone is now stable and ready for production. If you are currently starting up your 56 | container services with Bash scripts, Chaperone is probably a much better choice. 57 | 58 | Full status is [now part of the documentation](http://garywiz.github.io/chaperone/status.html). 59 | 60 | Downloading and Installing 61 | -------------------------- 62 | 63 | The easiest way to install `chaperone`` is using ``pip`` from the https://pypi.python.org/pypi/chaperone package: 64 | 65 | # Ubuntu or debian prerequisites... 66 | apt-get install python3-pip 67 | 68 | # chaperone installation (may be all you need) 69 | pip3 install chaperone 70 | 71 | License 72 | ------- 73 | 74 | Copyright (c) 2015, Gary J. Wisniewski 75 | 76 | Licensed under the Apache License, Version 2.0 (the "License"); 77 | you may not use this file except in compliance with the License. 78 | You may obtain a copy of the License at 79 | 80 | http://www.apache.org/licenses/LICENSE-2.0 81 | 82 | Unless required by applicable law or agreed to in writing, software 83 | distributed under the License is distributed on an "AS IS" BASIS, 84 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 85 | See the License for the specific language governing permissions and 86 | limitations under the License. 87 | -------------------------------------------------------------------------------- /samples/chaperone-lamp/apps/etc/apache2.conf: -------------------------------------------------------------------------------- 1 | # This is the main Apache server configuration file. It contains the 2 | # configuration directives that give the server its instructions. 3 | # See http://httpd.apache.org/docs/2.4/ for detailed information about 4 | # the directives and /usr/share/doc/apache2/README.Debian about Debian specific 5 | # hints. 6 | 7 | # This is a CHAPERONE-specific configuration designed to keep things lean. It is based loosely 8 | # on Ubuntu 14.04 /etc/apache2/apache2.conf, and every attempt has been made to assure that 9 | # system-installed modules and configurations will work. 10 | 11 | # The chaperone configuration is designed to work within a self-contained application directory 12 | # defined by APPS_DIR. 
Note that it may be a user directory, and thus chaperone allows 13 | # Apache to run entirely under any user account, along with a MySQL server that is also 14 | # sequestered in the same way. This means that you can have containers "point" to apps 15 | # directories on your host server and manage per-container resources consistently in 16 | # those directories during development, until you move the entire apps directory into 17 | # a production container environment or image. 18 | 19 | # 20 | # The accept serialization lock file MUST BE STORED ON A LOCAL DISK. 21 | # 22 | Mutex file:${APACHE_LOCK_DIR} default 23 | 24 | PidFile ${APACHE_PID_FILE} 25 | 26 | # Timeout: The number of seconds before receives and sends time out. 27 | Timeout 300 28 | KeepAlive On 29 | MaxKeepAliveRequests 100 30 | KeepAliveTimeout 5 31 | 32 | # Note that the user and group are defined in chaperone.d/120-apache.conf 33 | #User ${APACHE_RUN_USER} 34 | #Group ${APACHE_RUN_GROUP} 35 | 36 | # The default is off because it'd be overall better for the net if people 37 | # had to knowingly turn this feature on, since enabling it means that 38 | # each client request will result in AT LEAST one lookup request to the 39 | # nameserver. 40 | HostnameLookups Off 41 | 42 | # ErrorLog: The location of the error log file. 43 | # We dump errors to syslog so that we can easily duplicate it to the container stderr if we want. 44 | ErrorLog syslog:local1 45 | 46 | # Available values: trace8, ..., trace1, debug, info, notice, warn, 47 | # error, crit, alert, emerg. 48 | LogLevel warn 49 | 50 | # Include standard Debian/Ubuntu module configuration: 51 | Include /etc/apache2/mods-enabled/*.load 52 | Include /etc/apache2/mods-enabled/*.conf 53 | 54 | # CHAPERONE: Override to listen on 8080 and 8443 55 | Listen 8080 56 | 57 | 58 | Listen 8443 59 | 60 | 61 | Listen 8443 62 | 63 | 64 | # Sets the default security model of the Apache2 HTTPD server. It does 65 | # not allow access to the root filesystem outside of /usr/share and /var/www. 66 | # The former is used by web applications packaged in Debian, 67 | # the latter may be used for local directories served by the web server. If 68 | # your system is serving content from a sub-directory in /srv you must allow 69 | # access here, or in any related virtual host. 70 | 71 | Options FollowSymLinks 72 | AllowOverride None 73 | Require all denied 74 | 75 | 76 | 77 | AllowOverride None 78 | Require all granted 79 | 80 | 81 | 82 | Options Indexes FollowSymLinks 83 | AllowOverride None 84 | Require all granted 85 | 86 | 87 | AccessFileName .htaccess 88 | 89 | # The following lines prevent .htaccess and .htpasswd files from being 90 | # viewed by Web clients. 91 | 92 | Require all denied 93 | 94 | 95 | 96 | # The following directives define some format nicknames for use with 97 | # a CustomLog directive. 98 | LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined 99 | LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined 100 | LogFormat "%h %l %u %t \"%r\" %>s %O" common 101 | LogFormat "%{Referer}i -> %U" referer 102 | LogFormat "%{User-agent}i" agent 103 | 104 | # Include of directories ignores editors' and dpkg's backup files, 105 | # see README.Debian for details. 
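Looking back at tests/bin/daemonutil.py: the Daemon base class is used by subclassing it and overriding run(); the constructor takes an optional pidfile path, and start() performs the double fork before calling run(). A minimal illustrative subclass (the import path and pidfile location here are hypothetical):

    import time
    from daemonutil import Daemon    # tests/bin/daemonutil.py, shown above

    class HeartbeatDaemon(Daemon):
        def run(self):
            # Runs only after daemonize() has detached from the terminal.
            while True:
                time.sleep(60)

    if __name__ == '__main__':
        # '/tmp/heartbeat.pid' is a made-up location for this sketch.
        HeartbeatDaemon('/tmp/heartbeat.pid').start()

stop() and restart() use the same pidfile to locate and signal the running process.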
106 | 107 | # Include generic snippets of statements 108 | IncludeOptional /etc/apache2/conf-enabled/*.conf 109 | 110 | ## 111 | ## CHAPERONE SPECIFICS 112 | ## 113 | 114 | # Apache configuration files for chaperone sites (Note that we do NOT look in /etc/apache2/sites-enabled) 115 | IncludeOptional ${APACHE_SITES_DIR}/sites.d/*.conf 116 | 117 | # Point MySQL socket to the right spot 118 | php_admin_value mysql.default_socket ${APPS_DIR}/var/run/mysqld.sock 119 | php_admin_value mysqli.default_socket ${APPS_DIR}/var/run/mysqld.sock 120 | -------------------------------------------------------------------------------- /doc/source/guide/chap-docker-simple.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _chap.example-docker: 3 | 4 | A Simple Docker Example 5 | ======================= 6 | 7 | The following example creates a simple Docker container running an Apache daemon and an SSH server, both 8 | managed by Chaperone. 9 | 10 | In this example, we'll use Chaperone to run both processes as ``root``, configured to work exactly 11 | as they were configured in the Ubuntu distribution. This example is based upon a 12 | `similar example from docker.com `_ which 13 | uses `Supervisor `_ as it's process manager. Chaperone provides a far 14 | more powerful featureset than 'supervisor' with a much smaller container footprint. 15 | 16 | Creating a Dockerfile 17 | --------------------- 18 | 19 | We'll start by creating a basic ``Dockerfile`` for our new image:: 20 | 21 | FROM ubuntu:14.04 22 | MAINTAINER garyw@blueseastech.com 23 | 24 | Now, we can install ``openssh-server``, ``apache2``, and ``python3-pip``, then use 25 | ``pip3`` to install Chaperone itself. We also need to create a few directories 26 | that will be needed by the installed software:: 27 | 28 | RUN apt-get update && \ 29 | apt-get install -y openssh-server apache2 python3-pip && \ 30 | pip3 install chaperone 31 | RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /etc/chaperone.d 32 | 33 | Adding Chaperone's Configuration File 34 | ------------------------------------- 35 | 36 | Now, let's add a configuration file for Chaperone. Chaperone looks in 37 | ``/etc/chaperone.d`` by default and will read any configuration files it finds there. 38 | So, we'll copy our single configuration there so Chaperone reads it upon startup:: 39 | 40 | COPY chaperone.conf /etc/chaperone.d/chaperone.conf 41 | 42 | Let's take a look at what's inside ``chaperone.conf``:: 43 | 44 | sshd.service: { 45 | command: "/usr/sbin/sshd -D" 46 | } 47 | 48 | apache2.service: { 49 | command: "bash -c 'source /etc/apache2/envvars && exec /usr/sbin/apache2 -DFOREGROUND'", 50 | } 51 | 52 | console.logging: { 53 | selector: '*.warn', 54 | stdout: true, 55 | } 56 | 57 | The above is a complete configuration file with three sections. the first two start up 58 | both ``sshd`` and ``apache2``. The third section tells Chaperone to intercept all ``syslog`` 59 | messages and redirect them to ``stdout``. That way, we'll be able to use the ``docker logs`` 60 | command to inspect the status of the running container. 61 | 62 | The above is really a simple configuration, but you can use the complete :ref:`set of service directives ` 63 | to control how each service behaves. 
64 | 65 | Exposing Ports and Running Chaperone 66 | ------------------------------------ 67 | 68 | Let's finish our ``Dockerfile`` by exposing some required ports and specifying Chaperone 69 | as the ``ENTRYPOINT`` so that Chaperone will start first and manage our container:: 70 | 71 | EXPOSE 22 80 72 | ENTRYPOINT ["/usr/local/bin/chaperone"] 73 | 74 | Here, we've exposed ports 22 and 80 on the container and we're running the 75 | ``/usr/local/bin/chaperone`` binary when the container launches. 76 | 77 | Building the Image 78 | ------------------ 79 | 80 | We can now build our new image:: 81 | 82 | $ docker build -t /chap-sample . 83 | 84 | Running the Container 85 | --------------------- 86 | 87 | Once you've built an image, you can launch a container from it:: 88 | 89 | $ docker run -p 22 -p 80 -t -i /chap-sample 90 | 91 | Jul 21 04:08:19 6d3e4eee4265 apache2[6]: AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 172.17.0.90. Set the 'ServerName' directive globally to suppress this message 92 | 93 | And when you want to stop it, just use ``Ctrl-C``:: 94 | 95 | C-c C-c^C 96 | Ctrl-C ... killing chaperone. 97 | Jul 21 04:08:23 6d3e4eee4265 chaperone[1]: Request made to kill system. (forced) 98 | Jul 21 04:08:23 6d3e4eee4265 chaperone[1]: sshd.service terminated abnormally with 99 | 100 | What's Next? 101 | ------------ 102 | 103 | You can build upon the above simple sample if you want. That gives you maximum flexibility to design 104 | your container service environmetn exactly as you want. If so, we recommend you scan the 105 | :ref:`reference` section so you know what features are available. 106 | 107 | If you want, you can also use the complete set of pre-built Chaperone images 108 | `available here on Docker Hub `_. These images 109 | are excellent examples of complete Chaperone-managed development and production environments. 110 | You can learn more by reading the introduction to these 111 | images `on their GitHub page `_. 112 | -------------------------------------------------------------------------------- /doc/docserver/etc/apache2.conf: -------------------------------------------------------------------------------- 1 | # This is the main Apache server configuration file. It contains the 2 | # configuration directives that give the server its instructions. 3 | # See http://httpd.apache.org/docs/2.4/ for detailed information about 4 | # the directives and /usr/share/doc/apache2/README.Debian about Debian specific 5 | # hints. 6 | 7 | # This is a CHAPERONE-specific configuration designed to keep things lean. It is based loosely 8 | # on Ubuntu 14.04 /etc/apache2/apache2.conf, and every attempt has been made to assure that 9 | # system-installed modules and configurations will work. 10 | 11 | # The chaperone configuration is designed to work within a self-contained application directory 12 | # defined by APPS_DIR. Note that it may be a user directory, and thus chaperone allows 13 | # Apache to run entirely under any user account, along with a MySQL server that is also 14 | # sequestered in the same way. This means that you can have containers "point" to apps 15 | # directories on your host server and manage per-container resources consistently in 16 | # those directories during development, until you move the entire apps directory into 17 | # a production container environment or image. 18 | 19 | # 20 | # The accept serialization lock file MUST BE STORED ON A LOCAL DISK. 
21 | # 22 | Mutex file:${APACHE_LOCK_DIR} default 23 | 24 | PidFile ${APACHE_PID_FILE} 25 | 26 | # Timeout: The number of seconds before receives and sends time out. 27 | Timeout 300 28 | KeepAlive On 29 | MaxKeepAliveRequests 100 30 | KeepAliveTimeout 5 31 | 32 | # Note that the user and group are defined in chaperone.d/120-apache2.conf 33 | #User ${APACHE_RUN_USER} 34 | #Group ${APACHE_RUN_GROUP} 35 | 36 | # The default is off because it'd be overall better for the net if people 37 | # had to knowingly turn this feature on, since enabling it means that 38 | # each client request will result in AT LEAST one lookup request to the 39 | # nameserver. 40 | HostnameLookups Off 41 | 42 | # ErrorLog: The location of the error log file. 43 | # We dump errors to syslog so that we can easily duplicate it to the container stderr if we want. 44 | ErrorLog syslog:local1 45 | 46 | # Available values: trace8, ..., trace1, debug, info, notice, warn, 47 | # error, crit, alert, emerg. 48 | LogLevel warn 49 | 50 | # Include standard Debian/Ubuntu module configuration: 51 | Include /etc/apache2/mods-enabled/*.load 52 | Include /etc/apache2/mods-enabled/*.conf 53 | 54 | # CHAPERONE: Override to listen on 8080 and 8443 55 | Listen 8080 56 | 57 | <IfModule ssl_module> 58 | Listen 8443 59 | </IfModule> 60 | <IfModule mod_gnutls.c> 61 | Listen 8443 62 | </IfModule> 63 | 64 | # Sets the default security model of the Apache2 HTTPD server. It does 65 | # not allow access to the root filesystem outside of /usr/share and /var/www. 66 | # The former is used by web applications packaged in Debian, 67 | # the latter may be used for local directories served by the web server. If 68 | # your system is serving content from a sub-directory in /srv you must allow 69 | # access here, or in any related virtual host. 70 | <Directory /> 71 | Options FollowSymLinks 72 | AllowOverride None 73 | Require all denied 74 | </Directory> 75 | 76 | <Directory /usr/share> 77 | AllowOverride None 78 | Require all granted 79 | </Directory> 80 | 81 | <Directory /var/www/> 82 | Options Indexes FollowSymLinks 83 | AllowOverride None 84 | Require all granted 85 | </Directory> 86 | 87 | AccessFileName .htaccess 88 | 89 | # The following lines prevent .htaccess and .htpasswd files from being 90 | # viewed by Web clients. 91 | <FilesMatch "^\.ht"> 92 | Require all denied 93 | </FilesMatch> 94 | 95 | 96 | # The following directives define some format nicknames for use with 97 | # a CustomLog directive. 98 | LogFormat "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined 99 | LogFormat "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined 100 | LogFormat "%h %l %u %t \"%r\" %>s %O" common 101 | LogFormat "%{Referer}i -> %U" referer 102 | LogFormat "%{User-agent}i" agent 103 | 104 | # Include of directories ignores editors' and dpkg's backup files, 105 | # see README.Debian for details. 106 | 107 | # Include generic snippets of statements 108 | IncludeOptional /etc/apache2/conf-enabled/*.conf 109 | 110 | ## 111 | ## CHAPERONE SPECIFICS 112 | ## 113 | 114 | # Point MySQL socket to the right spot 115 | #php_admin_value mysql.default_socket ${MYSQL_UNIX_PORT} 116 | #php_admin_value mysqli.default_socket ${MYSQL_UNIX_PORT} 117 | 118 | # Site definition added here 119 | 120 | 121 | <VirtualHost *:8080> 122 | # The ServerName directive sets the request scheme, hostname and port that 123 | # the server uses to identify itself.
124 | #ServerName www.example.com 125 | 126 | ServerAdmin webmaster@localhost 127 | DocumentRoot ${APPS_DIR}/../build/html 128 | 129 | # Errors go to the syslog so they can be duplicated to the console easily 130 | ErrorLog syslog:local1 131 | CustomLog ${APACHE_LOG_DIR}/default-access.log combined 132 | 133 | </VirtualHost> 134 | -------------------------------------------------------------------------------- /doc/source/guide/chap-docker-smaller.rst: -------------------------------------------------------------------------------- 1 | 2 | .. _chap.small-docker: 3 | 4 | Creating Small Docker Images 5 | ============================ 6 | 7 | The default official Docker images are not always very compact. For example, the official Ubuntu image 8 | is about 180MB, and the official Java image is a whopping 810MB! 9 | 10 | This is made worse by some distributions (like Ubuntu and Debian) whose defaults don't cater 11 | to small image sizes and instead try to assure that anything you *might* need is installed. So, for example, 12 | installing Python's package manager ``pip`` will cause about 200MB of extra packages to be installed just 13 | "in case" some package requires the full compiler toolchain (which most Python packages, including Chaperone, do not). 14 | 15 | Chaperone, including all of its dependencies, takes up no more than 35-40MB, including Python3. 16 | 17 | So, here is a quick guide to creating small Chaperone images with a minimum of effort. 18 | 19 | 20 | Eliminating Ubuntu/Debian Recommended Packages 21 | ---------------------------------------------- 22 | 23 | The simplest thing you can do when installing packages under Ubuntu or Debian is to use the ``--no-install-recommends`` switch 24 | when you run ``apt-get``. For example, the :ref:`Simple Docker Example ` section recommends you install Chaperone, Apache and SSH like this:: 25 | 26 | RUN apt-get update && \ 27 | apt-get install -y openssh-server apache2 python3-pip && \ 28 | pip3 install chaperone 29 | RUN mkdir -p /var/lock/apache2 /var/run/apache2 /var/run/sshd /etc/chaperone.d 30 | 31 | If you do, you end up with a Docker image which is 451MB:: 32 | 33 | $ docker images 34 | REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE 35 | sample-simple latest 328d42703323 34 minutes ago 451.8 MB 36 | $ 37 | 38 | However, if you change the install commands to:: 39 | 40 | RUN apt-get update && \ 41 | apt-get install -y --no-install-recommends openssh-server apache2 python3-pip && \ 42 | pip3 install chaperone 43 | 44 | the functionally equivalent image is only 242MB:: 45 | 46 | sample-simple latest 8839acc1e4ef 24 minutes ago 242 MB 47 | 48 | A Small Ubuntu Base Image with Chaperone 49 | ---------------------------------------- 50 | 51 | The sample image above contains both SSH and Apache. However, let's assume that you want 52 | to create the simplest Chaperone base image possible.
Here is the ``Dockerfile`` to start with:: 53 | 54 | FROM ubuntu:14.04 55 | RUN apt-get update && \ 56 | apt-get install -y --no-install-recommends python3-pip && \ 57 | pip3 install chaperone 58 | RUN mkdir -p /etc/chaperone.d 59 | COPY chaperone.conf /etc/chaperone.d/chaperone.conf 60 | ENTRYPOINT ["/usr/local/bin/chaperone"] 61 | 62 | The following ``chaperone.conf`` can serve as your starting point:: 63 | 64 | your.service: { 65 | command: "logger -p warn 'Replace this with your service'", 66 | } 67 | 68 | console.logging: { 69 | selector: '*.warn', 70 | stdout: true, 71 | } 72 | 73 | If you build the above image, it will be just 226MB, only 38MB larger than the Ubuntu image:: 74 | 75 | $ docker images 76 | REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE 77 | base-ubuntu latest 182521cfa43e About an hour ago 226 MB 78 | 79 | 80 | A 53MB Alpine Image with Chaperone 81 | ---------------------------------- 82 | 83 | If you really care about keeping your images as minimal as possible, consider using 84 | `Alpine Linux `_ as your base image. Alpine is a simple, 85 | stripped-down distribution that is ideal for creating lean, mean containers. 86 | 87 | Here's a ``Dockerfile`` that will create a small Alpine Linux image, complete with both 88 | Chaperone and Python3:: 89 | 90 | FROM alpine:3.2 91 | RUN apk add --update python3 && pip3 install chaperone 92 | RUN mkdir -p /etc/chaperone.d 93 | COPY chaperone.conf /etc/chaperone.d/chaperone.conf 94 | ENTRYPOINT ["/usr/bin/chaperone"] 95 | 96 | The resulting image is less than 53MB:: 97 | 98 | $ docker images 99 | REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE 100 | base-alpine latest 1c9d85d9bb67 About an hour ago 52.59 MB 101 | 102 | 103 | Pre-Built Images 104 | ---------------- 105 | 106 | When building our official Chaperone base images (`located here on Docker Hub `_), 107 | we used the techniques above to create versatile images with reasonably sophisticated start-ups. They may be 108 | overkill for most applications, but they may also serve as good configuration examples. 109 | 110 | Notably, the `chaperone-alpinejava `_ image is a good 111 | example of what's possible. It contains a complete Oracle Java 8 production environment, Python 3, Chaperone, and 112 | it's a remarkably small 216MB! 113 | 114 | Hopefully the above information is a useful starting point for streamlining your images.
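One last tip: the standard ``docker history`` command lists every layer in an image along with its size, which makes it easy to spot the build step that contributes the bulk of the bytes. For example, you can run it against one of the tags built above::

    $ docker history base-alpine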
115 | 116 | 117 | -------------------------------------------------------------------------------- /chaperone/cproc/pt/inetd.py: -------------------------------------------------------------------------------- 1 | import os 2 | import asyncio 3 | from copy import copy 4 | from chaperone.cutil.logging import error, warn, debug, info 5 | from chaperone.cproc.subproc import SubProcess 6 | from chaperone.cutil.syslog_info import LOG_DAEMON 7 | from chaperone.cutil.errors import ChParameterError 8 | from chaperone.cutil.servers import Server, ServerProtocol 9 | 10 | class InetdServiceProtocol(ServerProtocol): 11 | 12 | _fd = None 13 | 14 | def acquire_socket(self, sock): 15 | # Prepare the socket so it's inheritable 16 | sock.setblocking(True) 17 | self._fd = sock.detach() 18 | sock.close() 19 | 20 | future = asyncio.async(self.start_socket_process(self._fd)) 21 | future.add_done_callback(self._done) 22 | 23 | self.process.counter += 1 24 | 25 | return True 26 | 27 | def _done(self, f): 28 | # Close the socket regardless 29 | if self._fd is not None: 30 | os.close(self._fd) 31 | 32 | @asyncio.coroutine 33 | def start_socket_process(self, fd): 34 | process = self.process 35 | service = process.service 36 | 37 | if not process.family.system_alive: 38 | process.logdebug("{0} received connection on port {1}; ignored, system no longer alive".format(service.name, service.port)) 39 | return 40 | 41 | process.logdebug("{0} received connection on port {2}; attempting start '{1}'... ".format(service.name, " ".join(service.exec_args), 42 | service.port)) 43 | 44 | kwargs = {'stdout': fd, 45 | 'stderr': fd, 46 | 'stdin': fd} 47 | 48 | if service.directory: 49 | kwargs['cwd'] = service.directory 50 | 51 | env = process.get_expanded_environment().get_public_environment() 52 | 53 | if service.debug: 54 | if not env: 55 | process.logdebug("{0} environment is empty", service.name) 56 | else: 57 | process.logdebug("{0} environment:", service.name) 58 | for k,v in env.items(): 59 | process.logdebug(" {0} = '{1}'".format(k,v)) 60 | 61 | create = asyncio.create_subprocess_exec(*service.exec_args, preexec_fn=process._setup_subprocess, 62 | env=env, **kwargs) 63 | 64 | proc = self._proc = yield from create 65 | self.pid = proc.pid 66 | 67 | process.logdebug("{0} instance connected to port {1}", service.name, service.port) 68 | 69 | process.add_process(proc) 70 | yield from proc.wait() 71 | process.remove_process(proc) 72 | 73 | if not proc.returncode.normal_exit: 74 | self.logerror("{2} exit status for pid={0} is '{1}'".format(proc.pid, proc.returncode, service.name)) 75 | 76 | 77 | class InetdService(Server): 78 | 79 | def __init__(self, process): 80 | super().__init__() 81 | self.process = process 82 | 83 | def _create_server(self): 84 | return asyncio.get_event_loop().create_server(InetdServiceProtocol.buildProtocol(self, process=self.process), 85 | '0.0.0.0', 86 | self.process.port) 87 | 88 | class InetdProcess(SubProcess): 89 | 90 | syslog_facility = LOG_DAEMON 91 | server = None 92 | counter = 0 93 | 94 | def __init__(self, service, family=None): 95 | super().__init__(service, family) 96 | self._proclist = set() 97 | 98 | if not service.port: 99 | raise ChParameterError("inetd-type service {0} requires 'port=' parameter".format(self.name)) 100 | 101 | def add_process(self, proc): 102 | self._proclist.add(proc) 103 | 104 | def remove_process(self, proc): 105 | self._proclist.discard(proc) 106 | 107 | @property 108 | def scheduled(self): 109 | return self.server is not None 110 | 111 | @property 112 | def 
note(self): 113 | if self.server: 114 | msg = "waiting on port " + str(self.port) 115 | if self.counter: 116 | msg += "; req recvd = " + str(self.counter) 117 | if len(self._proclist): 118 | msg += "; running = " + str(len(self._proclist)) 119 | return msg 120 | 121 | @asyncio.coroutine 122 | def start_subprocess(self): 123 | """ 124 | Takes over process startup and sets up our own server socket. 125 | """ 126 | 127 | self.server = InetdService(self) 128 | yield from self.server.run() 129 | 130 | self.loginfo("inetd service {0} listening on port {1}".format(self.name, self.port)) 131 | 132 | @asyncio.coroutine 133 | def reset(self, dependents = False, enable = False, restarts_ok = False): 134 | if self.server: 135 | self.server.close() 136 | self.server = None 137 | plist = copy(self._proclist) 138 | if plist: 139 | self.logwarn("{0} terminating {1} processes on port {2} that are still running".format(self.name, len(plist), self.port)) 140 | for p in plist: 141 | p.terminate() 142 | yield from super().reset(dependents, enable, restarts_ok) 143 | 144 | @asyncio.coroutine 145 | def final_stop(self): 146 | yield from self.reset() 147 | -------------------------------------------------------------------------------- /chaperone/cproc/pt/notify.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import socket 3 | import re 4 | from functools import partial 5 | 6 | from chaperone.cutil.errors import ChProcessError 7 | from chaperone.cutil.proc import ProcStatus 8 | from chaperone.cutil.notify import NotifyListener 9 | from chaperone.cproc.subproc import SubProcess 10 | 11 | class NotifyProcess(SubProcess): 12 | 13 | process_timeout = 300 14 | defer_exit_kills = True 15 | 16 | _fut_monitor = None 17 | _listener = None 18 | _ready_event = None 19 | 20 | def _close_listener(self): 21 | if self._listener: 22 | self._listener.close() 23 | self._listener = None 24 | 25 | @asyncio.coroutine 26 | def process_prepare_co(self, environ): 27 | if not self._listener: 28 | self._listener = NotifyListener('@/chaperone/' + self.service.name, 29 | onNotify = self._notify_received) 30 | yield from self._listener.run() 31 | 32 | environ['NOTIFY_SOCKET'] = self._listener.socket_name 33 | 34 | # Now, set up an event which is triggered upon ready 35 | self._ready_event = asyncio.Event() 36 | 37 | def _notify_timeout(self): 38 | service = self.service 39 | message = "notify service '{1}' did not receive ready notification after {2} second(s), {3}".format( 40 | service.type, 41 | service.name, self.process_timeout, 42 | "proceeding due to 'ignore_failures=True'" if service.ignore_failures else 43 | "terminating due to 'ignore_failures=False'") 44 | if not service.ignore_failures: 45 | self.terminate() 46 | raise ChProcessError(message) 47 | 48 | @asyncio.coroutine 49 | def reset(self, dependents = False, enable = False, restarts_ok = False): 50 | yield from super().reset(dependents, enable, restarts_ok) 51 | self._close_listener() 52 | 53 | @asyncio.coroutine 54 | def final_stop(self): 55 | yield from super().final_stop() 56 | self._close_listener() 57 | 58 | @asyncio.coroutine 59 | def process_started_co(self): 60 | if self._fut_monitor and not self._fut_monitor.cancelled(): 61 | self._fut_monitor.cancel() 62 | self._fut_monitor = None 63 | 64 | yield from self.do_startup_pause() 65 | 66 | self._fut_monitor = asyncio.async(self._monitor_service()) 67 | self.add_pending(self._fut_monitor) 68 | 69 | if self._ready_event: 70 | try: 71 | if not self.process_timeout: 
72 | raise asyncio.TimeoutError() 73 | yield from asyncio.wait_for(self._ready_event.wait(), self.process_timeout) 74 | except asyncio.TimeoutError: 75 | self._ready_event = None 76 | self._notify_timeout() 77 | else: 78 | if self._ready_event: 79 | self._ready_event = None 80 | rc = self.returncode 81 | if rc is not None and not rc.normal_exit: 82 | if self.ignore_failures: 83 | self.logwarn("{0} (ignored) failure on start-up with result '{1}'".format(self.name, rc)) 84 | else: 85 | raise ChProcessError("{0} failed with reported error {1}".format(self.name, rc), resultcode = rc) 86 | 87 | @asyncio.coroutine 88 | def _monitor_service(self): 89 | """ 90 | We only care about errors here. The rest is dealt with by having notifications 91 | occur. 92 | """ 93 | result = yield from self.wait() 94 | if isinstance(result, int) and result > 0: 95 | self._setready() # simulate ready 96 | self._ready_event = None 97 | self._close_listener() 98 | yield from self._abnormal_exit(result) 99 | 100 | def _notify_received(self, which, var, value): 101 | callfunc = getattr(self, "notify_" + var.upper(), None) 102 | #print("NOTIFY RECEIVED", var, value) 103 | if callfunc: 104 | callfunc(value) 105 | 106 | def _setready(self): 107 | if self._ready_event: 108 | self._ready_event.set() 109 | return True 110 | return False 111 | 112 | def notify_MAINPID(self, value): 113 | try: 114 | pid = int(value) 115 | except ValueError: 116 | self.logdebug("{0} got MAINPID={1}, but not a valid pid#", self.name, value) 117 | return 118 | self.pid = pid 119 | 120 | def notify_BUSERROR(self, value): 121 | code = ProcStatus(value) 122 | if not self._setready(): 123 | self.process_exit(code) 124 | else: 125 | self.returncode = code 126 | 127 | def notify_ERRNO(self, value): 128 | try: 129 | intval = int(value) 130 | except ValueError: 131 | self.logdebug("{0} got ERRNO={1}, not a valid error code", self.name, value) 132 | return 133 | code = ProcStatus(intval << 8) 134 | if not self._setready(): 135 | self.process_exit(code) 136 | else: 137 | self.returncode = code 138 | 139 | def notify_READY(self, value): 140 | if value == "1": 141 | self._setready() 142 | 143 | def notify_STATUS(self, value): 144 | self.note = value 145 | 146 | @property 147 | def status(self): 148 | if self._ready_event: 149 | return "activating" 150 | return super().status 151 | -------------------------------------------------------------------------------- /chaperone/exec/envcp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copy text files and expand environment variables as you copy. 3 | 4 | Usage: 5 | envcp [options] FILE ... 6 | 7 | Options: 8 | --strip suffix If the destination is a directory, strip "suffix" 9 | off source files. 10 | --overwrite Overwrite destination files rather than exiting 11 | with an error. 12 | -v --verbose Display progress. 13 | -a --archive Preserve permissions when copying. 14 | --shell-enable Enable shell escapes using backticks, as in $(`ls`) 15 | --xprefix char The leading string to identify a variable. Defaults to '$' 16 | --xgrouping chars Grouping types which are recognized, defaults to '({' 17 | 18 | Copies a file to a destination file (two arguments), or any number of files to a destination 19 | directory. As files are copied, environment variables will be expanded. If the destination 20 | is a directory, then --strip can be used to specify a file suffix to be stripped off. 21 | 22 | Formats allowed are $(ENV) or ${ENV}. The bareword $ENV is not recognized.
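Examples (illustrative invocations only; the file names shown are hypothetical):

    envcp nginx.conf.tpl /etc/nginx/nginx.conf
    envcp --strip .tpl --overwrite templates/*.tpl /etc/app/
    echo 'host is $(HOSTNAME)' | envcp -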
23 | """ 24 | 25 | # perform any patches first 26 | import chaperone.cutil.patches 27 | 28 | # regular code begins 29 | import sys 30 | import os 31 | import asyncio 32 | import shlex 33 | from docopt import docopt 34 | 35 | from chaperone.cproc.version import VERSION_MESSAGE 36 | from chaperone.cutil.env import Environment 37 | 38 | def check_canwrite(flist, overok): 39 | for f in flist: 40 | if os.path.exists(f) and not overok: 41 | print("error: file {0} exists, won't overwrite".format(f)) 42 | exit(1) 43 | 44 | def main_entry(): 45 | options = docopt(__doc__, version=VERSION_MESSAGE) 46 | 47 | files = options['FILE'] 48 | 49 | start = options['--xprefix'] 50 | braces = options['--xgrouping'] 51 | 52 | if braces: 53 | if any([b not in '{([' for b in braces]): 54 | print("error: --xgrouping can accept one or more of '{{', '[', or '(' only. Not this: '{0}'.".format(braces)) 55 | exit(1) 56 | 57 | # Enable or disable, but don't cache them if enabled 58 | Environment.set_backtick_expansion(bool(options['--shell-enable']), False) 59 | 60 | Environment.set_parse_parameters(start, braces) 61 | 62 | env = Environment() 63 | 64 | # Support stdin/stdout behavior if '-' is the only file specified on the command line 65 | 66 | if '-' in files: 67 | if len(files) > 1: 68 | print("error: '-' for stdin/stdout cannot be combined with other filename arguments") 69 | exit(1) 70 | sys.stdout.write(env.expand(sys.stdin.read())) 71 | sys.stdout.flush() 72 | exit(0) 73 | 74 | if len(files) < 2: 75 | print("error: must include two or more filename arguments") 76 | exit(1) 77 | 78 | destdir = os.path.abspath(files[-1]) 79 | destfile = None 80 | 81 | if os.path.isdir(destdir): 82 | if not os.access(destdir, os.W_OK|os.X_OK): 83 | print("error: directory {0} exists but is not writable".format(destdir)); exit(1) 84 | st = options['--strip'] 85 | if st: 86 | files = [(f, os.path.basename(f)[:-len(st)] if f.endswith(st) else os.path.basename(f)) for f in files[:-1]]  # strip the suffix itself, not a trailing character set 87 | else: 88 | files = [(f, os.path.basename(f)) for f in files[:-1]] 89 | check_canwrite([os.path.join(destdir, p[1]) for p in files], options['--overwrite']) 90 | elif len(files) != 2: 91 | print("error: destination is not a directory and more than 2 files specified") 92 | exit(1) 93 | else: 94 | destfile = files[1] 95 | files = [(files[0], files[0])] 96 | check_canwrite([destfile], options['--overwrite']) 97 | 98 | # files is now a list of pairs [(source, dest-basename), ...] 99 | 100 | for curpair in files: 101 | if not os.path.exists(curpair[0]): 102 | print("error: file does not exist, {0}".format(curpair[0])) 103 | exit(1) 104 | if not os.access(curpair[0], os.R_OK): 105 | print("error: file is not readable, {0}".format(curpair[0])) 106 | exit(1) 107 | 108 | for curpair in files: 109 | if not destfile: 110 | destfile = os.path.join(destdir, curpair[1]) 111 | try: 112 | oldstat = os.stat(curpair[0]) 113 | oldf = open(curpair[0], 'r') 114 | except Exception as ex: 115 | print("error: cannot open input file {0}: {1}".format(curpair[0], ex)) 116 | exit(1) 117 | try: 118 | newf = open(destfile, 'w') 119 | except Exception as ex: 120 | print("error: cannot open output file {0}: {1}".format(destfile, ex)) 121 | exit(1) 122 | 123 | newf.write(env.expand(oldf.read())) 124 | oldf.close() 125 | newf.close() 126 | 127 | if options['--archive']: 128 | # ATTEMPT to retain permissions 129 | try: 130 | os.chown(destfile, oldstat.st_uid, oldstat.st_gid) 131 | except PermissionError: 132 | # Try them separately. User first, then group.
133 | try: 134 | os.chown(destfile, oldstat.st_uid, -1) 135 | except PermissionError: 136 | pass 137 | try: 138 | os.chown(destfile, -1, oldstat.st_gid) 139 | except PermissionError: 140 | pass 141 | try: 142 | os.chmod(destfile, oldstat.st_mode) 143 | except PermissionError: 144 | pass 145 | try: 146 | os.utime(destfile, times=(oldstat.st_atime, oldstat.st_mtime)) 147 | except PermissionError: 148 | pass 149 | 150 | if options['--verbose']: 151 | print("envcp {0} {1}".format(curpair[0], destfile)) 152 | 153 | destfile = None 154 | --------------------------------------------------------------------------------