├── Dockerfile
├── README.md
├── cmd.sh
├── docker-wrapper.sh
└── rancher-cli-download.sh


/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:xenial

LABEL maintainer="Chris Mosetick"

ARG github_token

RUN \
    sed -i 's@http://archive.ubuntu.com/ubuntu/@http://ubuntu.osuosl.org/ubuntu@g' /etc/apt/sources.list && \
    apt-get update && \
    apt-get -y install software-properties-common wget curl jq git iptables ca-certificates apparmor && \
    add-apt-repository ppa:webupd8team/java -y && \
    apt-get update

RUN \
    (echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections) && \
    apt-get install -y oracle-java8-installer oracle-java8-set-default

ENV JAVA_HOME /usr/lib/jvm/java-8-oracle
ENV PATH $JAVA_HOME/bin:$PATH
ENV JENKINS_SWARM_VERSION 3.3
ENV HOME /home/jenkins-slave

RUN \
    useradd -c "Jenkins Slave user" -d $HOME -m jenkins-slave && \
    curl --create-dirs -sSLo $HOME/swarm-client-$JENKINS_SWARM_VERSION-jar-with-dependencies.jar https://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/$JENKINS_SWARM_VERSION/swarm-client-$JENKINS_SWARM_VERSION-jar-with-dependencies.jar

COPY cmd.sh /cmd.sh

# Set up our local files first
COPY docker-wrapper.sh /usr/local/bin/docker-wrapper
RUN chmod +x /usr/local/bin/docker-wrapper

# Always install the latest version of the Rancher CLI using a GitHub API call in a bash script.
# Pass in --build-arg github_token=<token> to make the download authenticated.
COPY rancher-cli-download.sh /usr/local/bin/rancher-cli-download.sh

# Now we install Docker-in-Docker - thanks to https://github.com/jpetazzo/dind
# We install the newest Docker into our Docker-in-Docker container
RUN \
    curl -fsSLO https://get.docker.com/builds/Linux/x86_64/docker-latest.tgz && \
    tar --strip-components=1 -xvzf docker-latest.tgz -C /usr/local/bin && \
    chmod +x /usr/local/bin/docker

RUN \
    /usr/local/bin/rancher-cli-download.sh && \
    tar xvf rancher-linux-amd64* && \
    cp rancher-v*/rancher /usr/local/bin && \
    chmod +x /usr/local/bin/rancher && \
    rm -rf /var/cache/apt/*

VOLUME /var/lib/docker

#ENV JENKINS_USERNAME jenkins
#ENV JENKINS_PASSWORD jenkins
#ENV JENKINS_MASTER http://jenkins:8080

ENTRYPOINT ["/usr/local/bin/docker-wrapper"]
CMD /bin/bash /cmd.sh

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# jenkins-slaves
Jenkins swarm slaves with Docker installed and accessible to the slave.


## Running

To run a Docker container, customizing the different tools with your credentials:

    docker run \
    -e JENKINS_USERNAME=jenkins \
    -e JENKINS_PASSWORD=jenkins \
    -e JENKINS_MASTER=http://jenkins:8080 \
    rancher/jenkins-slave

### Optional Environment Variables

You can specify the optional environment variables below when invoking `docker run` to customize the behavior of the swarm client (see the example after the table).

| Parameter       | Default Value       | Description                                                  |
|-----------------|---------------------|--------------------------------------------------------------|
| SLAVE_EXECUTORS | number of CPU cores | The number of concurrent jobs this worker can process        |
| SLAVE_NAME      | swarm-client        | The name of the slave as it will appear in the Jenkins UI    |
| SLAVE_LABELS    | None                | Space-separated labels to assign to the launching slave      |
| SLAVE_MODE      | None                | Job allocation mode for the slave (`normal` or `exclusive`)  |
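
For example, a hypothetical invocation (the values below are purely illustrative, not defaults) that names the slave, limits it to two executors, attaches two labels, and uses exclusive mode might look like:

    docker run \
    -e JENKINS_USERNAME=jenkins \
    -e JENKINS_PASSWORD=jenkins \
    -e JENKINS_MASTER=http://jenkins:8080 \
    -e SLAVE_NAME=docker-slave-1 \
    -e SLAVE_EXECUTORS=2 \
    -e SLAVE_LABELS="docker rancher" \
    -e SLAVE_MODE=exclusive \
    rancher/jenkins-slave

`SLAVE_LABELS` is split on whitespace by `cmd.sh`, so several labels can be passed in a single variable. Note that when the host's Docker is not bind mounted into the container, the entrypoint starts its own Docker daemon (Docker-in-Docker), which requires running the container with `--privileged`.

## Building

The image downloads the latest Rancher CLI release through the GitHub API at build time. Supplying a GitHub token via the `github_token` build argument makes that download authenticated, as noted in the Dockerfile. A sketch of the build command (the tag is just an example):

    docker build --build-arg github_token=<your_github_token> -t rancher/jenkins-slave .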
--------------------------------------------------------------------------------
/cmd.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# jenkins swarm slave
# Pick up the swarm-client jar that the Dockerfile downloaded into $HOME.
JAR=`ls -1 $HOME/swarm-client-*.jar | tail -n 1`

# Assemble the swarm-client argument list from the optional environment variables.
PARAMS=""
if [ ! -z "$JENKINS_USERNAME" ]; then
  PARAMS="$PARAMS -username $JENKINS_USERNAME"
fi
if [ ! -z "$JENKINS_PASSWORD" ]; then
  PARAMS="$PARAMS -password $JENKINS_PASSWORD"
fi
if [ ! -z "$SLAVE_EXECUTORS" ]; then
  PARAMS="$PARAMS -executors $SLAVE_EXECUTORS"
fi
if [ ! -z "$SLAVE_LABELS" ]; then
  for LABEL in $SLAVE_LABELS; do
    PARAMS="$PARAMS -labels $LABEL"
  done
fi
if [ ! -z "$SLAVE_NAME" ]; then
  PARAMS="$PARAMS -name $SLAVE_NAME"
fi
if [ ! -z "$SLAVE_MODE" ]; then
  PARAMS="$PARAMS -mode $SLAVE_MODE"
fi
if [ ! -z "$JENKINS_MASTER" ]; then
  PARAMS="$PARAMS -master $JENKINS_MASTER"
else
  if [ ! -z "$JENKINS_SERVICE_PORT" ]; then
    # kubernetes environment variable
    PARAMS="$PARAMS -master http://$SERVICE_HOST:$JENKINS_SERVICE_PORT"
  fi
fi

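# For illustration only (example values, not defaults): with
#   JENKINS_USERNAME=jenkins JENKINS_MASTER=http://jenkins:8080 SLAVE_EXECUTORS=2 SLAVE_LABELS="docker rancher"
# the checks above assemble roughly this command line:
#   java -jar $HOME/swarm-client-3.3-jar-with-dependencies.jar \
#     -username jenkins -executors 2 -labels docker -labels rancher \
#     -master http://jenkins:8080 -fsroot $HOME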
java -jar $JAR $PARAMS -fsroot $HOME
--------------------------------------------------------------------------------
/docker-wrapper.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Create loop devices /dev/loop0..6 (the loopback storage driver may need them).
for i in {0..6}
do
    mknod -m0660 /dev/loop$i b 7 $i
done

# If we have docker bind mounted in - no need to start our own daemon.
if (docker version); then
    echo "Docker is already bind mounted in - we are good to go..."
    exec "$@"
    exit
fi

echo "Launching docker server DinD style..."

# First, make sure that cgroups are mounted correctly.
CGROUP=/sys/fs/cgroup

[ -d $CGROUP ] ||
    mkdir $CGROUP

mountpoint -q $CGROUP ||
    mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || {
        echo "Could not make a tmpfs mount. Did you use --privileged?"
        exit 1
    }

if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security
then
    mount -t securityfs none /sys/kernel/security || {
        echo "Could not mount /sys/kernel/security."
        echo "AppArmor detection and --privileged mode might break."
    }
fi

# Mount the cgroup hierarchies exactly as they are in the parent system.
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
do
    [ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
    mountpoint -q $CGROUP/$SUBSYS ||
        mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS

    # The two following sections address a bug which manifests itself
    # by a cryptic "lxc-start: no ns_cgroup option specified" when
    # trying to start containers within a container.
    # The bug seems to appear when the cgroup hierarchies are not
    # mounted on the exact same directories in the host, and in the
    # container.

    # Named, control-less cgroups are mounted with "-o name=foo"
    # (and appear as such under /proc/<pid>/cgroup) but are usually
    # mounted on a directory named "foo" (without the "name=" prefix).
    # Systemd and OpenRC (and possibly others) both create such a
    # cgroup. To avoid the aforementioned bug, we symlink "foo" to
    # "name=foo". This shouldn't have any adverse effect.
    echo $SUBSYS | grep -q ^name= && {
        NAME=$(echo $SUBSYS | sed s/^name=//)
        ln -s $SUBSYS $CGROUP/$NAME
    }

    # Likewise, on at least one system, it has been reported that
    # systemd would mount the CPU and CPU accounting controllers
    # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
    # but on a directory called "cpu,cpuacct" (note the inversion
    # in the order of the groups). This tries to work around it.
    [ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
done

# Note: as I write those lines, the LXC userland tools cannot setup
# a "sub-container" properly if the "devices" cgroup is not in its
# own hierarchy. Let's detect this and issue a warning.
grep -q :devices: /proc/1/cgroup ||
    echo "WARNING: the 'devices' cgroup should be in its own hierarchy."
grep -qw devices /proc/1/cgroup ||
    echo "WARNING: it looks like the 'devices' cgroup is not mounted."

# Now, close extraneous file descriptors.
pushd /proc/self/fd >/dev/null
for FD in *
do
    case "$FD" in
    # Keep stdin/stdout/stderr
    [012])
        ;;
    # Nuke everything else
    *)
        eval exec "$FD>&-"
        ;;
    esac
done
popd >/dev/null


# If a pidfile is still around (for example after a container restart),
# delete it so that docker can start.
rm -rf /var/run/docker.pid

# Start the inner docker daemon and wait (up to 60 seconds) until it answers.
docker daemon $DOCKER_DAEMON_ARGS &
(( timeout = 60 + SECONDS ))
until docker info >/dev/null 2>&1
do
    if (( SECONDS >= timeout )); then
        echo 'Timed out trying to connect to internal docker host.' >&2
        break
    fi
    sleep 1
done
exec "$@"
--------------------------------------------------------------------------------
/rancher-cli-download.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# /.dockerenv only exists inside a Docker container; sourcing it (it is an
# empty file) is presumably a crude guard against running this outside one.
source /.dockerenv

# $github_token is set at docker build time via: --build-arg github_token=<token>
# Ask the GitHub API for the latest rancher/cli release and pick the gzipped linux-amd64 asset.
URL=$(curl -H "Authorization: token $github_token" -s https://api.github.com/repos/rancher/cli/releases/latest | jq -r ".assets[] | select(.name | contains(\"linux-amd64\")) | select(.content_type | contains(\"gzip\")) | .browser_download_url")

wget "$URL"
--------------------------------------------------------------------------------