├── jenkins
│   ├── plugin-list.txt
│   ├── jenkins.model.JenkinsLocationConfiguration.xml_template
│   ├── var-lib-jenkins.mount
│   ├── config.xml_template
│   ├── jenkins-swarm-service.bat_template
│   ├── example-job.xml
│   ├── jenkins-swarm.service
│   └── jenkins-swarm-service.xml
├── .gitignore
├── haproxy
│   ├── haproxy_template
│   └── haproxy.cfg_template
├── provision
│   └── shell
│       ├── resolve.sh
│       ├── gluster-additional-node_configuration.sh
│       ├── resolve.bat
│       ├── haproxy.sh
│       ├── cluster.sh
│       ├── common-dependencies_centos.sh
│       ├── haproxy_configuration.sh
│       ├── common-dependencies_win.bat
│       ├── jenkins-agent_linux.sh
│       ├── gluster-node.sh
│       ├── gluster-initial-node_configuration.sh
│       ├── jenkins-agent_win.bat
│       ├── jenkins-master.sh
│       ├── jenkins-master_configuration.sh
│       └── cluster_configuration.sh
├── conf.env
├── README.md
└── Vagrantfile
/jenkins/plugin-list.txt:
--------------------------------------------------------------------------------
1 | swarm
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.DS_Store*
2 |
3 | .idea
4 | .vagrant
5 |
--------------------------------------------------------------------------------
/haproxy/haproxy_template:
--------------------------------------------------------------------------------
1 | # Set ENABLED to 1 if you want the init script to start haproxy.
2 | ENABLED=1
3 | # Add extra flags here.
4 | #EXTRAOPTS="-de -m 16"
5 |
--------------------------------------------------------------------------------
/jenkins/jenkins.model.JenkinsLocationConfiguration.xml_template:
--------------------------------------------------------------------------------
1 | <?xml version='1.0' encoding='UTF-8'?>
2 | <jenkins.model.JenkinsLocationConfiguration>
3 |   <jenkinsUrl>JENKINS_URL</jenkinsUrl>
4 | </jenkins.model.JenkinsLocationConfiguration>
5 |
--------------------------------------------------------------------------------
/jenkins/var-lib-jenkins.mount:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Mounting Jenkins Home directory manually
3 |
4 | [Mount]
5 | What=gluster-node-0:/jenkins_home
6 | Where=/var/lib/jenkins
7 | Type=glusterfs
8 | Options=direct-io-mode=disable,negative-timeout=2
9 |
10 | [Install]
11 | WantedBy=multi-user.target
12 |
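13 | # NOTE: jenkins-master_configuration.sh copies this unit to /etc/systemd/system and
14 | # starts it; a manual test on a master node could look like:
15 | #   cp var-lib-jenkins.mount /etc/systemd/system/
16 | #   systemctl start var-lib-jenkins.mount && systemctl is-active var-lib-jenkins.mount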
--------------------------------------------------------------------------------
/provision/shell/resolve.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 |
5 | echo "10.11.11.21 gluster-node-0" | sed -e '/'"$(hostname)"'/d' >> /etc/hosts
6 |
7 | echo "10.11.11.31 jenkins-master-1" >> /etc/hosts
8 | echo "10.11.11.32 jenkins-master-2" >> /etc/hosts
9 |
10 | echo "10.11.11.11 load-balancer" | sed -e '/'"$(hostname)"'/d' >> /etc/hosts
11 |
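12 | # NOTE: the sed filters above drop an entry when it matches the node's own hostname,
13 | # so a machine never resolves its own name via /etc/hosts; a quick sanity check could be:
14 | #   grep -E 'gluster-node|jenkins-master|load-balancer' /etc/hosts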
--------------------------------------------------------------------------------
/jenkins/config.xml_template:
--------------------------------------------------------------------------------
1 | <?xml version='1.1' encoding='UTF-8'?>
2 | <hudson>
3 |   <installStateName>RESTART</installStateName>
4 |   <useSecurity>false</useSecurity>
5 |   <numExecutors>1</numExecutors>
6 |   <workspaceDir>${JENKINS_HOME}/workspace/${ITEM_FULLNAME}</workspaceDir>
7 |   <slaveAgentPort>JENKINS_JNLP_PORT</slaveAgentPort>
8 | </hudson>
9 |
--------------------------------------------------------------------------------
/provision/shell/gluster-additional-node_configuration.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 | ADDITIONAL_GLUSTER_NODES_INDEX_FILE_PATH="${1}"
12 |
13 |
14 |
15 | nodeIP=$(ip address | grep -oP "(?:${PRIVATE_NETWORK_SLASH24_PREFIX})[\S]+(?=/)")
16 | echo "${nodeIP}" >> ${ADDITIONAL_GLUSTER_NODES_INDEX_FILE_PATH}
17 |
--------------------------------------------------------------------------------
/provision/shell/resolve.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | SET NEWLINE=^& echo.
4 | SET ETC_HOSTS=%WINDIR%\system32\drivers\etc\hosts
5 |
6 |
7 |
8 | :: NOTE: this is just an example and currently not in use
9 |
10 | SET SOME_HOSTNAME=localhost
11 | SET SOME_HOST_IP=127.0.0.1
12 |
13 | @echo on
14 | FIND /C /I "%SOME_HOSTNAME%" %ETC_HOSTS%
15 | IF %ERRORLEVEL% NEQ 0 ECHO %NEWLINE%^%SOME_HOST_IP% %SOME_HOSTNAME%>>%ETC_HOSTS%
16 | @echo off
17 |
18 |
--------------------------------------------------------------------------------
/provision/shell/haproxy.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 |
12 | yum install haproxy -y
13 |
14 |
15 |
16 | systemctl enable firewalld
17 | systemctl start firewalld
18 | firewall-cmd --zone=public --add-port=${EXTERNAL_LOAD_BALANCER_PORT}/tcp --permanent # jenkins web interface
19 | firewall-cmd --zone=public --add-port=${JENKINS_JNLP_PORT}/tcp --permanent # JNLP
20 | firewall-cmd --reload
21 |
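22 | # NOTE: the opened ports could be double-checked afterwards with, e.g.:
23 | #   firewall-cmd --zone=public --list-ports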
--------------------------------------------------------------------------------
/provision/shell/cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 |
12 | yum install -y pacemaker pcs psmisc policycoreutils-python
13 |
14 |
15 | firewall-cmd --permanent --add-service=high-availability
16 | firewall-cmd --reload
17 |
18 | systemctl enable pcsd.service
19 | systemctl start pcsd.service
20 |
21 | #set password for corosync communication user
22 | echo "${COROSYNC_USER_NAME}:${COROSYNC_USER_PASSWORD}" | chpasswd
23 |
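24 | # NOTE: this only prepares the cluster stack on each master; the actual cluster is
25 | # formed later by cluster_configuration.sh. pcsd health could be checked via:
26 | #   systemctl is-active pcsd.service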
--------------------------------------------------------------------------------
/jenkins/jenkins-swarm-service.bat_template:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | call C:\ProgramData\chocolatey\bin\RefreshEnv.cmd
4 | SET DIR=%~dp0
5 |
6 | SET JAVA_EXE=java
7 | SET JENKINS_SWARM_EXEC=%DIR%\swarm-client.jar
8 |
9 | SET BROADCAST_IP=PRIVATE_NETWORK_SLASH24_PREFIX.255
10 | SET SLAVE_PREFIX=win-2012r2
11 |
12 |
13 |
14 | java -jar "%JENKINS_SWARM_EXEC%" ^
15 | -autoDiscoveryAddress "%BROADCAST_IP%" ^
16 | -executors 2 ^
17 | -name "%SLAVE_PREFIX%" ^
18 | -labels "%SLAVE_PREFIX%" ^
19 | -fsroot C:\jenkins-agent
20 |
21 |
22 |
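23 | :: NOTE: PRIVATE_NETWORK_SLASH24_PREFIX is a placeholder; jenkins-agent_win.bat
24 | :: substitutes it with the real /24 prefix from conf.env before installing this file.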
--------------------------------------------------------------------------------
/provision/shell/common-dependencies_centos.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source "${DIR}/conf.env"
9 |
10 |
11 |
12 | yum check-update -y
13 | yum update -y
14 |
15 |
16 | yum install wget curl git nano -y
17 |
18 |
19 | groupadd --gid 1024 "${PLATFORM_USER_GROUP}"
20 | adduser --uid 1024 --gid 1024 "${PLATFORM_USER_NAME}"
21 | echo "${PLATFORM_USER_NAME}:${PLATFORM_USER_PW}" | chpasswd
22 | echo "chef ALL=(ALL) NOPASSWD:ALL" >> "/etc/sudoers.d/${PLATFORM_USER_NAME}"
23 |
--------------------------------------------------------------------------------
/provision/shell/haproxy_configuration.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 |
12 | sed -i \
13 | -e 's/EXTERNAL_LOAD_BALANCER_PORT/'"${EXTERNAL_LOAD_BALANCER_PORT}"'/g' \
14 | -e 's/PRIVATE_NETWORK_SLASH24_PREFIX/'"${PRIVATE_NETWORK_SLASH24_PREFIX}"'/g' \
15 | -e 's/JENKINS_WEB_PORT/'"${JENKINS_WEB_PORT}"'/g' \
16 | -e 's/JENKINS_JNLP_PORT/'"${JENKINS_JNLP_PORT}"'/g' \
17 | ./haproxy.cfg_template
18 | cp ./haproxy.cfg_template /etc/haproxy/haproxy.cfg
19 | cp ./haproxy_template /etc/default/haproxy
20 |
21 | systemctl enable haproxy
22 | systemctl start haproxy
23 |
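24 | # NOTE: the rendered configuration could be validated before (re)starting, e.g.:
25 | #   haproxy -c -f /etc/haproxy/haproxy.cfg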
--------------------------------------------------------------------------------
/provision/shell/common-dependencies_win.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 |
4 | :: NOTE: required because in this context (vagrant provisioning, batch) the cmd
5 | :: environment does not pick up the PATH changes made by chocolatey
6 | call c:\ProgramData\chocolatey\bin\RefreshEnv.cmd
7 |
8 |
9 | :: set cwd to location of this file
10 | SET DIR=%~dp0
11 |
12 |
13 | choco install -y curl
14 |
15 | :: Disable firewall
16 | netsh advfirewall set privateprofile state off
17 |
18 | :: Disable automatic updates
19 | reg add "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update" /v AUOptions /t REG_DWORD /d 0 /f
20 | sc config wuauserv start= disabled
21 | net stop wuauserv
22 |
--------------------------------------------------------------------------------
/conf.env:
--------------------------------------------------------------------------------
1 | PREFIX="jha"
2 |
3 | LINUX_AGENTS=1
4 | WINDOWS_AGENTS=1
5 | START_WINDOWS_AGENTS_HEADLESS=false
6 |
7 | PRIVATE_NETWORK_SLASH24_PREFIX="10.11.11"
8 |
9 | CREATE_LOAD_BALANCER=true
10 | EXTERNAL_LOAD_BALANCER_IP="192.168.11.11"
11 | EXTERNAL_LOAD_BALANCER_PORT="80"
12 |
13 | GLUSTER_BRICK_ID="jenkins_home"
14 | GLUSTER_JENKINS_BRICK_DIR="/bricks/jenkins"
15 | ADDITIONAL_GLUSTER_NODES=1
16 |
17 | JENKINS_WEB_PORT="8080"
18 | JENKINS_JNLP_PORT="5000"
19 | JENKINS_HOME_PATH="/var/lib/jenkins"
20 | JENKINS_USER_NAME="jenkins"
21 | JENKINS_USER_ID="888"
22 | JENKINS_URL="http://192.168.11.11/"
23 |
24 | COROSYNC_USER_NAME="hacluster"
25 | COROSYNC_USER_PASSWORD="(ns3cUre"
26 |
27 | PLATFORM_USER_NAME="jha-user"
28 | PLATFORM_USER_PW="jha-user"
29 | PLATFORM_USER_GROUP="jha-user"
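29 |
30 | # NOTE: this file is kept compatible with both of its consumers: the shell
31 | # provisioners `source` it, and the Vagrantfile `load`s it as Ruby (quoted strings,
32 | # bare integers and true/false are valid in both languages).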
--------------------------------------------------------------------------------
/jenkins/example-job.xml:
--------------------------------------------------------------------------------
1 | <?xml version='1.1' encoding='UTF-8'?>
2 | <project>
3 |   <actions/>
4 |   <description></description>
5 |   <keepDependencies>false</keepDependencies>
6 |   <properties/>
7 |   <scm class="hudson.scm.NullSCM"/>
8 |   <canRoam>true</canRoam>
9 |   <disabled>false</disabled>
10 |   <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
11 |   <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
12 |   <triggers>
13 |     <hudson.triggers.TimerTrigger>
14 |       <spec>*/10 * * * *</spec>
15 |     </hudson.triggers.TimerTrigger>
16 |   </triggers>
17 |   <concurrentBuild>false</concurrentBuild>
18 |   <builders>
19 |     <hudson.tasks.Shell>
20 |       <command>sleep 60</command>
21 |     </hudson.tasks.Shell>
22 |   </builders>
23 |   <publishers/>
24 |   <buildWrappers/>
25 | </project>
--------------------------------------------------------------------------------
/provision/shell/jenkins-agent_linux.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 |
12 | SWARM_VERSION="3.13"
13 | INSTALL_PATH="/etc/jenkins-swarm"
14 | JAR_LOCATION="${INSTALL_PATH}/swarm-client.jar"
15 | SERVICE_FILE="jenkins-swarm.service"
16 |
17 |
18 |
19 | yum install -y patch openssl-devel.x86_64 gcc gcc-c++ kernel-devel make
20 |
21 |
22 | yum install java -y
23 |
24 | mkdir -p "${INSTALL_PATH}"
25 | curl --output "${JAR_LOCATION}" \
26 | --location \
27 | --silent \
28 | "https://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/${SWARM_VERSION}/swarm-client-${SWARM_VERSION}.jar"
29 |
30 |
31 | cp ./${SERVICE_FILE} "/etc/systemd/system/${SERVICE_FILE}"
32 |
33 | systemctl daemon-reload
34 | systemctl enable ${SERVICE_FILE}
35 | systemctl start ${SERVICE_FILE}
36 |
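37 | # NOTE: the agent logs to the journal (SyslogIdentifier=jenkins-swarm), so a quick
38 | # health check could be:
39 | #   systemctl is-active jenkins-swarm.service
40 | #   journalctl -u jenkins-swarm.service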
--------------------------------------------------------------------------------
/jenkins/jenkins-swarm.service:
--------------------------------------------------------------------------------
1 |
2 | [Unit]
3 | Description=Jenkins Swarm client that registers this host as a Jenkins slave via auto-discovery
4 |
5 | After=network.target
6 |
7 | # NOTE: disable start-rate limiting so the service keeps restarting indefinitely
8 | StartLimitIntervalSec=0
9 |
10 |
11 |
12 | [Service]
13 | Type=simple
14 |
15 | User=root
16 | Group=root
17 |
18 | StandardOutput=journal
19 | StandardError=journal
20 | SyslogIdentifier=jenkins-swarm
21 |
22 | Environment=AGENT_PREFIX=linux
23 | Environment=NETWORK_INTERFACE=eth1
24 |
25 | ExecStart=/bin/sh -c 'BROADCAST_IP=$(ip address show dev ${NETWORK_INTERFACE} | grep -oP "(?<=brd\s)[\S]+(?:\s)"); \
26 | exec /bin/java -jar /etc/jenkins-swarm/swarm-client.jar \
27 | -autoDiscoveryAddress $${BROADCAST_IP} \
28 | -executors 2 \
29 | -name ${AGENT_PREFIX} \
30 | -labels ${AGENT_PREFIX} \
31 | -fsroot /jenkins-agent'
32 |
33 |
34 | Restart=always
35 | RestartSec=8s
36 |
37 |
38 |
39 | [Install]
40 | WantedBy=multi-user.target
41 |
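42 | # NOTE: the double dollar in $${BROADCAST_IP} is systemd escaping; it passes a
43 | # literal $BROADCAST_IP to the spawned shell instead of expanding it as a unit variable.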
--------------------------------------------------------------------------------
/provision/shell/gluster-node.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 |
12 | # src: https://wiki.centos.org/HowTos/GlusterFSonCentOS
13 |
14 |
15 |
16 | yum install -y centos-release-gluster
17 | yum install -y glusterfs gluster-cli glusterfs-libs glusterfs-server
18 |
19 |
20 | systemctl enable firewalld
21 | systemctl start firewalld
22 | firewall-cmd --zone=public --add-port=24007-24008/tcp --permanent
23 | # NOTE: for each brick an additional port needs to be opened, starting at 24009
24 | firewall-cmd --zone=public --add-port=24009/tcp --permanent
25 | # NOTE: for native gluster-clients
26 | firewall-cmd --zone=public --add-port=49152-49251/tcp --permanent
27 | # NOTE: for nfs clients
28 | #firewall-cmd --zone=public --add-port=38465-38469/tcp --add-port=111/tcp --add-port=2049/tcp --permanent
29 | #firewall-cmd --zone=public --add-service=nfs --permanent
30 | firewall-cmd --reload
31 |
32 | systemctl enable glusterd
33 | systemctl start glusterd
34 |
35 |
36 | mkdir -p "${GLUSTER_JENKINS_BRICK_DIR}"
37 |
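38 | # NOTE: this script only prepares a gluster node; the volume itself is created by
39 | # gluster-initial-node_configuration.sh on gluster-node-0, while the other nodes
40 | # register themselves via gluster-additional-node_configuration.sh.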
--------------------------------------------------------------------------------
/provision/shell/gluster-initial-node_configuration.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 | ADDITIONAL_GLUSTER_NODES_INDEX_FILE_PATH="${1}"
12 |
13 |
14 | # src: https://wiki.centos.org/HowTos/GlusterFSonCentOS
15 |
16 |
17 |
18 | gluster volume create "${GLUSTER_BRICK_ID}" \
19 | transport tcp \
20 | "$(hostname):${GLUSTER_JENKINS_BRICK_DIR}" force
21 | gluster volume set "${GLUSTER_BRICK_ID}" storage.owner-gid "${JENKINS_USER_ID}"
22 | gluster volume set "${GLUSTER_BRICK_ID}" storage.owner-uid "${JENKINS_USER_ID}"
23 |
24 |
25 | nodeCount=1
26 | while read -r additionalGlusterNodeIP
27 | do
28 | if [ -z "${additionalGlusterNodeIP}" ]; then
29 | continue;
30 | fi
31 |
32 | gluster peer probe "${additionalGlusterNodeIP}"
33 |
34 | sleep 10 # the previous command completes asynchronously relative to the next one
35 |
36 | nodeCount=$(( $nodeCount + 1 ))
37 |
38 | gluster volume add-brick \
39 | "${GLUSTER_BRICK_ID}" \
40 | replica "${nodeCount}" \
41 | "${additionalGlusterNodeIP}:${GLUSTER_JENKINS_BRICK_DIR}" \
42 | force
43 |
44 | echo "Added gluster node: ${additionalGlusterNodeIP}"
45 |
46 | sleep 4
47 | done < "${ADDITIONAL_GLUSTER_NODES_INDEX_FILE_PATH}"
48 |
49 | rm -rf "${ADDITIONAL_GLUSTER_NODES_INDEX_FILE_PATH}"
50 |
51 | gluster volume start "${GLUSTER_BRICK_ID}"
52 |
53 | gluster volume info
54 |
--------------------------------------------------------------------------------
/provision/shell/jenkins-agent_win.bat:
--------------------------------------------------------------------------------
1 | @echo off
2 |
3 | :: NOTE: required because in this context (vagrant provisioning, batch) the cmd
4 | :: environment does not pick up the PATH changes made by chocolatey
5 | call c:\ProgramData\chocolatey\bin\RefreshEnv.cmd
6 |
7 |
8 | :: set cwd to location of this file
9 | SET DIR=%~dp0
10 |
11 |
12 | :: importing configuration variables
13 | call "%DIR%\conf.env.bat"
14 |
15 |
16 |
17 | SET SWARM_VERSION=3.13
18 | SET "INSTALL_PATH=c:\Program Files\jenkins-swarm"
19 | SET JAR_LOCATION=%INSTALL_PATH%\swarm-client.jar
20 | SET TASK_FILE=jenkins-swarm-service.bat
21 | SET SERVICE_NAME=jenkins-swarm-task
22 |
23 |
24 | choco install -y javaruntime --version 8.0.151
25 |
26 |
27 | mkdir "%INSTALL_PATH%"
28 | :: NOTE: ignore the certificate. Otherwise the download would break, because this old
29 | :: powershell/dotnet version lacks the newer certificate chains
30 | curl --output "%JAR_LOCATION%" --insecure --location --silent "https://repo.jenkins-ci.org/releases/org/jenkins-ci/plugins/swarm-client/%SWARM_VERSION%/swarm-client-%SWARM_VERSION%.jar"
31 |
32 | powershell -Command "(gc %DIR%\%TASK_FILE%_template) -replace 'PRIVATE_NETWORK_SLASH24_PREFIX', '%PRIVATE_NETWORK_SLASH24_PREFIX%' | Out-File -Encoding 'Default' %DIR%\%TASK_FILE%_template"
33 | copy "%DIR%\%TASK_FILE%_template" "%INSTALL_PATH%\%TASK_FILE%" /y
34 |
35 |
36 | SCHTASKS /Create ^
37 | /SC:ONSTART ^
38 | /TN:%SERVICE_NAME% ^
39 | /TR:"%INSTALL_PATH%\%TASK_FILE%" ^
40 | /RL:HIGHEST ^
41 | /DELAY 0000:08 ^
42 | /RU "Administrator" ^
43 | /RP "vagrant" ^
44 | /F ^
45 | /NP
46 |
47 | SCHTASKS /run /TN:%SERVICE_NAME%
48 |
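49 | :: NOTE: whether the task was registered and started could be verified with:
50 | ::   SCHTASKS /query /TN:%SERVICE_NAME%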
--------------------------------------------------------------------------------
/haproxy/haproxy.cfg_template:
--------------------------------------------------------------------------------
1 | global
2 | log /var/log local0
3 | log /dev/log local1 notice
4 | chroot /var/lib/haproxy
5 | user haproxy
6 | group haproxy
7 | daemon
8 |
9 | defaults
10 | mode http
11 | log global
12 | option dontlognull
13 | option forwardfor
14 | option http-server-close
15 | retries 3
16 | maxconn 2000
17 | timeout http-request 60s
18 | timeout queue 60s
19 | timeout client 60s
20 | timeout server 120s
21 | timeout connect 20s
22 | timeout check 60s
23 |
24 | frontend WEB
25 | bind *:EXTERNAL_LOAD_BALANCER_PORT name http-web
26 | default_backend Jenkins_WEB
27 |
28 | backend Jenkins_WEB
29 | stick-table type ip size 1 nopurge
30 | stick on dst
31 | server jenkins-master-1 PRIVATE_NETWORK_SLASH24_PREFIX.31:JENKINS_WEB_PORT check
32 | server jenkins-master-2 PRIVATE_NETWORK_SLASH24_PREFIX.32:JENKINS_WEB_PORT check backup
33 |
34 | frontend JNLP
35 | bind *:JENKINS_JNLP_PORT name jenkins-jnlp
36 | option tcplog
37 | mode tcp
38 | timeout client 15m
39 | use_backend Jenkins_JNLP
40 |
41 | backend Jenkins_JNLP
42 | mode tcp
43 | option tcplog
44 | timeout server 15m
45 | stick-table type ip size 1 nopurge
46 | stick on dst
47 | default-server inter 1s
48 | server jenkins-master-1 PRIVATE_NETWORK_SLASH24_PREFIX.31:JENKINS_JNLP_PORT check port JENKINS_WEB_PORT
49 | server jenkins-master-2 PRIVATE_NETWORK_SLASH24_PREFIX.32:JENKINS_JNLP_PORT check port JENKINS_WEB_PORT backup
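50 |
51 | # NOTE: EXTERNAL_LOAD_BALANCER_PORT, PRIVATE_NETWORK_SLASH24_PREFIX, JENKINS_WEB_PORT
52 | # and JENKINS_JNLP_PORT are placeholders; haproxy_configuration.sh substitutes them
53 | # with the values from conf.env before copying this file to /etc/haproxy/haproxy.cfg.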
--------------------------------------------------------------------------------
/provision/shell/jenkins-master.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 |
12 | # src: https://wiki.centos.org/HowTos/GlusterFSonCentOS
13 | # src: https://wiki.jenkins.io/display/JENKINS/Installing+Jenkins+on+Red+Hat+distributions
14 |
15 |
16 |
17 | groupadd --gid "${JENKINS_USER_ID}" \
18 | --system "${JENKINS_USER_NAME}"
19 | adduser --uid "${JENKINS_USER_ID}" \
20 | --gid "${JENKINS_USER_ID}" \
21 | --no-create-home \
22 | --system "${JENKINS_USER_NAME}"
23 |
24 | yum install -y java
25 |
26 | curl --output "/etc/yum.repos.d/jenkins.repo" \
27 | --location \
28 | --silent \
29 | "https://pkg.jenkins.io/redhat-stable/jenkins.repo"
30 | rpm --import "https://jenkins.io/redhat/jenkins-ci.org.key"
31 | yum install -y jenkins
32 |
33 | # NOTE: the jenkins service is enabled by default; since the cluster logic takes
34 | # care of it, it has to be disabled so it does not come up on its own after a reboot
35 | systemctl disable jenkins
36 |
37 |
38 | yum install -y centos-release-gluster
39 | yum install -y glusterfs glusterfs-fuse attr
40 |
41 |
42 | systemctl enable firewalld
43 | systemctl start firewalld
44 | firewall-cmd --zone=public --add-port=${JENKINS_WEB_PORT}/tcp --permanent # jenkins web interface
45 | firewall-cmd --zone=public --add-port=${JENKINS_JNLP_PORT}/tcp --permanent # JNLP
46 | firewall-cmd --zone=public --add-port=33848/udp --permanent # UDP, used by jenkins-swarm client
47 | firewall-cmd --reload
48 |
49 | mkdir -p "${JENKINS_HOME_PATH}"
50 |
51 | chown -R "${JENKINS_USER_NAME}:${JENKINS_USER_NAME}" "${JENKINS_HOME_PATH}"
52 | find "${JENKINS_HOME_PATH}" -type d -exec chmod 750 {} \;
53 | find "${JENKINS_HOME_PATH}" -type f -exec chmod 640 {} \;
54 |
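55 | # NOTE: jenkins stays disabled here on purpose; pacemaker later manages it as a
56 | # systemd:jenkins resource (see cluster_configuration.sh), so only the active
57 | # master runs the service.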
--------------------------------------------------------------------------------
/jenkins/jenkins-swarm-service.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-16"?>
2 | <Task version="1.2" xmlns="http://schemas.microsoft.com/windows/2004/02/mit/task">
3 |   <RegistrationInfo>
4 |     <Author>vagrant</Author>
5 |   </RegistrationInfo>
6 |   <Triggers>
7 |     <BootTrigger>
8 |       <Enabled>true</Enabled>
9 |     </BootTrigger>
10 |   </Triggers>
11 |   <Principals>
12 |     <Principal id="Author">
13 |       <UserId>Administrator</UserId>
14 |       <LogonType>Password</LogonType>
15 |       <RunLevel>HighestAvailable</RunLevel>
16 |     </Principal>
17 |   </Principals>
18 |   <Settings>
19 |     <MultipleInstancesPolicy>IgnoreNew</MultipleInstancesPolicy>
20 |     <DisallowStartIfOnBatteries>false</DisallowStartIfOnBatteries>
21 |     <StopIfGoingOnBatteries>false</StopIfGoingOnBatteries>
22 |     <AllowHardTerminate>true</AllowHardTerminate>
23 |     <StartWhenAvailable>true</StartWhenAvailable>
24 |     <RunOnlyIfNetworkAvailable>false</RunOnlyIfNetworkAvailable>
25 |     <IdleSettings>
26 |       <StopOnIdleEnd>true</StopOnIdleEnd>
27 |       <RestartOnIdle>false</RestartOnIdle>
28 |     </IdleSettings>
29 |     <AllowStartOnDemand>true</AllowStartOnDemand>
30 |     <Enabled>true</Enabled>
31 |     <Hidden>false</Hidden>
32 |     <RunOnlyIfIdle>false</RunOnlyIfIdle>
33 |     <DisallowStartOnRemoteAppSession>false</DisallowStartOnRemoteAppSession>
34 |     <UseUnifiedSchedulingEngine>false</UseUnifiedSchedulingEngine>
35 |     <WakeToRun>false</WakeToRun>
36 |     <ExecutionTimeLimit>PT0S</ExecutionTimeLimit>
37 |     <Priority>7</Priority>
38 |     <RestartOnFailure>
39 |       <Interval>PT1M</Interval>
40 |       <Count>3</Count>
41 |     </RestartOnFailure>
42 |   </Settings>
43 |   <Actions Context="Author">
44 |     <Exec>
45 |       <Command>"C:\Program Files\jenkins-swarm\jenkins-swarm-service.bat"</Command>
46 |     </Exec>
47 |   </Actions>
48 | </Task>
--------------------------------------------------------------------------------
/provision/shell/jenkins-master_configuration.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 |
5 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
6 | cd ${DIR}
7 |
8 |
9 | source ./conf.env
10 |
11 |
12 |
13 | cp ./var-lib-jenkins.mount /etc/systemd/system
14 | systemctl start var-lib-jenkins.mount
15 | sleep 5
16 | systemctl is-active --quiet var-lib-jenkins.mount
17 | if [ $? -ne 0 ]; then
18 | echo " [ERR] Mounting gluster brick to provide JENKINS_HOME failed."
19 | exit 1;
20 | fi
21 |
22 | # NOTE: handling case where GlusterFS brick already contains Jenkins setup
23 | if [ "$(ls -A ${JENKINS_HOME_PATH})" ]; then
24 | echo " [WARN] JENKINS_HOME already contains configuration. Skipping..."
25 | systemctl stop var-lib-jenkins.mount
26 | exit 0
27 | fi
28 |
29 | chown -R "${JENKINS_USER_NAME}:${JENKINS_USER_NAME}" "${JENKINS_HOME_PATH}"
30 |
31 |
32 | sed -i \
33 | -e 's/JENKINS_JNLP_PORT/'"${JENKINS_JNLP_PORT}"'/g' \
34 | ./config.xml_template
35 | cp ./config.xml_template "${JENKINS_HOME_PATH}/config.xml"
36 |
37 | escapedJenkinsURL=$(printf '%s\n' "${JENKINS_URL}" | sed 's/[\&/]/\\&/g')
38 | sed -i \
39 | -e 's/JENKINS_URL/'"${escapedJenkinsURL}"'/g' \
40 | ./jenkins.model.JenkinsLocationConfiguration.xml_template
41 | cp ./jenkins.model.JenkinsLocationConfiguration.xml_template "${JENKINS_HOME_PATH}/jenkins.model.JenkinsLocationConfiguration.xml"
42 |
43 | cp /etc/sysconfig/jenkins /etc/sysconfig/jenkins.default
44 | sed -i \
45 | -e '/JENKINS_ARGS=/c\JENKINS_ARGS="-Djenkins.install.runSetupWizard=false"' \
46 | /etc/sysconfig/jenkins
47 | chown -R "${JENKINS_USER_NAME}:${JENKINS_USER_NAME}" "${JENKINS_HOME_PATH}"
48 |
49 |
50 | # starting jenkins for the first time
51 | systemctl start jenkins
52 | sleep 20
53 |
54 |
55 | curl --output ./jenkins-cli.jar \
56 | --location \
57 | --silent \
58 | "http://localhost:${JENKINS_WEB_PORT}/jnlpJars/jenkins-cli.jar"
59 |
60 | java -jar ./jenkins-cli.jar \
61 | -s "http://localhost:${JENKINS_WEB_PORT}/" \
62 | create-job example < ./example-job.xml
63 |
64 | while read plugin; do
65 | java -jar ./jenkins-cli.jar \
66 | -s "http://localhost:${JENKINS_WEB_PORT}/" \
67 | install-plugin "${plugin}"
68 | done < "./plugin-list.txt"
69 |
70 | java -jar ./jenkins-cli.jar \
71 | -s "http://localhost:${JENKINS_WEB_PORT}/" \
72 | restart && sleep 15
73 |
74 |
75 | systemctl stop jenkins && systemctl stop var-lib-jenkins.mount
76 |
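77 | # NOTE: jenkins and the mount are stopped again deliberately; from here on,
78 | # cluster_configuration.sh lets pacemaker decide which master mounts JENKINS_HOME
79 | # and runs the jenkins service.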
--------------------------------------------------------------------------------
/provision/shell/cluster_configuration.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 |
4 | DIR="$( cd "$(dirname "$0")" ; pwd -P )"
5 | cd ${DIR}
6 |
7 |
8 | source ./conf.env
9 |
10 |
11 |
12 | # configure corosync
13 | pcs cluster auth jenkins-master-1 jenkins-master-2 \
14 | -u "${COROSYNC_USER_NAME}" \
15 | -p "${COROSYNC_USER_PASSWORD}"
16 |
17 | # create initial cluster
18 | pcs cluster setup \
19 | --name "jenkins-cluster" \
20 | jenkins-master-1 jenkins-master-2
21 |
22 | pcs cluster start --all
23 | # ensure reboot survival
24 | pcs cluster enable --all
25 |
26 | # NOTE: STONITH / fencing not implemented yet
27 | pcs property set stonith-enabled=false
28 |
29 | # NOTE: disable quorum, this is not needed to fail-over in cold standby mode
30 | pcs property set no-quorum-policy=ignore
31 |
32 | # create GlusterFS storage resource
33 | # NOTE: negative-timeout works around a performance hit of the GlusterFS client in certain
34 | # cases (e.g. Jenkins requesting the available plugin list). Presumably Jenkins writes files
35 | # faster than GlusterFS can process them (create+delete outpaces the replication)
36 | JENKINS_DIR_RESOURCE_NAME="jenkins-master-home-dir--rsc"
37 | pcs resource create "${JENKINS_DIR_RESOURCE_NAME}" ocf:heartbeat:Filesystem \
38 | device="gluster-node-0:/${GLUSTER_BRICK_ID}" \
39 | directory="${JENKINS_HOME_PATH}" \
40 | fstype="glusterfs" \
41 | options=direct-io-mode=disable,negative-timeout=0.1 \
42 | fast_stop="no" force_unmount="safe" \
43 | op stop on-fail=stop timeout=200 \
44 | op monitor on-fail=stop timeout=200 \
45 | OCF_CHECK_LEVEL=10
46 |
47 | # create Jenkins service resource
48 | JENKINS_SERVICE_RESOURCE_NAME="jenkins-master--rsc"
49 | pcs resource create "${JENKINS_SERVICE_RESOURCE_NAME}" systemd:jenkins \
50 | op monitor interval="60s" \
51 | op start interval="45s"
52 |
53 | # Stickiness controls how likely a resource is to be moved, i.e. the cost of a move
54 | pcs resource defaults resource-stickiness=100
55 |
56 | # bind both resources to one another, to make them run on the same host
57 | pcs constraint colocation add "${JENKINS_SERVICE_RESOURCE_NAME}" with "${JENKINS_DIR_RESOURCE_NAME}" INFINITY
58 |
59 | # define order of resources to get started
60 | pcs constraint order "${JENKINS_DIR_RESOURCE_NAME}" then "${JENKINS_SERVICE_RESOURCE_NAME}"
61 |
62 | # prefer host one over the other
63 | pcs constraint location "${JENKINS_SERVICE_RESOURCE_NAME}" prefers jenkins-master-1=50
64 |
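65 | # NOTE: the resulting cluster state could be inspected as a privileged user via:
66 | #   pcs status
67 | #   pcs cluster status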
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Jenkins High Availability Example Implementation
2 | ================================================
3 |
4 |
5 | This repository holds an example implementation of a Jenkins HA infrastructure setup based
6 | solely on open-source components; its rationale and development process are described
7 | [in a blog post](https://endocode.com/blog/2018/08/17/jenkins-high-availability-setup/).
8 |
9 |
10 |
11 | __Prerequisites:__
12 |
13 | + Ruby
14 | + Vagrant
15 | + VirtualBox
16 |
17 |
18 | ### Worth noting
19 |
20 | 1. This setup may also serve as a simple Jenkins playground on your local machine (provides Linux
21 | and Windows agents)!
22 | 2. It neither implements nor configures any security measures other than
23 |     + a private network for all components located behind the load balancer
24 |     + an enabled firewall with some simple rules
25 | 3. STONITH is not (yet) implemented
26 | 4. the cluster only implements the [cold-standby](https://www.ibm.com/developerworks/community/blogs/RohitShetty/entry/high_availability_cold_warm_hot) mode
27 | 5. components: HAProxy, GlusterFS, Jenkins, Jenkins Swarm Plugin, Pacemaker, Corosync
28 |
29 |
30 | ### Usage
31 |
32 | #### Installation
33 |
34 | 1. adjust `/conf.env` according to your needs (and available host resources)
35 | 2. `vagrant up`
36 | 3. go to `http[s]://${EXTERNAL_LOAD_BALANCER_IP}:${EXTERNAL_LOAD_BALANCER_PORT}` to visit
37 |    the Jenkins UI
38 |
39 |
40 | #### Verify cluster state
41 |
42 | *NOTE: must be executed as a privileged user*
43 |
44 | ```bash
45 | $ pcs status
46 | $ pcs cluster status
47 | $ pcs status corosync
48 | ```
49 |
50 | #### Playing with the cluster
51 |
52 | To move resources from one node to another (simulating a failure), you can stop a node by
53 | logging into it and running `pcs cluster stop $NODE_NAME` (the node name defaults to the local
54 | node), or change the preferred resource location (e.g. `pcs constraint location jenkins-master--rsc prefers jenkins-master-2=INFINITY`).
55 | Another way is to send the active node into standby (`pcs cluster standby jenkins-master-1`).
56 |
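57 | For example, a minimal fail-over round trip (assuming the defaults from `/conf.env`) could look like:
58 |
59 | ```bash
60 | $ pcs cluster standby jenkins-master-1    # resources move to jenkins-master-2
61 | $ pcs status                              # watch the resources come up on the other node
62 | $ pcs cluster unstandby jenkins-master-1  # let the node rejoin as standby
63 | ```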
57 |
58 | ### Future Work
59 |
60 | #### Approaches to implement STONITH
61 |
62 | 1) the HAProxy instance could play a role in a fencing implementation, e.g. by preventing the
63 |    dead node from receiving traffic
64 | 2) closing down the firewall on all gluster nodes to prevent unwanted access by a *dead*
65 |    jenkins master node
66 | 3) depending on availability, using the cloud provider's API to shut down the jenkins master
67 |    node in question
68 | 4) ...?
69 |
70 | All of these approaches require writing a fence agent from scratch, see
71 | + https://github.com/ClusterLabs/fence-agents
72 | + https://docs.pagure.org/ClusterLabs.fence-agents/FenceAgentAPI.md
73 | + `/usr/share/fence/`
74 |
75 |
76 | #### Configuration Management
77 |
78 | The entire provisioning and system setup is implemented with scripts (bash, batch). Today, of
79 | course, this would be done in a more declarative way that also ensures idempotency. Since this
80 | example is based on CentOS 7, *Ansible* would be the obvious choice here, but any technology
81 | [supported by vagrant](https://www.vagrantup.com/docs/provisioning/) would suffice.
82 |
83 |
84 | ### Resources
85 |
86 | + [Blog post on this topic including a more detailed explanation](https://endocode.com/blog/2018/08/17/jenkins-high-availability-setup/)
87 | + [A cluster from scratch powered by pacemaker](http://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/index.html)
88 | + [CRM Fencing](https://clusterlabs.org/pacemaker/doc/crm_fencing.html)
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | require 'open3'
5 |
6 |
7 | DIR = File.dirname(__FILE__)
8 |
9 |
10 | PROVISIONER = 'shell'
11 | PROVISION_ROOT_PATH = "#{DIR}/provision"
12 | REMOTE_SOURCE_PATH_LINUX = '/tmp/provision'
13 | REMOTE_SOURCE_PATH_WIN = 'C:\\tmp\\provision'
14 | MOUNT_PATH_LINUX = '/mnt/host'
15 | MOUNT_PATH_WIN = 'C:\\host'
16 | COMMON_PROVISION_SCRIPTS_LINUX = [ 'common-dependencies_centos.sh' ]
17 | COMMON_PROVISION_SCRIPTS_WIN = [ 'common-dependencies_win.bat' ]
18 |
19 |
20 | load "#{DIR}/conf.env"
21 |
22 |
23 | Vagrant.configure('2') do |config|
24 |
25 | if Vagrant.has_plugin?('vagrant-cachier')
26 | config.cache.scope = :machine
27 | end
28 |
29 |
30 | config.vm.box = 'centos/7'
31 | config.vm.box_version = '1902.01'
32 |
33 |
34 | dhcpServerIP = "#{PRIVATE_NETWORK_SLASH24_PREFIX}.1"
35 | internalNetworkName = "jenkins-ha-setup"
36 | cliArgs = [
37 | '--netname', internalNetworkName,
38 | '--ip', dhcpServerIP,
39 | '--netmask', '255.255.255.0',
40 | '--lowerip', "#{PRIVATE_NETWORK_SLASH24_PREFIX}.100",
41 | '--upperip', "#{PRIVATE_NETWORK_SLASH24_PREFIX}.200",
42 | '--enable'
43 | ]
44 |
45 | if ARGV[0] == 'up'
46 | stdout, stderr, status = Open3.capture3(
47 | [ 'vboxmanage', 'dhcpserver', 'add' ].concat(cliArgs).join(' ')
48 | )
49 |
50 | if stderr.length > 0
51 | puts("NOTE: DHCP server for network: #{internalNetworkName} already exists. Overwriting configurations...")
52 | stdout, stderr, status = Open3.capture3(
53 | [ 'vboxmanage', 'dhcpserver', 'modify' ].concat(cliArgs).join(' ')
54 | )
55 | end
56 | end
57 |
58 |
59 | ADDITIONAL_GLUSTER_NODES_INDEX = "#{MOUNT_PATH_LINUX}/gluster-nodes-list.txt"
60 | ADDITIONAL_GLUSTER_NODES > 0 && (1..ADDITIONAL_GLUSTER_NODES).each do |i|
61 | config.vm.define "gluster-node-#{i}" do |node|
62 |
63 | hostname = "gluster-node-#{i}"
64 |
65 | provision_scripts = Array.new(COMMON_PROVISION_SCRIPTS_LINUX).push(
66 | 'resolve.sh',
67 | 'gluster-node.sh',
68 | "gluster-additional-node_configuration.sh #{ADDITIONAL_GLUSTER_NODES_INDEX}"
69 | )
70 | asset_files = [
71 | './../conf.env'
72 | ]
73 |
74 | # node.vm.box = 'centos/7'
75 |
76 | node.vm.provider 'virtualbox' do |vb|
77 | vb.name = "#{PREFIX}_#{hostname}"
78 | vb.memory = 1024
79 | vb.cpus = 1
80 | end
81 |
82 | node.vm.hostname = hostname
83 | node.vm.provision 'shell', inline: 'sed -i -e "/^127.0.0.1\\t"$(hostname)"/d" /etc/hosts', privileged: true
84 | node.vm.network 'private_network',
85 | type: 'dhcp',
86 | virtualbox__intnet: internalNetworkName,
87 | :adapter => 2
88 |
89 | node.vm.synced_folder "#{DIR}", '/vagrant', disabled: true
90 | node.vm.synced_folder PROVISION_ROOT_PATH, MOUNT_PATH_LINUX, type: 'virtualbox'
91 |
92 | asset_files.each do |relFilePath|
93 | filename = File.basename(relFilePath)
94 | node.vm.provision 'file',
95 | source: "#{PROVISION_ROOT_PATH}/#{relFilePath}",
96 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
97 | end
98 | provision_scripts.each do |script|
99 | filename = script.split(' ').first
100 | node.vm.provision 'file',
101 | source: "#{PROVISION_ROOT_PATH}/#{PROVISIONER}/#{filename}",
102 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
103 | end
104 |
105 | provision_scripts.each do |script|
106 | node.vm.provision PROVISIONER,
107 | inline: "#{REMOTE_SOURCE_PATH_LINUX}/#{script}",
108 | privileged: true
109 | end
110 |
111 | end
112 | end
113 |
114 |
115 | config.vm.define "gluster-node-0" do |node|
116 |
117 | ipSuffix = 21
118 | instance_ip = "#{PRIVATE_NETWORK_SLASH24_PREFIX}.#{ipSuffix}"
119 | hostname = "gluster-node-0"
120 |
121 | provision_scripts = Array.new(COMMON_PROVISION_SCRIPTS_LINUX).push(
122 | 'resolve.sh',
123 | 'gluster-node.sh',
124 | "gluster-initial-node_configuration.sh #{ADDITIONAL_GLUSTER_NODES_INDEX}"
125 | )
126 | asset_files = [
127 | './../conf.env'
128 | ]
129 |
130 | # node.vm.box = 'centos/7'
131 |
132 | node.vm.provider 'virtualbox' do |vb|
133 | vb.name = "#{PREFIX}_#{hostname}"
134 | vb.memory = 1024
135 | vb.cpus = 1
136 | end
137 |
138 | node.vm.hostname = hostname
139 | node.vm.provision 'shell', inline: 'sed -i -e "/^127.0.0.1\\t"$(hostname)"/d" /etc/hosts', privileged: true
140 | node.vm.network 'private_network',
141 | ip: instance_ip,
142 | virtualbox__intnet: internalNetworkName,
143 | :adapter => 2
144 |
145 | node.vm.synced_folder "#{DIR}", '/vagrant', disabled: true
146 | node.vm.synced_folder PROVISION_ROOT_PATH, MOUNT_PATH_LINUX, type: 'virtualbox'
147 |
148 | asset_files.each do |relFilePath|
149 | filename = File.basename(relFilePath)
150 | node.vm.provision 'file',
151 | source: "#{PROVISION_ROOT_PATH}/#{relFilePath}",
152 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
153 | end
154 | provision_scripts.each do |script|
155 | filename = script.split(' ').first
156 | node.vm.provision 'file',
157 | source: "#{PROVISION_ROOT_PATH}/#{PROVISIONER}/#{filename}",
158 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
159 | end
160 |
161 | provision_scripts.each do |script|
162 | node.vm.provision PROVISIONER,
163 | inline: "#{REMOTE_SOURCE_PATH_LINUX}/#{script}",
164 | privileged: true
165 | end
166 |
167 | end
168 |
169 |
170 | JENKINS_MASTERS = [ 1, 2 ]
171 | JENKINS_MASTERS.each do |i|
172 | config.vm.define "jenkins-master-#{i}" do |node|
173 |
174 | ipSuffix = 30 + i
175 | instance_ip = "#{PRIVATE_NETWORK_SLASH24_PREFIX}.#{ipSuffix}"
176 | hostname = "jenkins-master-#{i}"
177 |
178 | provision_scripts = Array.new(COMMON_PROVISION_SCRIPTS_LINUX).push(
179 | 'resolve.sh',
180 | 'jenkins-master.sh',
181 | 'cluster.sh'
182 | )
183 | if i == 1
184 | provision_scripts.push( 'jenkins-master_configuration.sh' )
185 | elsif i == JENKINS_MASTERS.length
186 | provision_scripts.push( 'cluster_configuration.sh' )
187 | end
188 | asset_files = [
189 | './../conf.env',
190 | './../jenkins/config.xml_template',
191 | './../jenkins/jenkins.model.JenkinsLocationConfiguration.xml_template',
192 | './../jenkins/example-job.xml',
193 | './../jenkins/plugin-list.txt',
194 | './../jenkins/var-lib-jenkins.mount'
195 | ]
196 |
197 | # node.vm.box = 'centos/7'
198 |
199 | node.vm.provider 'virtualbox' do |vb|
200 | vb.name = "#{PREFIX}_jenkins-master-#{i}"
201 | vb.memory = 1024
202 | vb.cpus = 1
203 | end
204 |
205 | node.vm.hostname = hostname
206 |       # NOTE: when setting a hostname with vagrant, the system also starts resolving its
207 |       # own hostname to localhost, which gets reverted with this statement
208 | node.vm.provision 'shell', inline: 'sed -i -e "/^127.0.0.1\\t"$(hostname)"/d" /etc/hosts', privileged: true
209 | node.vm.network 'private_network',
210 | ip: instance_ip,
211 | virtualbox__intnet: internalNetworkName,
212 | :adapter => 2
213 |
214 | node.vm.synced_folder "#{DIR}", '/vagrant', disabled: true
215 | node.vm.synced_folder PROVISION_ROOT_PATH, MOUNT_PATH_LINUX, type: 'virtualbox'
216 |
217 | asset_files.each do |relFilePath|
218 | filename = File.basename(relFilePath)
219 | node.vm.provision 'file',
220 | source: "#{PROVISION_ROOT_PATH}/#{relFilePath}",
221 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
222 | end
223 | provision_scripts.each do |script|
224 | filename = script.split(' ').first
225 | node.vm.provision 'file',
226 | source: "#{PROVISION_ROOT_PATH}/#{PROVISIONER}/#{filename}",
227 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
228 | end
229 |
230 | provision_scripts.each do |script|
231 | node.vm.provision PROVISIONER,
232 | inline: "#{REMOTE_SOURCE_PATH_LINUX}/#{script}",
233 | privileged: true
234 | end
235 | end
236 | end
237 |
238 |
239 | if CREATE_LOAD_BALANCER
240 | config.vm.define "load-balancer" do |node|
241 |
242 | ipSuffix = 11
243 | instance_ip = "#{PRIVATE_NETWORK_SLASH24_PREFIX}.#{ipSuffix}"
244 | hostname = "load-balancer"
245 |
246 | provision_scripts = Array.new(COMMON_PROVISION_SCRIPTS_LINUX).push(
247 | 'resolve.sh',
248 | 'haproxy.sh',
249 | 'haproxy_configuration.sh'
250 | )
251 | asset_files = [
252 | './../conf.env',
253 | './../haproxy/haproxy.cfg_template',
254 | './../haproxy/haproxy_template',
255 | ]
256 |
257 | # node.vm.box = 'centos/7'
258 |
259 | node.vm.provider "virtualbox" do |vb|
260 | vb.name = "#{PREFIX}_load-balancer"
261 | vb.memory = 512
262 | vb.cpus = 1
263 | end
264 |
265 | node.vm.hostname = hostname
266 | node.vm.provision 'shell', inline: 'sed -i -e "/^127.0.0.1\\t"$(hostname)"/d" /etc/hosts', privileged: true
267 | node.vm.network 'private_network',
268 | ip: instance_ip,
269 | virtualbox__intnet: internalNetworkName,
270 | :adapter => 2
271 | if EXTERNAL_LOAD_BALANCER_IP
272 | node.vm.network "private_network",
273 | ip: "#{EXTERNAL_LOAD_BALANCER_IP}",
274 | :adapter => 3
275 | node.vm.network "forwarded_port",
276 | guest: "#{EXTERNAL_LOAD_BALANCER_PORT}",
277 | host: "#{EXTERNAL_LOAD_BALANCER_PORT}",
278 | :adapter => 3
279 | end
280 |
281 | node.vm.synced_folder "#{DIR}", "/vagrant", disabled: true
282 | node.vm.synced_folder PROVISION_ROOT_PATH, MOUNT_PATH_LINUX, type: "virtualbox"
283 |
284 | asset_files.each do |relFilePath|
285 | filename = File.basename(relFilePath)
286 | node.vm.provision 'file',
287 | source: "#{PROVISION_ROOT_PATH}/#{relFilePath}",
288 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
289 | end
290 | provision_scripts.each do |script|
291 | filename = script.split(' ').first
292 | node.vm.provision 'file',
293 | source: "#{PROVISION_ROOT_PATH}/#{PROVISIONER}/#{filename}",
294 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
295 | end
296 |
297 | provision_scripts.each do |script|
298 | node.vm.provision PROVISIONER,
299 | inline: "#{REMOTE_SOURCE_PATH_LINUX}/#{script}",
300 | privileged: true
301 | end
302 |
303 | end
304 | end
305 |
306 |
307 | LINUX_AGENTS > 0 && (1..LINUX_AGENTS).each do |i|
308 | config.vm.define "jenkins-agent-linux-#{i}" do |node|
309 |
310 | hostname = "jenkins-agent-linux-#{i}"
311 |
312 | provision_scripts = Array.new(COMMON_PROVISION_SCRIPTS_LINUX).push(
313 | 'jenkins-agent_linux.sh'
314 | )
315 | asset_files = [
316 | './../conf.env',
317 | './../jenkins/jenkins-swarm.service'
318 | ]
319 |
320 | # node.vm.box = 'centos/7'
321 |
322 | node.vm.provider 'virtualbox' do |vb|
323 | vb.name = "#{PREFIX}_jenkins-agent-linux-#{i}"
324 | vb.memory = 1024
325 | vb.cpus = 1
326 | end
327 |
328 | node.vm.hostname = hostname
329 | node.vm.provision 'shell', inline: 'sed -i -e "/^127.0.0.1\\t"$(hostname)"/d" /etc/hosts', privileged: true
330 | node.vm.network 'private_network',
331 | type: 'dhcp',
332 | virtualbox__intnet: internalNetworkName,
333 | :adapter => 2
334 |
335 | node.vm.synced_folder "#{DIR}", '/vagrant', disabled: true
336 | node.vm.synced_folder PROVISION_ROOT_PATH, MOUNT_PATH_LINUX, type: 'virtualbox'
337 |
338 | asset_files.each do |relFilePath|
339 | filename = File.basename(relFilePath)
340 | node.vm.provision 'file',
341 | source: "#{PROVISION_ROOT_PATH}/#{relFilePath}",
342 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
343 | end
344 | provision_scripts.each do |script|
345 | filename = script.split(' ').first
346 | node.vm.provision 'file',
347 | source: "#{PROVISION_ROOT_PATH}/#{PROVISIONER}/#{filename}",
348 | destination: "#{REMOTE_SOURCE_PATH_LINUX}/#{filename}"
349 | end
350 |
351 | provision_scripts.each do |script|
352 | node.vm.provision PROVISIONER,
353 | inline: "#{REMOTE_SOURCE_PATH_LINUX}/#{script}",
354 | privileged: true
355 | end
356 | end
357 | end
358 |
359 |
360 | WINDOWS_AGENTS > 0 && (1..WINDOWS_AGENTS).each do |i|
361 | config.vm.define "jenkins-agent-win-#{i}" do |node|
362 |
363 | hostname = "jenkins-agent-win-#{i}"
364 |
365 | provision_scripts = Array.new(COMMON_PROVISION_SCRIPTS_WIN).push(
366 | {
367 | :filename => 'jenkins-agent_win.bat',
368 |           :arguments => [ PRIVATE_NETWORK_SLASH24_PREFIX ]
369 | }
370 | )
371 | asset_files = [
372 | './../conf.env',
373 | './assets/win/secconfig.cfg',
374 | './../jenkins/jenkins-swarm-service.bat_template'
375 | ]
376 |
377 | node.vm.box = 'opentable/win-2012r2-standard-amd64-nocm'
378 | config.vm.box_version = nil
379 |
380 | node.vm.communicator = 'winrm'
381 |
382 | node.vm.provider 'virtualbox' do |vb|
383 | vb.name = "#{PREFIX}_jenkins-agent-win-#{i}"
384 | vb.cpus = 1
385 | vb.memory = 2048
386 | vb.customize ['modifyvm', :id, '--memory', '2048']
387 | vb.customize ['modifyvm', :id, '--vram', '16']
388 | vb.gui = ! START_WINDOWS_AGENTS_HEADLESS
389 | end
390 |
391 |     # Don't know yet if this is necessary
392 | # node.vm.communicator = "winrm"
393 |
394 | node.vm.hostname = hostname
395 | node.vm.network 'private_network',
396 | type: 'dhcp',
397 | :adapter => 2
398 | node.vm.provider 'virtualbox' do |vb|
399 | vb.customize [ 'modifyvm', :id, '--nic2', 'intnet' ]
400 | vb.customize [ 'modifyvm', :id, '--nictype2', '82540EM' ]
401 | vb.customize [ 'modifyvm', :id, '--intnet2', internalNetworkName ]
402 | vb.customize [ 'modifyvm', :id, '--cableconnected2', 'on' ]
403 | end
404 | node.vm.network 'forwarded_port', guest: 3389, host: 3389
405 | node.windows.set_work_network = true
406 |
407 | node.vm.synced_folder "#{DIR}", '/vagrant', disabled: true
408 | node.vm.synced_folder PROVISION_ROOT_PATH, MOUNT_PATH_WIN, type: 'virtualbox'
409 |
410 | asset_files.each do |relFilePath|
411 | filename = File.basename(relFilePath)
412 | node.vm.provision 'file',
413 | source: "#{PROVISION_ROOT_PATH}/#{relFilePath}",
414 | destination: "#{REMOTE_SOURCE_PATH_WIN}/#{filename}"
415 | end
416 |
417 | # convert conf.env and make it work with windows
418 | node.vm.provision 'shell',
419 | path: "#{PROVISION_ROOT_PATH}/assets/win/convert-configuration-file_win.ps1",
420 | privileged: true,
421 | args: [
422 | "#{REMOTE_SOURCE_PATH_WIN}\\conf.env"
423 | ]
424 |
425 |     # NOTE: install chocolatey
426 | node.vm.provision 'shell',
427 | privileged: true,
428 | inline: "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))"
429 |
430 | provision_scripts.each do |script|
431 | if script.is_a? String
432 | filename = script
433 | arguments = []
434 |         elsif script.is_a? Hash
435 | filename = script[:filename]
436 | arguments = script[:arguments] || []
437 | end
438 | node.vm.provision PROVISIONER,
439 | path: "#{PROVISION_ROOT_PATH}/#{PROVISIONER}/#{filename}",
440 | upload_path: "#{REMOTE_SOURCE_PATH_WIN}\\#{filename}",
441 | privileged: true,
442 | args: arguments
443 | end
444 |
445 | end
446 | end
447 |
448 | end
449 |
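450 | # NOTE: bring-up is a plain `vagrant up`; single machines can also be handled
451 | # individually, e.g. `vagrant up gluster-node-0` or `vagrant provision jenkins-master-1`.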
--------------------------------------------------------------------------------