├── aws
│   ├── Vagrantfile
│   ├── add-project.sh
│   └── bootstrap.sh
├── centos-launcher
│   ├── Vagrantfile
│   ├── add-project.sh
│   └── bootstrap.sh
├── centos-postgressql
│   ├── Vagrantfile
│   └── bootstrap.sh
├── centos-rpm
│   ├── Vagrantfile
│   ├── add-project.sh
│   ├── bootstrap.sh
│   ├── include.sh
│   └── run-test.sh
├── centos-yum-ldap
│   ├── Vagrantfile
│   ├── default.ldif
│   ├── jaas-loginmodule.conf
│   ├── provisioning
│   │   ├── functions.sh
│   │   ├── install-openldap.sh
│   │   └── install-rundeck.sh
│   └── slapd.conf
├── centos-yum
│   ├── Vagrantfile
│   ├── add-project.sh
│   └── bootstrap.sh
├── jenkins
│   ├── Vagrantfile
│   └── provisioning
│       ├── functions.sh
│       ├── install-jenkins.sh
│       ├── jenkins
│       │   ├── org.jenkinsci.plugins.rundeck.RundeckNotifier.xml
│       │   └── simple.xml
│       └── simple-1.0.0.war
├── nexus-plugin
│   ├── Vagrantfile
│   └── provisioning
│       └── install-nexus.sh
├── primary-secondary-failover
│   ├── README.md
│   ├── Vagrantfile
│   ├── add-primary.sh
│   ├── add-project.sh
│   ├── failover
│   │   ├── check.sh
│   │   ├── do-switch.sh
│   │   ├── syncme.sh
│   │   ├── takeover-notification.cgi
│   │   ├── takeover.sh
│   │   ├── update-jobs.sh
│   │   └── update-resources.sh
│   ├── generate-apikey.sh
│   ├── install-httpd.sh
│   ├── install-mysql.sh
│   ├── install-rundeck.sh
│   ├── jobs
│   │   └── jobs.xml
│   ├── load-jobs.sh
│   ├── ssh-copy-id.expect
│   └── templates
│       └── apitoken.aclpolicy
├── tomcat6-mysql
│   ├── README.md
│   ├── Vagrantfile
│   ├── add-project.sh
│   ├── generate-apikey.sh
│   ├── install-mysql.sh
│   ├── install-rundeck.sh
│   ├── server.xml
│   └── tomcat-users.xml
└── ubuntu
    ├── Vagrantfile
    ├── add-project.sh
    └── bootstrap.sh

/aws/Vagrantfile:
--------------------------------------------------------------------------------
1 | 
2 | Vagrant.configure("2") do |config|
3 | 
4 |   config.vm.box = "dummy"
5 | 
6 |   config.vm.network :forwarded_port, guest: 4440, host: 14440
7 | 
8 |   config.vm.provision :shell, :path => "bootstrap.sh"
9 |   config.vm.provision :shell, :path => "add-project.sh"
10 | 
11 |   config.vm.provider :aws do |aws, override|
12 |     aws.access_key_id = "AKIAJEGKRJGKJOCT4FLQ"
13 |     aws.secret_access_key = "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"
14 |     aws.keypair_name = "aws-vagrant-test"
15 | 
16 |     aws.security_groups = "vagrant-test"
17 | 
18 |     aws.ami = "ami-7747d01e"
19 | 
20 | 
21 |     override.ssh.username = "ubuntu"
22 |     override.ssh.private_key_path = "/Users/alexh/Development/rundeck-workspace/rundeck-vagrant/aws/aws-vagrant-test.pem"
23 |   end
24 | end
25 | 
--------------------------------------------------------------------------------
/aws/add-project.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | die() {
4 |     [[ $# -gt 1 ]] && {
5 |         exit_status=$1
6 |         shift
7 |     }
8 |     local -i frame=0; local info=
9 |     while info=$(caller $frame)
10 |     do
11 |         local -a f=( $info )
12 |         [[ $frame -gt 0 ]] && {
13 |             printf >&2 "ERROR in \"%s\" %s:%s\n" "${f[1]}" "${f[2]}" "${f[0]}"
14 |         }
15 |         (( frame++ )) || :; #ignore increment errors (i.e., errexit is set)
16 |     done
17 | 
18 |     printf >&2 "ERROR: $*\n"
19 | 
20 |     exit ${exit_status:-1}
21 | }
22 | 
23 | trap 'die $? "*** add-project failed. ***"' ERR
24 | set -o nounset -o pipefail
25 | 
26 | # Create an example project and capture its directory from the command output.
27 | dir=$(rd-project -a create -p example |awk -F: '{print $2}')
28 | 
29 | 
30 | # Run simple commands to double check.
31 | # Print out the available nodes.
32 | # Fire off a command.
33 | dispatch -p example
34 | dispatch -p example -f -- whoami
35 | 
36 | 
--------------------------------------------------------------------------------
/aws/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | die() {
4 |     [[ $# -gt 1 ]] && {
5 |         exit_status=$1
6 |         shift
7 |     }
8 |     local -i frame=0; local info=
9 |     while info=$(caller $frame)
10 |     do
11 |         local -a f=( $info )
12 |         [[ $frame -gt 0 ]] && {
13 |             printf >&2 "ERROR in \"%s\" %s:%s\n" "${f[1]}" "${f[2]}" "${f[0]}"
14 |         }
15 |         (( frame++ )) || :; #ignore increment errors (i.e., errexit is set)
16 |     done
17 | 
18 |     printf >&2 "ERROR: $*\n"
19 | 
20 |     exit ${exit_status:-1}
21 | }
22 | 
23 | #trap 'die $? "*** bootstrap failed. ***"' ERR
24 | 
25 | set -o nounset -o pipefail
26 | 
27 | apt-get update
28 | # Install the JRE
29 | apt-get -y install openjdk-6-jre
30 | 
31 | # Install Rundeck core
32 | 
33 | mkdir -p /var/log/vagrant
34 | curl -sf http://download.rundeck.org/deb/rundeck-1.5-1-GA.deb -o /var/log/vagrant/rundeck-1.5-1-GA.deb
35 | 
36 | dpkg -i /var/log/vagrant/rundeck-1.5-1-GA.deb
37 | sleep 10
38 | 
39 | # Start up rundeck
40 | mkdir -p /var/log/vagrant
41 | if ! /etc/init.d/rundeckd status
42 | then
43 | (
44 |     exec 0>&- # close stdin
45 |     /etc/init.d/rundeckd start
46 | ) &> /var/log/vagrant/bootstrap.log # redirect stdout/err to a log.
47 | 
48 | let count=0
49 | while true
50 | do
51 |     if ! grep "Started SocketConnector@" /var/log/vagrant/bootstrap.log
52 |     then printf >&2 ".";# progress output.
53 |     else break; # successful message.
54 |     fi
55 |     let count=$count+1;# increment attempts
56 |     [ $count -eq 18 ] && {
57 |         echo >&2 "FAIL: Exceeded max attempts"
58 |         exit 1
59 |     }
60 |     sleep 10
61 | done
62 | fi
63 | 
64 | 
--------------------------------------------------------------------------------
/centos-launcher/Vagrantfile:
--------------------------------------------------------------------------------
1 | 
2 | Vagrant.configure("2") do |config|
3 | 
4 |   config.vm.box = "CentOS-6.3-x86_64-minimal"
5 | 
6 |   config.vm.network :forwarded_port, guest: 4440, host: 14440
7 | 
8 |   config.vm.provision :shell, :path => "bootstrap.sh"
9 |   config.vm.provision :shell, :path => "add-project.sh"
10 | 
11 | end
12 | 
--------------------------------------------------------------------------------
/centos-launcher/add-project.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | export RDECK_BASE=/root/rundeck; export RDECK_HOME=$RDECK_BASE
4 | PATH=$RDECK_BASE/tools/bin:$PATH
5 | die() {
6 |     [[ $# -gt 1 ]] && {
7 |         exit_status=$1
8 |         shift
9 |     }
10 |     printf >&2 "ERROR: $*\n"
11 |     exit ${exit_status:-1}
12 | }
13 | 
14 | trap 'die $? "*** add-project failed. ***"' ERR
15 | set -o nounset -o pipefail
16 | 
17 | # Create an example project
18 | rd-project -a create -p example
19 | 
20 | 
21 | # Run simple commands to double check.
22 | # Print out the available nodes.
23 | # Fire off a command.
24 | dispatch -p example
25 | dispatch -p example -f -- whoami
26 | 
27 | 
--------------------------------------------------------------------------------
/centos-launcher/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | 
4 | # Get the launcher jar
5 | #LAUNCHER_JAR=http://download.rundeck.org/jar/rundeck-launcher-1.5.jar
6 | LAUNCHER_JAR=http://build.rundeck.org/job/candidate-1.5.1/lastSuccessfulBuild/artifact/rundeck-launcher/launcher/build/libs/rundeck-launcher-1.5.1.jar
7 | 
8 | 
9 | 
10 | die() {
11 |     [[ $# -gt 1 ]] && {
12 |         exit_status=$1
13 |         shift
14 |     }
15 |     printf >&2 "ERROR: $*\n"
16 |     exit ${exit_status:-1}
17 | }
18 | 
19 | #trap 'die $? "*** bootstrap failed. ***"' ERR
20 | 
21 | set -o nounset -o pipefail
22 | 
23 | 
24 | # Install the JRE
25 | yum -y install java-1.6.0
26 | echo "Java installed."
27 | # Install rundeck
28 | export RDECK_BASE=$HOME/rundeck; export RDECK_HOME=$RDECK_BASE
29 | mkdir -p $RDECK_BASE
30 | echo "Created $RDECK_BASE"
31 | 
32 | curl -s --fail $LAUNCHER_JAR -o $RDECK_BASE/rundeck-launcher.jar -z $RDECK_BASE/rundeck-launcher.jar
33 | echo "Launcher downloaded."
34 | 
35 | cd $RDECK_BASE
36 | java -jar ./rundeck-launcher.jar --installonly
37 | echo "Installed rundeck."
38 | java_exe=$(readlink /etc/alternatives/java)
39 | export JAVA_HOME="${java_exe%bin/*}"
40 | $RDECK_BASE/tools/bin/rd-setup -n localhost
41 | echo "Configured rundeck"
42 | 
43 | # Disable the firewall so we can easily access it from the host
44 | service iptables stop
45 | 
46 | # Start up rundeck
47 | if ! $RDECK_BASE/server/sbin/rundeckd status
48 | then :;
49 | else $RDECK_BASE/server/sbin/rundeckd stop
50 | fi
51 | 
52 | echo "Starting rundeck..."
53 | (
54 |     exec 0>&- # close stdin
55 |     $RDECK_BASE/server/sbin/rundeckd start
56 | ) &> $RDECK_BASE/var/log/service.log # redirect stdout/err to the log.
57 | 
58 | let count=0
59 | while true
60 | do
61 |     if ! grep "Started SocketConnector@" $RDECK_BASE/var/log/service.log
62 |     then printf >&2 ".";# progress output.
63 |     else break; # matched success message.
64 |     fi
65 |     let count=$count+1;# increment attempts
66 |     [ $count -eq 18 ] && {
67 |         echo >&2 "FAIL: Exceeded max attempts"
68 |         exit 1
69 |     }
70 |     sleep 10
71 | done
72 | 
73 | 
74 | 
--------------------------------------------------------------------------------
/centos-postgressql/Vagrantfile:
--------------------------------------------------------------------------------
1 | 
2 | Vagrant.configure("2") do |config|
3 | 
4 |   config.vm.box = "CentOS-6.3-x86_64-minimal"
5 | 
6 |   config.vm.network :private_network, ip: "192.168.50.20"
7 | 
8 |   config.vm.provision :shell, :path => "bootstrap.sh", :args => "192.168.50.20"
9 | 
10 | end
11 | 
12 | 
--------------------------------------------------------------------------------
/centos-postgressql/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eu
3 | 
4 | RDECK_IP=$1
5 | 
6 | # Install Postgres
7 | #
8 | cat >> /etc/yum.repos.d/CentOS-Base.repo < /var/lib/pgsql/9.4/data/pg_hba.conf.new
22 | 
23 | cat >>/var/lib/pgsql/9.4/data/pg_hba.conf.new < /tmp/createdb.sql < /etc/rundeck/rundeck-config.properties.new
53 | cat >> /etc/rundeck/rundeck-config.properties <
--------------------------------------------------------------------------------
/centos-rpm/Vagrantfile:
--------------------------------------------------------------------------------
1 | 
2 | Vagrant.configure("2") do |config|
3 | 
4 |   config.vm.box = "CentOS-6.3-x86_64-minimal"
5 | 
6 |   config.vm.network :forwarded_port, guest: 4440, host: 14440
7 | 
8 |   config.vm.provision :shell, :path => "bootstrap.sh"
9 |   config.vm.provision :shell, :path => "add-project.sh"
10 | 
11 | end
12 | 
--------------------------------------------------------------------------------
/centos-rpm/add-project.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | 
4 | source $(dirname $0)/include.sh
5 | 
6 | trap 'die $? "*** add-project failed. ***"' ERR
7 | set -o nounset -o pipefail
8 | 
9 | # Create an example project
10 | rd-project -a create -p example
11 | 
12 | 
13 | # Run simple commands to double check.
14 | # Print out the available nodes.
15 | # Fire off a command.
16 | dispatch -p example -v
17 | dispatch -p example -f -- whoami
18 | 
--------------------------------------------------------------------------------
/centos-rpm/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | 
4 | # The packages
5 | RUNDECK_RPM=http://build.rundeck.org/job/candidate-1.5.1/lastSuccessfulBuild/artifact/packaging/rpmdist/RPMS/noarch/rundeck-1.5.1-1.2.GA.noarch.rpm
6 | RUNDECK_CFG=http://build.rundeck.org/job/candidate-1.5.1/lastSuccessfulBuild/artifact/packaging/rpmdist/RPMS/noarch/rundeck-config-1.5.1-1.2.GA.noarch.rpm
7 | 
8 | source $(dirname $0)/include.sh
9 | 
10 | 
11 | #trap 'die $? "*** bootstrap failed. ***"' ERR
12 | 
13 | set -o nounset -o pipefail
14 | 
15 | 
16 | # Install the JRE via yum
17 | yum -y install java-1.6.0
18 | echo "Java installed."
19 | 
20 | 
21 | curl -# --fail $RUNDECK_RPM -o rundeck.rpm -z rundeck.rpm
22 | curl -# --fail $RUNDECK_CFG -o rundeck-cfg.rpm -z rundeck-cfg.rpm
23 | 
24 | echo "RPM downloaded."
25 | 
26 | 
27 | rpm -i rundeck.rpm --nodeps
28 | rpm -i rundeck-cfg.rpm --nodeps
29 | 
30 | echo "Installed rundeck."
31 | 
32 | 
33 | # Disable the firewall so we can easily access it from the host
34 | service iptables stop
35 | 
36 | # Start up rundeck
37 | if ! /etc/init.d/rundeckd status
38 | then :;
39 | else /etc/init.d/rundeckd stop
40 | fi
41 | 
42 | echo "Starting rundeck..."
43 | (
44 |     exec 0>&- # close stdin
45 |     /etc/init.d/rundeckd start
46 | ) &> /var/log/rundeck/service.log # redirect stdout/err to the log.
47 | 
48 | let count=0
49 | while true
50 | do
51 |     if ! grep "Started SocketConnector@" /var/log/rundeck/service.log
52 |     then printf >&2 ".";# progress output.
53 |     else break; # matched success message.
54 |     fi
55 |     let count=$count+1;# increment attempts
56 |     [ $count -eq 18 ] && {
57 |         echo >&2 "FAIL: Exceeded max attempts"
58 |         exit 1
59 |     }
60 |     sleep 10
61 | done
62 | 
63 | 
64 | 
--------------------------------------------------------------------------------
/centos-rpm/include.sh:
--------------------------------------------------------------------------------
1 | 
2 | die() {
3 |     [[ $# -gt 1 ]] && {
4 |         exit_status=$1
5 |         shift
6 |     }
7 |     printf >&2 "ERROR: $*\n"
8 |     exit ${exit_status:-1}
9 | }
10 | 
--------------------------------------------------------------------------------
/centos-rpm/run-test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | 
5 | source $(dirname $0)/include.sh
6 | 
7 | # Get the git client installed.
8 | yum -y install git
9 | 
10 | if [ ! -d rundeck ]
11 | then git clone git://github.com/dtolabs/rundeck.git
12 | else (cd rundeck && git pull)
13 | fi
14 | 
15 | cd rundeck
16 | bash test/test.sh
--------------------------------------------------------------------------------
/centos-yum-ldap/Vagrantfile:
--------------------------------------------------------------------------------
1 | 
2 | Vagrant.configure("2") do |config|
3 | 
4 |   #RUNDECK_YUM_REPO="https://bintray.com/rundeck/candidate-rpm/rpm"
5 |   RUNDECK_YUM_REPO="https://bintray.com/gschueler/ci-rundeck2-rpm/rpm"
6 |   RUNDECK_IP="192.168.50.4"
7 |   NAME="rundeck"
8 | 
9 |   config.vm.box = "CentOS-6.3-x86_64-minimal"
10 |   config.vm.box_url = "https://dl.dropbox.com/u/7225008/Vagrant/CentOS-6.3-x86_64-minimal.box"
11 | 
12 |   config.vm.network :forwarded_port, guest: 4440, host: 14440
13 |   config.vm.network :forwarded_port, guest: 389, host: 3890
14 | 
15 |   config.vm.network :private_network, ip: "#{RUNDECK_IP}"
16 | 
17 |   config.vm.provision :shell, :path => "provisioning/install-openldap.sh"
18 |   config.vm.provision :shell, :path => "provisioning/install-rundeck.sh", :args => "#{NAME} #{RUNDECK_IP} #{RUNDECK_YUM_REPO}"
19 | 
20 | end
21 | 
--------------------------------------------------------------------------------
/centos-yum-ldap/default.ldif:
--------------------------------------------------------------------------------
1 | # Define top-level entry:
2 | dn: dc=example,dc=com
3 | objectClass: dcObject
4 | objectClass: organization
5 | o: Example, Inc.
6 | dc: example 7 | 8 | # Define an entry to contain users: 9 | dn: ou=users,dc=example,dc=com 10 | objectClass: organizationalUnit 11 | ou: users 12 | 13 | # Define some users: 14 | dn: cn=admin,ou=users,dc=example,dc=com 15 | userPassword: admin 16 | objectClass: person 17 | sn: The admin account for the Example client to use 18 | cn: admin 19 | 20 | dn: cn=build,ou=users,dc=example,dc=com 21 | userPassword: build 22 | objectClass: person 23 | sn: The account to use to demonstrate managing builds only 24 | cn: build 25 | 26 | dn: cn=deploy,ou=users,dc=example,dc=com 27 | userPassword: deploy 28 | objectClass: person 29 | sn: The account to use to demonstrate managing deployment only 30 | cn: deploy 31 | 32 | # Define an entry to contain roles: 33 | dn: ou=roles,dc=example,dc=com 34 | objectClass: organizationalUnit 35 | ou: roles 36 | 37 | # Define some roles and their membership: 38 | dn: cn=architect,ou=roles,dc=example,dc=com 39 | objectClass: groupOfUniqueNames 40 | uniqueMember: cn=admin,ou=users,dc=example,dc=com 41 | cn: architect 42 | 43 | dn: cn=admin,ou=roles,dc=example,dc=com 44 | objectClass: groupOfUniqueNames 45 | uniqueMember: cn=admin,ou=users,dc=example,dc=com 46 | cn: admin 47 | 48 | dn: cn=user,ou=roles,dc=example,dc=com 49 | objectClass: groupOfUniqueNames 50 | uniqueMember: cn=admin,ou=users,dc=example,dc=com 51 | uniqueMember: cn=deploy,ou=users,dc=example,dc=com 52 | uniqueMember: cn=build,ou=users,dc=example,dc=com 53 | cn: user 54 | 55 | dn: cn=build,ou=roles,dc=example,dc=com 56 | objectClass: groupOfUniqueNames 57 | uniqueMember: cn=admin,ou=users,dc=example,dc=com 58 | uniqueMember: cn=build,ou=users,dc=example,dc=com 59 | cn: build 60 | 61 | dn: cn=deploy,ou=roles,dc=example,dc=com 62 | objectClass: groupOfUniqueNames 63 | uniqueMember: cn=admin,ou=users,dc=example,dc=com 64 | uniqueMember: cn=deploy,ou=users,dc=example,dc=com 65 | cn: deploy 66 | 67 | -------------------------------------------------------------------------------- /centos-yum-ldap/jaas-loginmodule.conf: -------------------------------------------------------------------------------- 1 | RDpropertyfilelogin_X { 2 | org.mortbay.jetty.plus.jaas.spi.PropertyFileLoginModule required 3 | debug="true" 4 | file="/etc/rundeck/realm.properties"; 5 | }; 6 | 7 | RDpropertyfilelogin { 8 | com.dtolabs.rundeck.jetty.jaas.JettyCachingLdapLoginModule required 9 | debug="true" 10 | contextFactory="com.sun.jndi.ldap.LdapCtxFactory" 11 | providerUrl="ldap://localhost:389" 12 | bindDn="cn=Manager,dc=example,dc=com" 13 | bindPassword="password" 14 | authenticationMethod="simple" 15 | forceBindingLogin="false" 16 | userBaseDn="ou=users,dc=example,dc=com" 17 | userRdnAttribute="cn" 18 | userIdAttribute="cn" 19 | userPasswordAttribute="userPassword" 20 | userObjectClass="person" 21 | roleBaseDn="ou=roles,dc=example,dc=com" 22 | roleNameAttribute="cn" 23 | roleMemberAttribute="uniqueMember" 24 | roleObjectClass="groupOfUniqueNames" 25 | cacheDurationMillis="0" 26 | reportStatistics="true"; 27 | }; 28 | -------------------------------------------------------------------------------- /centos-yum-ldap/provisioning/functions.sh: -------------------------------------------------------------------------------- 1 | wait_for_success_msg () { 2 | [[ $# = 2 ]] || { 3 | echo >&2 'usage: wait_for_success_msg success_msg logfile' 4 | return 2 5 | } 6 | 7 | success_msg=$1 8 | logfile=$2 9 | let count=0 max=18 10 | 11 | while [ $count -le $max ] 12 | do 13 | if ! grep "${success_msg}" $2 14 | then printf >&2 ".";# output message. 
15 |     else break; # successful message.
16 |     fi
17 |     let count=$count+1;# increment attempts count.
18 |     [ $count -eq $max ] && {
19 |         echo >&2 "FAIL: Exceeded max attempts"
20 |         exit 1
21 |     }
22 |     sleep 10; # wait 10s before trying again.
23 | done
24 | }
25 | 
--------------------------------------------------------------------------------
/centos-yum-ldap/provisioning/install-openldap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Exit immediately on error or undefined variable.
4 | set -e
5 | set -u
6 | 
7 | # Process command line arguments.
8 | 
9 | # Software install
10 | # ----------------
11 | 
12 | yum -y install openldap
13 | yum -y install openldap-servers
14 | yum -y install openldap-clients
15 | 
16 | 
17 | 
18 | 
19 | # set root password
20 | PASS=`slappasswd -s "password"`
21 | cat /vagrant/slapd.conf | sed "s#^rootpw.*\$#rootpw ${PASS}#" > /etc/openldap/slapd.conf
22 | 
23 | rm -rf /etc/openldap/slapd.d
24 | 
25 | # Start up openldap
26 | # ----------------
27 | 
28 | service slapd restart
29 | 
30 | # load ldif
31 | # -------------
32 | 
33 | sleep 2
34 | 
35 | ldapadd -D cn=Manager,dc=example,dc=com -x -w password -f /vagrant/default.ldif
36 | 
37 | ldapsearch -D cn=Manager,dc=example,dc=com -x -w password -b 'ou=users,dc=example,dc=com' '(cn=*)' *.*
38 | 
39 | ldapsearch -D cn=Manager,dc=example,dc=com -x -w password -b 'ou=roles,dc=example,dc=com' '(cn=*)' *.*
40 | 
41 | # Done.
42 | exit $?
43 | 
--------------------------------------------------------------------------------
/centos-yum-ldap/provisioning/install-rundeck.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Exit immediately on error or undefined variable.
4 | set -e
5 | set -u
6 | 
7 | # Process command line arguments.
8 | 
9 | if [ $# -lt 3 ]
10 | then
11 |     echo >&2 "usage: $0 name IP rundeck_yum_repo"
12 |     exit 1
13 | fi
14 | NAME=$1
15 | IP=$2
16 | RUNDECK_REPO_URL=$3
17 | 
18 | # Software install
19 | # ----------------
20 | 
21 | #
22 | # JRE
23 | #
24 | #yum -y install java7
25 | yum -y install java-1.7.0-openjdk
26 | 
27 | # Rundeck
28 | #
29 | if [ -n "$RUNDECK_REPO_URL" ]
30 | then
31 |     curl -# --fail -L -o /etc/yum.repos.d/rundeck.repo "$RUNDECK_REPO_URL" || {
32 |         echo "failed downloading rundeck.repo config"
33 |         exit 2
34 |     }
35 | else
36 |     if ! rpm -q rundeck-repo
37 |     then
38 |         rpm -Uvh http://repo.rundeck.org/latest.rpm
39 |     fi
40 | fi
41 | yum -y --skip-broken install rundeck
42 | 
43 | cp /vagrant/jaas-loginmodule.conf /etc/rundeck/
44 | 
45 | #
46 | # Disable the firewall so we can easily access it from any host.
47 | service iptables stop
48 | #
49 | 
50 | # Configure rundeck.
51 | # -----------------
52 | 
53 | 
54 | #
55 | # Configure the server URLS.
56 | cd /etc/rundeck
57 | 
58 | # Update the framework.properties with name
59 | sed -i \
60 |     -e "s/localhost/$NAME/g" \
61 |     -e "s,framework.server.url = .*,framework.server.url = http://$IP:4440,g" \
62 |     -e "s,framework.rundeck.url = .*,framework.rundeck.url = http://$IP:4440,g" \
63 |     framework.properties
64 | 
65 | sed -i \
66 |     -e "s,grails.serverURL=.*,grails.serverURL=http://$IP:4440,g" \
67 |     rundeck-config.properties
68 | 
69 | chown rundeck:rundeck framework.properties
70 | 
71 | 
72 | # Start up rundeck
73 | # ----------------
74 | 
75 | # Check if rundeck is running and start it if necessary.
76 | # Checks if startup message is contained by log file.
77 | # Fails and exits non-zero if reaches max tries.
78 | 79 | set +e; # shouldn't have to turn off errexit. 80 | 81 | source /vagrant/provisioning/functions.sh 82 | success_msg="Connector@" 83 | if ! service rundeckd status 84 | then 85 | echo "Starting rundeck..." 86 | ( 87 | exec 0>&- # close stdin 88 | service rundeckd start 89 | ) &> /var/log/rundeck/service.log # redirect stdout/err to a log. 90 | 91 | wait_for_success_msg "$success_msg" /var/log/rundeck/service.log 92 | 93 | fi 94 | 95 | echo "Rundeck started." 96 | 97 | 98 | # Done. 99 | exit $? 100 | -------------------------------------------------------------------------------- /centos-yum-ldap/slapd.conf: -------------------------------------------------------------------------------- 1 | # 2 | # See slapd.conf(5) for details on configuration options. 3 | # This file should NOT be world readable. 4 | # 5 | include /etc/openldap/schema/core.schema 6 | include /etc/openldap/schema/cosine.schema 7 | include /etc/openldap/schema/inetorgperson.schema 8 | include /etc/openldap/schema/nis.schema 9 | 10 | # Allow LDAPv2 client connections. This is NOT the default. 11 | allow bind_v2 12 | 13 | # Do not enable referrals until AFTER you have a working directory 14 | # service AND an understanding of referrals. 15 | #referral ldap://root.openldap.org 16 | 17 | pidfile /var/run/openldap/slapd.pid 18 | argsfile /var/run/openldap/slapd.args 19 | 20 | # Load dynamic backend modules: 21 | # modulepath /usr/lib/openldap 22 | 23 | # Modules available in openldap-servers-overlays RPM package 24 | # Module syncprov.la is now statically linked with slapd and there 25 | # is no need to load it here 26 | # moduleload accesslog.la 27 | # moduleload auditlog.la 28 | # moduleload denyop.la 29 | # moduleload dyngroup.la 30 | # moduleload dynlist.la 31 | # moduleload lastmod.la 32 | # moduleload pcache.la 33 | # moduleload ppolicy.la 34 | # moduleload refint.la 35 | # moduleload retcode.la 36 | # moduleload rwm.la 37 | # moduleload smbk5pwd.la 38 | # moduleload translucent.la 39 | # moduleload unique.la 40 | # moduleload valsort.la 41 | 42 | # modules available in openldap-servers-sql RPM package: 43 | # moduleload back_sql.la 44 | 45 | # The next three lines allow use of TLS for encrypting connections using a 46 | # dummy test certificate which you can generate by changing to 47 | # /etc/pki/tls/certs, running "make slapd.pem", and fixing permissions on 48 | # slapd.pem so that the ldap user or group can read it. Your client software 49 | # may balk at self-signed certificates, however. 
50 | # TLSCACertificateFile /etc/pki/tls/certs/ca-bundle.crt 51 | # TLSCertificateFile /etc/pki/tls/certs/slapd.pem 52 | # TLSCertificateKeyFile /etc/pki/tls/certs/slapd.pem 53 | 54 | # Sample security restrictions 55 | # Require integrity protection (prevent hijacking) 56 | # Require 112-bit (3DES or better) encryption for updates 57 | # Require 63-bit encryption for simple bind 58 | # security ssf=1 update_ssf=112 simple_bind=64 59 | 60 | # Sample access control policy: 61 | # Root DSE: allow anyone to read it 62 | # Subschema (sub)entry DSE: allow anyone to read it 63 | # Other DSEs: 64 | # Allow self write access 65 | # Allow authenticated users read access 66 | # Allow anonymous users to authenticate 67 | # Directives needed to implement policy: 68 | # access to dn.base="" by * read 69 | # access to dn.base="cn=Subschema" by * read 70 | # access to * 71 | # by self write 72 | # by users read 73 | # by anonymous auth 74 | # 75 | # if no access controls are present, the default policy 76 | # allows anyone and everyone to read anything but restricts 77 | # updates to rootdn. (e.g., "access to * by * read") 78 | # 79 | # rootdn can always read and write EVERYTHING! 80 | 81 | ####################################################################### 82 | # ldbm and/or bdb database definitions 83 | ####################################################################### 84 | 85 | database bdb 86 | suffix "dc=example,dc=com" 87 | rootdn "cn=Manager,dc=example,dc=com" 88 | # Cleartext passwords, especially for the rootdn, should 89 | # be avoided. See slappasswd(8) and slapd.conf(5) for details. 90 | # Use of strong authentication encouraged. 91 | # rootpw secret 92 | # rootpw {crypt}ijFYNcSNctBYg 93 | #rootpw {SSHA}32TMg1vzIdh10v1xGKS7+7L8oLv/tpGC 94 | #monkey 95 | rootpw password 96 | 97 | # The database directory MUST exist prior to running slapd AND 98 | # should only be accessible by the slapd and slap tools. 99 | # Mode 700 recommended. 100 | directory /var/lib/ldap 101 | 102 | # Indices to maintain for this database 103 | index objectClass eq,pres 104 | index ou,cn,mail,surname,givenname eq,pres,sub 105 | index uidNumber,gidNumber,loginShell eq,pres 106 | index uid,memberUid eq,pres,sub 107 | index nisMapName,nisMapEntry eq,pres,sub 108 | 109 | # Replicas of this database 110 | #replogfile /var/lib/ldap/openldap-master-replog 111 | #replica host=ldap-1.example.com:389 starttls=critical 112 | # bindmethod=sasl saslmech=GSSAPI 113 | # authcId=host/ldap-master.example.com@EXAMPLE.COM 114 | 115 | loglevel any 116 | logfile /var/log/slapd.log 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /centos-yum/Vagrantfile: -------------------------------------------------------------------------------- 1 | 2 | Vagrant.configure("2") do |config| 3 | 4 | config.vm.box = "CentOS-6.3-x86_64-minimal" 5 | 6 | config.vm.network :forwarded_port, guest: 4440, host: 14440 7 | 8 | config.vm.provision :shell, :path => "bootstrap.sh" 9 | config.vm.provision :shell, :path => "add-project.sh", :args => "examples" 10 | 11 | end 12 | -------------------------------------------------------------------------------- /centos-yum/add-project.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -u 5 | 6 | 7 | if [ $# -ne 1 ] 8 | then 9 | echo >&2 "usage: add-project project" 10 | exit 1 11 | fi 12 | PROJECT=$1 13 | 14 | 15 | echo Creating project $PROJECT... 
16 | # Create an example project
17 | rd-project -a create -p $PROJECT
18 | 
19 | # Run simple commands to double check.
20 | dispatch -p $PROJECT
21 | # Run an adhoc command.
22 | dispatch -p $PROJECT -f -- whoami
23 | 
24 | exit $?
--------------------------------------------------------------------------------
/centos-yum/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | #set -e
4 | #set -u
5 | 
6 | 
7 | # Software install
8 | # ----------------
9 | #
10 | # Utilities
11 | #
12 | 
13 | #
14 | # JRE
15 | #
16 | yum -y install java-1.6.0
17 | #
18 | # Rundeck
19 | #
20 | if ! rpm -q rundeck-repo
21 | then
22 |     rpm -Uvh http://repo.rundeck.org/latest.rpm
23 | fi
24 | yum -y install rundeck
25 | 
26 | # Reset the home directory permission as it comes group writeable.
27 | # This is needed for ssh requirements.
28 | chmod 755 ~rundeck
29 | 
30 | # Configure the system
31 | #
32 | 
33 | #
34 | # Disable the firewall so we can easily access it from the host
35 | service iptables stop
36 | #
37 | 
38 | 
39 | # Start up rundeck
40 | # ----------------
41 | #
42 | mkdir -p /var/log/vagrant
43 | if ! /etc/init.d/rundeckd status
44 | then
45 |     echo "Starting rundeck..."
46 |     (
47 |         exec 0>&- # close stdin
48 |         /etc/init.d/rundeckd start
49 |     ) &> /var/log/rundeck/service.log # redirect stdout/err to a log.
50 | 
51 |     let count=0
52 |     let max=18
53 |     while [ $count -le $max ]
54 |     do
55 |         if ! grep "Started SelectChannelConnector@" /var/log/rundeck/service.log
56 |         then printf >&2 ".";# progress output.
57 |         else break; # successful message.
58 |         fi
59 |         let count=$count+1;# increment attempts
60 |         [ $count -eq $max ] && {
61 |             echo >&2 "FAIL: Exceeded max attempts"
62 |             exit 1
63 |         }
64 |         sleep 10
65 |     done
66 | fi
67 | 
68 | echo "Rundeck started."
69 | 
70 | exit $?
--------------------------------------------------------------------------------
/jenkins/Vagrantfile:
--------------------------------------------------------------------------------
1 | 
2 | Vagrant.configure("2") do |config|
3 |   config.vm.box = "CentOS-6.3-x86_64-minimal"
4 |   config.vm.box_url = "https://dl.dropbox.com/u/7225008/Vagrant/CentOS-6.3-x86_64-minimal.box"
5 | 
6 |   IP="192.168.50.10"
7 |   RUNDECK_IP="192.168.50.4"
8 | 
9 |   config.vm.define :jenkins do |jenkins|
10 |     jenkins.vm.hostname = "jenkins"
11 |     jenkins.vm.network :private_network, ip: "#{IP}"
12 |     jenkins.vm.provision :shell, :path => "provisioning/install-jenkins.sh", :args => "#{RUNDECK_IP}"
13 |   end
14 | 
15 | end
16 | 
--------------------------------------------------------------------------------
/jenkins/provisioning/functions.sh:
--------------------------------------------------------------------------------
1 | wait_for_success_msg () {
2 |     [[ $# = 2 ]] || {
3 |         echo >&2 'usage: wait_for_success_msg success_msg logfile'
4 |         return 2
5 |     }
6 | 
7 |     success_msg=$1
8 |     logfile=$2
9 |     let count=0 max=18
10 | 
11 |     while [ $count -le $max ]
12 |     do
13 |         if ! grep "${success_msg}" $2
14 |         then printf >&2 ".";# output message.
15 |         else break; # successful message.
16 |         fi
17 |         let count=$count+1;# increment attempts count.
18 |         [ $count -eq $max ] && {
19 |             echo >&2 "FAIL: Exceeded max attempts"
20 |             exit 1
21 |         }
22 |         sleep 10; # wait 10s before trying again.
23 |     done
24 | }
25 | 
--------------------------------------------------------------------------------
/jenkins/provisioning/install-jenkins.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Exit immediately on error or undefined variable.
4 | set -e
5 | set -u
6 | 
7 | RUNDECK_IP=$1
8 | 
9 | # Process command line arguments.
10 | # ----------------
11 | 
12 | # Software install
13 | # ----------------
14 | 
15 | #
16 | # JRE
17 | #
18 | #yum -y install java7
19 | yum -y install java-1.7.0-openjdk
20 | #
21 | # Jenkins
22 | #
23 | 
24 | curl -# --fail -L -o /etc/yum.repos.d/jenkins.repo http://pkg.jenkins-ci.org/redhat/jenkins.repo || {
25 |     echo "failed downloading jenkins.repo config"
26 |     exit 3
27 | }
28 | rpm --import http://pkg.jenkins-ci.org/redhat/jenkins-ci.org.key
29 | 
30 | yum -y install jenkins
31 | 
32 | mkdir -p /var/lib/jenkins/examples
33 | cp /vagrant/provisioning/simple-1.0.0.war /var/lib/jenkins/examples
34 | chown -R jenkins:jenkins /var/lib/jenkins/examples
35 | echo "Sample war file: $(ls /var/lib/jenkins/examples)"
36 | 
37 | # Configure jenkins.
38 | # -----------------
39 | 
40 | 
41 | # Start up jenkins
42 | # ----------------
43 | 
44 | source /vagrant/provisioning/functions.sh
45 | success_msg="Jenkins is fully up and running"
46 | if ! service jenkins status
47 | then
48 |     service jenkins start
49 |     wait_for_success_msg "$success_msg" /var/log/jenkins/jenkins.log
50 | fi
51 | 
52 | echo "Jenkins started."
53 | service iptables stop
54 | 
55 | 
56 | # Install the rundeck plugin using the jenkins CLI.
57 | curl -s --fail -o jenkins-cli.jar http://localhost:8080/jnlpJars/jenkins-cli.jar
58 | 
59 | if test -f /vagrant/rundeck.hpi ; then
60 |     java -jar jenkins-cli.jar -s http://localhost:8080 install-plugin \
61 |         /vagrant/rundeck.hpi
62 | else
63 |     java -jar jenkins-cli.jar -s http://localhost:8080 install-plugin \
64 |         http://updates.jenkins-ci.org/download/plugins/rundeck/2.11/rundeck.hpi
65 | fi
66 | 
67 | # Configure the plugin.
68 | sed "s/localhost/$RUNDECK_IP/g" /vagrant/provisioning/jenkins/org.jenkinsci.plugins.rundeck.RundeckNotifier.xml > /var/lib/jenkins/org.jenkinsci.plugins.rundeck.RundeckNotifier.xml
69 | chown jenkins:jenkins /var/lib/jenkins/org.jenkinsci.plugins.rundeck.RundeckNotifier.xml
70 | 
71 | # Load job definition.
72 | java -jar jenkins-cli.jar -s http://localhost:8080 create-job simple \
73 |     < /vagrant/provisioning/jenkins/simple.xml
74 | 
75 | # Restart it to finalize the install.
76 | java -jar jenkins-cli.jar -s http://localhost:8080 safe-restart
77 | 
78 | 
79 | 
80 | # Done.
81 | exit $?
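
Once install-jenkins.sh finishes, the plugin install can be spot-checked with the same Jenkins CLI jar the script downloads; a minimal check, run on the Jenkins VM, might be:

    # Confirm the rundeck plugin registered with Jenkins after the restart.
    java -jar jenkins-cli.jar -s http://localhost:8080 list-plugins | grep -i rundeck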
82 | -------------------------------------------------------------------------------- /jenkins/provisioning/jenkins/org.jenkinsci.plugins.rundeck.RundeckNotifier.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | http://localhost:4440 5 | admin 6 | admin 7 | 8 | -------------------------------------------------------------------------------- /jenkins/provisioning/jenkins/simple.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | true 9 | false 10 | false 11 | false 12 | 13 | false 14 | 15 | 16 | cp /var/lib/jenkins/examples/simple-1.0.0.war simple-1.0.$BUILD_NUMBER.war 17 | 18 | 19 | 20 | 21 | *.war 22 | false 23 | false 24 | 25 | 26 | simple:deploy 27 | build_number=$BUILD_NUMBER 28 | war_url=$BUILD_URL/artifact/simple-1.0.$BUILD_NUMBER.war 29 | tomcat 30 | 31 | true 32 | false 33 | 34 | 35 | 36 | -------------------------------------------------------------------------------- /jenkins/provisioning/simple-1.0.0.war: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ahonor/rundeck-vagrant/03aaa9b14eb619b82bd32925978a1e60b1a7416b/jenkins/provisioning/simple-1.0.0.war -------------------------------------------------------------------------------- /nexus-plugin/Vagrantfile: -------------------------------------------------------------------------------- 1 | 2 | Vagrant.configure("2") do |config| 3 | config.vm.box = "CentOS-6.3-x86_64-minimal" 4 | config.vm.box_url = "https://dl.dropbox.com/u/7225008/Vagrant/CentOS-6.3-x86_64-minimal.box" 5 | 6 | IP="192.168.50.20" 7 | 8 | config.vm.define :nexus do |nexus| 9 | nexus.vm.hostname = "nexus" 10 | nexus.vm.network :private_network, ip: "#{IP}" 11 | nexus.vm.provision :shell, :path => "provisioning/install-nexus.sh", :args => "#{IP}" 12 | end 13 | 14 | end 15 | 16 | -------------------------------------------------------------------------------- /nexus-plugin/provisioning/install-nexus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | yum install -y zip unzip curl java7 4 | 5 | echo "installing nexus..." 6 | 7 | if ! 
id nexus
8 | then
9 |     useradd -d /usr/local/nexus -M nexus; echo "created nexus user"
10 | fi
11 | 
12 | curl -L http://www.sonatype.org/downloads/nexus-latest-bundle.zip -o nexus-latest-bundle.zip
13 | 
14 | unzip nexus-latest-bundle.zip -d /usr/local
15 | nexus_home=/usr/local/nexus-2*
16 | 
17 | 
18 | mkdir -p /usr/local/sonatype-work/nexus/plugin-repository
19 | curl -L https://github.com/downloads/vbehar/nexus-rundeck-plugin/nexus-rundeck-plugin-1.2.2.2-bundle.zip -o nexus-rundeck-plugin.zip
20 | unzip nexus-rundeck-plugin.zip -d /usr/local/sonatype-work/nexus/plugin-repository
21 | 
22 | ln -s $nexus_home /usr/local/nexus
23 | 
24 | sed -i 's,NEXUS_HOME=.*,NEXUS_HOME=/usr/local/nexus,g' /usr/local/nexus/bin/nexus
25 | sed -i 's,#RUN_AS_USER=,RUN_AS_USER=nexus,g' /usr/local/nexus/bin/nexus
26 | sed -i 's,#PIDDIR=.*,PIDDIR=/usr/local/sonatype-work,g' /usr/local/nexus/bin/nexus
27 | 
28 | chown -R nexus $nexus_home /usr/local/sonatype-work
29 | ln -s /usr/local/nexus/bin/nexus /etc/init.d/nexus
30 | chkconfig --add nexus
31 | chkconfig --levels 345 nexus on
32 | service nexus start
33 | 
34 | service iptables stop
35 | 
36 | 
37 | tail /usr/local/nexus/logs/wrapper.log
38 | 
39 | echo "Log in to nexus: admin/admin123"
--------------------------------------------------------------------------------
/primary-secondary-failover/README.md:
--------------------------------------------------------------------------------
1 | This is a multi-machine vagrant configuration that
2 | models two rundeck instances sharing a mysql database.
3 | 
4 | ## Vagrant configuration.
5 | 
6 | The vagrant configuration defines the following virtual machines:
7 | 
8 | * **mysql**: Common database instance shared by both rundeck hosts.
9 | * **webdav**: Shared log storage used by both rundeck hosts.
10 | * **rundeck1**: The user-facing "primary" rundeck.
11 | * **rundeck2**: The standby rundeck instance. It runs jobs
12 | to sync from the _primary_ and check it to see if the secondary should "take over".
13 | 
14 | All machines use the same centos base box and install software via yum/rpm.
15 | 
16 | 
17 | ## Requirements
18 | 
19 | * Internet access to download packages from public repositories.
20 | * [Vagrant 1.2.2](http://downloads.vagrantup.com)
21 | 
22 | ## Startup
23 | 
24 | Start up the VMs in the following order.
25 | 
26 |     vagrant up mysql
27 |     vagrant up webdav
28 |     vagrant up rundeck1
29 |     vagrant up rundeck2
30 | 
31 | You can access the rundecks from your host machine through the private network addresses:
32 | 
33 | * rundeck1: http://192.168.50.14:4440
34 | * rundeck2: http://192.168.50.15:4440
35 | 
36 | Log in to either rundeck instance using user/pass: admin/admin
37 | 
38 | ### Shell Logins
39 | 
40 | You can log in to any VM via vagrant ssh, e.g., to log in to the secondary rundeck:
41 | 
42 |     vagrant ssh rundeck2
43 | 
44 | Once logged in as vagrant, you can use sudo/su to change users.
45 | Here's how to change user to the rundeck login:
46 | 
47 |     sudo su - rundeck
48 | 
49 | You can also ssh to the rundeck VMs using user/password: rundeck/rundeck
50 | 
51 | ## Operations
52 | 
53 | The jobs used for synchronization and takeover are in a job group
54 | called "failover". The jobs are pre-configured by the provisioning process
55 | and have default option values appropriate for this environment.
56 | 
57 | * failover/Check-Or-Takeover: Runs the check job and if it fails, the "takeover" job executes.
58 | * failover/Sync: Synchronize the job state data from the primary.
59 | * failover/check: Test if the primary responds to an API call.
60 | * failover/takeover: Turn off the schedule for the failover jobs and update the secondary's tags.
61 | 
62 | Go to the secondary rundeck and navigate to the "failover/check" job and run it.
63 | You should see system info about the primary displayed in the job output.
64 | 
65 |     05:35:59 # System Stats for Rundeck 2.0.0 on node rundeck1
66 |     05:35:59 - up since: 2014-01-29T15:12:06Z
67 |     05:35:59 - cpu avg: 0.0
68 |     05:35:59 - mem free: 124876264
69 | 
70 | Next, try the "failover/Sync" job. This job uses rsync to copy job output logs from the primary
71 | so they are viewable on the secondary.
72 | 
73 | ## Takeover Scripts
74 | 
75 | The failover/takeover job is responsible for executing any procedure
76 | needed to transition the secondary server to become the primary server.
77 | 
78 | The takeover job defines three steps that each call a separate script:
79 | 
80 | * update-jobs.sh: Removes the cron schedule for the Check-Or-Takeover and Sync jobs.
81 | * update-resources.sh: Updates the resource data to show the secondary is now tagged primary.
82 | * do-switch.sh: This is a placeholder script that might update a load balancer or EIPs.
83 | 
84 | (A sketch of how these steps chain together appears after the Vagrantfile below.)
85 | 
--------------------------------------------------------------------------------
/primary-secondary-failover/Vagrantfile:
--------------------------------------------------------------------------------
1 | 
2 | Vagrant.configure("2") do |config|
3 |   config.vm.box = "CentOS-6.3-x86_64-minimal"
4 |   config.vm.box_url = "https://dl.dropbox.com/u/7225008/Vagrant/CentOS-6.3-x86_64-minimal.box"
5 | 
6 |   PROJECT="examples"
7 |   RUNDECK_YUM_REPO="https://bintray.com/rundeck/rundeck-rpm/rpm"
8 | 
9 |   RERUN_YUM_REPO="https://bintray.com/rerun/rerun-rpm/rpm"
10 |   PRIMARY="rundeck1"
11 |   SECONDARY="rundeck2"
12 |   WEBDAV_URL="http://192.168.50.7/dav"
13 | 
14 |   config.vm.define :mysql do |mysql|
15 |     mysql.vm.hostname = "centos-mysql.local"
16 |     mysql.vm.network :private_network, ip: "192.168.50.6"
17 |     mysql.vm.provision :shell, :path => "install-mysql.sh", :args => "'mysql'"
18 |   end
19 | 
20 | 
21 |   config.vm.define :webdav do |webdav|
22 |     webdav.vm.hostname = "centos-webdav.local"
23 |     webdav.vm.network :private_network, ip: "192.168.50.7"
24 |     webdav.vm.provision :shell, :path => "install-httpd.sh", :args => "'webdav'"
25 |   end
26 | 
27 |   config.vm.define :rundeck1 do |primary|
28 |     primary.vm.hostname = "#{PRIMARY}"
29 |     primary.vm.network :private_network, ip: "192.168.50.14"
30 |     primary.vm.provision :shell, :path => "install-rundeck.sh", :args => "#{PRIMARY} 192.168.50.6 #{RUNDECK_YUM_REPO} #{RERUN_YUM_REPO} #{WEBDAV_URL}"
31 |     primary.vm.provision :shell, :path => "add-project.sh", :args => "#{PROJECT} #{PRIMARY} 192.168.50.14 rundeck,primary #{WEBDAV_URL}"
32 |     primary.vm.provision :shell, :path => "generate-apikey.sh"
33 |   end
34 | 
35 |   config.vm.define :rundeck2 do |secondary|
36 |     secondary.vm.hostname = "#{SECONDARY}"
37 |     secondary.vm.network :private_network, ip: "192.168.50.15"
38 |     secondary.vm.provision :shell, :path => "install-rundeck.sh", :args => "#{SECONDARY} 192.168.50.6 #{RUNDECK_YUM_REPO} #{RERUN_YUM_REPO} #{WEBDAV_URL}"
39 |     secondary.vm.provision :shell, :path => "add-project.sh", :args => "#{PROJECT} #{SECONDARY} 192.168.50.15 rundeck,secondary #{WEBDAV_URL}"
40 |     secondary.vm.provision :shell, :path => "add-primary.sh", :args => "#{PROJECT} #{PRIMARY} 192.168.50.14"
41 |     secondary.vm.provision :shell, :path => "load-jobs.sh", :args => "#{PROJECT} 192.168.50.14:4440"
42 |   end
43 | 
44 | end
45 | 
46 | 
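
The Check-Or-Takeover job itself is defined in jobs/jobs.xml, which is not reproduced in this listing. As a rough sketch of the sequence it drives, assuming the option values the README describes — PRIMARY_URL, LOCAL_URL, API_KEY, PROJECT, and NODE are illustrative names for those values, not the job's actual option names:

    #!/usr/bin/env bash
    # Sketch: probe the primary with check.sh; only when the health
    # check fails do the takeover steps run, in the order the README lists.
    set -u
    if ! ./failover/check.sh "$PRIMARY_URL" "$API_KEY" 60
    then
        ./failover/update-jobs.sh "$LOCAL_URL" "$API_KEY" "$PROJECT"
        ./failover/update-resources.sh "$LOCAL_URL" "$API_KEY" "$PROJECT" "$NODE"
        ./failover/do-switch.sh
    fi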
--------------------------------------------------------------------------------
/primary-secondary-failover/add-primary.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | set -u
5 | 
6 | 
7 | if [ $# -lt 3 ]
8 | then
9 |     echo >&2 "usage: add-primary project name ip"
10 |     exit 1
11 | fi
12 | PROJECT=${1}
13 | NAME=${2}
14 | SSH_HOST_IP=${3}
15 | SSH_HOST_USER=rundeck
16 | SSH_HOST_PASSWD=rundeck
17 | 
18 | # Lookup the SSH key for this user.
19 | SSH_KEY_PATH_PUB="$(eval echo ~${SSH_HOST_USER}/.ssh/id_rsa.pub)"
20 | 
21 | if ! eval test -f ${SSH_KEY_PATH_PUB}
22 | then
23 |     echo >&2 "${SSH_HOST_USER} host key not found: $SSH_KEY_PATH_PUB"
24 |     exit 1
25 | fi
26 | 
27 | SSH_OPTIONS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
28 | 
29 | # Copy this host's ssh key to the primary.
30 | if ! timeout 60s su - ${SSH_HOST_USER} -c "ssh ${SSH_OPTIONS} ${SSH_HOST_IP} /bin/true" >/dev/null 2>&1
31 | then
32 |     echo "Copying secondary's ssh key to ${SSH_HOST_USER}@${SSH_HOST_IP}"
33 |     yum -y install expect
34 | 
35 |     expect /vagrant/ssh-copy-id.expect ${SSH_HOST_IP} ${SSH_HOST_USER} ${SSH_HOST_PASSWD} ${SSH_KEY_PATH_PUB}
36 | 
37 |     # Test the key-based ssh access to the primary.
38 |     echo "Testing ssh access..."
39 |     if ! timeout 60s su - ${SSH_HOST_USER} -c "ssh ${SSH_OPTIONS} ${SSH_HOST_USER}@${SSH_HOST_IP} uptime"
40 |     then echo >&2 "Could not ssh a command after key was copied." ; exit 1;
41 |     fi
42 | fi
43 | 
44 | 
45 | # Get the UUID from the primary.
46 | # Copy the primary's framework.properties file over and parse it.
47 | if ! timeout 60s su - ${SSH_HOST_USER} -c "scp ${SSH_OPTIONS} ${SSH_HOST_USER}@${SSH_HOST_IP}:/etc/rundeck/framework.properties /tmp/framework.properties.$$"
48 | then
49 |     echo >&2 "Could not get framework.properties from ${SSH_HOST_USER}@${SSH_HOST_IP}"
50 |     exit 1
51 | fi
52 | SERVER_UUID=$(awk -F= '/rundeck.server.uuid/ {print $2}' /tmp/framework.properties.$$)
53 | 
54 | 
55 | # Generate the resource metadata for the primary.
56 | RESOURCES=/var/rundeck/projects/${PROJECT}/etc/resources.xml
57 | 
58 | if ! xmlstarlet sel -t -m "/project/node[@name='${NAME}']" -v @name $RESOURCES
59 | then
60 |     echo "Generating resource info for primary ${SSH_HOST_IP}."
61 |     xmlstarlet ed -P -S -L -s /project -t elem -n NodeTMP -v "" \
62 |         -i //NodeTMP -t attr -n "name" -v "${NAME}" \
63 |         -i //NodeTMP -t attr -n "description" -v "Rundeck server node." \
64 |         -i //NodeTMP -t attr -n "tags" -v "rundeck,primary" \
65 |         -i //NodeTMP -t attr -n "hostname" -v "${SSH_HOST_IP}" \
66 |         -i //NodeTMP -t attr -n "username" -v "${SSH_HOST_USER}" \
67 |         -i //NodeTMP -t attr -n "ssh-keypath" -v "/var/lib/rundeck/.ssh/id_rsa" \
68 |         -i //NodeTMP -t attr -n "server-uuid" -v "${SERVER_UUID}" \
69 |         -r //NodeTMP -v node \
70 |         $RESOURCES
71 | else
72 |     echo "Node $NAME already defined in resources.xml"
73 | fi
74 | chown -R rundeck:rundeck $RESOURCES
75 | 
76 | # Test the primary can be listed by name or tags.
77 | result=$(su - rundeck -c "dispatch -p ${PROJECT} -I name=${NAME}")
78 | if [ "$result" != ${NAME} ]
79 | then
80 |     echo >&2 "primary node could not be found by name."
81 |     exit 1
82 | fi
83 | 
84 | result=$(su - rundeck -c "dispatch -p ${PROJECT} -I tags=rundeck+primary")
85 | if [ "$result" != ${NAME} ]
86 | then
87 |     echo >&2 "primary node could not be found by tags."
88 |     exit 1
89 | fi
90 | 
91 | exit $?
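
add-primary.sh can also be run by hand on the secondary; the arguments mirror what the Vagrantfile passes during provisioning (project, the primary's node name, the primary's IP), for example:

    sudo ./add-primary.sh examples rundeck1 192.168.50.14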
--------------------------------------------------------------------------------
/primary-secondary-failover/add-project.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eu
4 | 
5 | 
6 | if [[ $# -ne 5 ]]
7 | then
8 |     echo >&2 "usage: add-project project nodename nodeip tags webdav-url"
9 |     exit 1
10 | fi
11 | PROJECT=$1
12 | NODENAME=$2
13 | NODEIP=$3
14 | TAGS=$4
15 | WEBDAV_URL=$5
16 | 
17 | echo "Create project $PROJECT..."
18 | # Create an example project as the rundeck user
19 | su - rundeck -c "rd-project -a create -p $PROJECT"
20 | 
21 | # Configure the webdav-logstore plugin.
22 | cat >>/var/rundeck/projects/$PROJECT/etc/project.properties< /dev/null
30 | # Fire off a command.
31 | su - rundeck -c "dispatch -p $PROJECT -f -- whoami"
32 | 
33 | 
34 | echo "Project created."
35 | 
36 | 
37 | keypath=$(awk -F= '/framework.ssh.keypath/ {print $2}' /etc/rundeck/framework.properties)
38 | echo "ssh-keypath: $keypath"
39 | uuid=$(awk -F= '/rundeck.server.uuid/ {print $2}' /etc/rundeck/framework.properties)
40 | echo "server-uuid: $uuid"
41 | 
42 | # Update the resource metadata for this host.
43 | DIR=/var/rundeck/projects/$PROJECT/etc
44 | 
45 | echo "Update resource metadata for this host. (dir=$DIR)"
46 | xmlstarlet ed -u "/project/node/@tags" -v "$TAGS" $DIR/resources.xml |
47 | xmlstarlet ed -u "/project/node/@name" -v "$NODENAME" |
48 | xmlstarlet ed -u "/project/node/@hostname" -v "$NODEIP" |
49 | xmlstarlet ed -i "/project/node" -t attr -n server-uuid -v ${uuid} |
50 | xmlstarlet ed -i "/project/node" -t attr -n ssh-keypath -v ${keypath} > resources.xml.new
51 | mv resources.xml.new $DIR/resources.xml
52 | 
53 | # Set the ownerships to rundeck.
54 | chown -R rundeck:rundeck /var/rundeck/projects/$PROJECT
55 | 
56 | 
57 | # run the node listing.
58 | echo "List the nodes tagged rundeck:"
59 | su - rundeck -c "dispatch -p $PROJECT -v -I tags=rundeck"
60 | 
61 | 
62 | exit $?
63 | 
--------------------------------------------------------------------------------
/primary-secondary-failover/failover/check.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | set -u
5 | 
6 | # Call systeminfo API. maxtry=3 timeout(curl /api/systeminfo)
7 | 
8 | if [ $# -lt 3 ]
9 | then
10 |     echo >&2 "usage: $(basename $0) url apikey timeout"
11 |     exit 1
12 | fi
13 | 
14 | RDURL=$1
15 | API_KEY=$2
16 | TIMEOUT=$3
17 | 
18 | : ${TIMEOUT:=60}
19 | 
20 | APIURL="${RDURL}/api/1/system/info"
21 | AUTHHEADER="X-RunDeck-Auth-Token: $API_KEY"
22 | CURLOPTS="-s -S -L"
23 | CURL="curl $CURLOPTS"
24 | 
25 | 
26 | trap 'echo >&2 "Request exceeded timeout: $TIMEOUT"; exit 1' ALRM
27 | 
28 | # Submit the request.
29 | timeout --signal=ALRM $TIMEOUT $CURL -H "$AUTHHEADER" -o systeminfo.xml $APIURL
30 | 
31 | # validate the response is XML
32 | xmlstarlet val -q systeminfo.xml
33 | 
34 | # Use xml starlet to parse the result.
35 | XML_SEL='xmlstarlet sel -t -m'
36 | 
37 | # look for error authentication problems.
38 | status=$($XML_SEL "/result" -v @success systeminfo.xml)
39 | if [ "$status" != "true" ]
40 | then
41 |     # print the reason and fail the check.
42 |     printf "Connection response error. Message: "
43 |     $XML_SEL "/result" -v error systeminfo.xml >&2
44 |     exit 1
45 | fi
46 | 
47 | 
48 | # Print out some useful info.
49 | $XML_SEL "/result/success" -o "# " -v message systeminfo.xml
50 | $XML_SEL "/result/system/stats/uptime/since" -o "- up since: " -v datetime systeminfo.xml
51 | $XML_SEL "/result/system/stats/cpu" -o "- cpu avg: " -v loadAverage systeminfo.xml
52 | $XML_SEL "/result/system/stats/memory" -o "- mem free: " -v free systeminfo.xml
53 | 
54 | 
55 | exit $?
56 | #
57 | # Done.
--------------------------------------------------------------------------------
/primary-secondary-failover/failover/do-switch.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eu
4 | 
5 | 
6 | 
7 | echo "Updating VIP..."
8 | 
9 | exit $?
10 | #
11 | # Done.
--------------------------------------------------------------------------------
/primary-secondary-failover/failover/syncme.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | set -u
5 | 
6 | # Process the command arguments.
7 | if [ $# -lt 3 ]
8 | then
9 |     echo >&2 "usage: $(basename $0) url apikey project"
10 |     exit 1
11 | fi
12 | RDURL=$1
13 | API_KEY=$2
14 | PROJECT=$3
15 | 
16 | # List of directories to backup
17 | # Exclude the resources.xml for the project.
18 | #DIRS=(/var/rundeck/projects /var/lib/rundeck/logs)
19 | 
20 | DIRS=()
21 | 
22 | 
23 | if [ ${#DIRS[*]} -lt 1 ]
24 | then
25 |     echo "No directories configured. Nothing to do."
26 |     exit 0;
27 | fi
28 | 
29 | APIURL="${RDURL}/api/3/resources"
30 | AUTHHEADER="X-RunDeck-Auth-Token: $API_KEY"
31 | CURLOPTS="-s -S -L"
32 | CURL="curl $CURLOPTS"
33 | tags="rundeck+primary"; # encode the '+' char.
34 | params="project=$PROJECT&tags=${tags/+/%2B}"
35 | 
36 | # Call the API
37 | $CURL -H "$AUTHHEADER" -o resources.xml $APIURL?${params}
38 | xmlstarlet val -q resources.xml
39 | 
40 | count=$(xmlstarlet sel -T -t -v "count(/project/node)" resources.xml)
41 | if [ "$count" -ne 1 ]
42 | then
43 |     echo >&2 "Could not locate primary. Count from query result: $count"
44 |     exit 1;
45 | fi
46 | 
47 | # Lookup primary's hostname and SSH connection details.
48 | SSH_HOST=$(xmlstarlet sel -t -m /project/node -v @hostname resources.xml)
49 | SSH_USR=$(xmlstarlet sel -t -m /project/node -v @username resources.xml)
50 | SSH_KEY=$(xmlstarlet sel -t -m /project/node -v @ssh-keypath resources.xml)
51 | 
52 | echo "rsync'ing from primary: $SSH_USR@$SSH_HOST"
53 | 
54 | # Create backup directories.
55 | BACKUP=/tmp/backup
56 | 
57 | #
58 | SSH_OPTIONS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null"
59 | for dir in ${DIRS[*]:-}
60 | do
61 |     [ ! -d ${BACKUP}/${dir} ] && mkdir -p ${BACKUP}/${dir}
62 |     rsync -acz \
63 |         --rsh="ssh -i ${SSH_KEY} ${SSH_OPTIONS}" \
64 |         $SSH_USR@$SSH_HOST:$dir $(dirname ${BACKUP}/${dir})
65 | done
66 | 
67 | 
68 | pushd $BACKUP >/dev/null
69 | tar czf /tmp/backup.tgz .
70 | popd >/dev/null
71 | 
72 | # Copy the primary's data into this instance.
73 | # Use rsync to be efficient about copying changes.
74 | 
75 | # - Projects
76 | #rsync -acz \
77 | #    --exclude $BACKUP/var/rundeck/projects/*/resources.xml \
78 | #    $BACKUP/var/rundeck/projects/* /var/rundeck/projects
79 | 
80 | # - Execution log output.
81 | if [ "$(ls -A $BACKUP/var/lib/rundeck/logs/)" ]
82 | then
83 |     rsync -acz $BACKUP/var/lib/rundeck/logs/* /var/lib/rundeck/logs
84 | fi
85 | echo Done.
86 | 
87 | exit $?
88 | #
89 | # Done.
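
syncme.sh takes the primary's API endpoint, an API token, and the project whose data to pull; invoked manually on the secondary it would look roughly like this, with the token value being a placeholder:

    ./syncme.sh http://192.168.50.14:4440 $API_TOKEN examples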
--------------------------------------------------------------------------------
/primary-secondary-failover/failover/takeover-notification.cgi:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # This CGI will be called like so:
4 | # http://server/$0?id=${execution.id}&status=${execution.status}&trigger=${notification.trigger}
5 | 
6 | # fixme, should really read up to CONTENT_LENGTH
7 | POST_DATA=$(</dev/stdin)
8 | 
9 | # Parse the query string parameters into shell variables.
10 | declare $(echo "${QUERY_STRING:-}" | tr '&' ' ')
11 | 
12 | # Write a takeover report page.
13 | dtstamp=$(date +%s)
14 | mkdir -p /var/www/html/rundeck/takeovers
15 | 
16 | cat > /var/www/html/rundeck/takeovers/$dtstamp.html<<EOF
17 | <html>
18 | <body>
19 | <h1>A Takeover occurred at $dtstamp</h1>
20 | <ul>
21 | <li>execution_id: ${id:-}</li>
22 | <li>status: ${status:-}</li>
23 | <li>trigger: ${trigger:-}</li>
24 | </ul>
25 | <h2>Post data</h2>
26 | <pre>
27 | $POST_DATA
28 | </pre>
29 | </body>
30 | </html>
31 | EOF
32 | 
33 | echo Content-type: text/html
34 | echo ""
35 | cat /var/www/html/rundeck/takeovers/$dtstamp.html
--------------------------------------------------------------------------------
/primary-secondary-failover/failover/takeover.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -e
4 | set -u
5 | 
6 | 
7 | echo taking over.
8 | 
9 | # Process the command arguments.
10 | if [ $# -lt 2 ]
11 | then
12 |     echo >&2 "usage: $(basename $0) url apikey"
13 |     exit 1
14 | fi
15 | RDURL=$1
16 | API_KEY=$2
17 | 
18 | project=$RD_JOB_PROJECT
19 | 
20 | APIURL="${RDURL}/api/1/jobs/export"
21 | AUTHHEADER="X-RunDeck-Auth-Token: $API_KEY"
22 | CURLOPTS="-s -S -L"
23 | CURL="curl $CURLOPTS"
24 | params="project=$project&groupPath=failover"
25 | 
26 | # Call the API
27 | $CURL -H "$AUTHHEADER" -o jobs.xml $APIURL?${params}
28 | xmlstarlet val -q jobs.xml
29 | 
30 | # Turn cron schedule off for the Check-Or-Takeover job.
31 | # ----------------------
32 | echo "removing schedule for Check-Or-Takeover"
33 | #
34 | xmlstarlet ed -d //job/schedule jobs.xml > jobs.xml.new
35 | 
36 | 
37 | #
38 | APIURL="${RDURL}/api/1/jobs/import"
39 | params="dupeOption=update"
40 | $CURL -H "$AUTHHEADER" -o result.xml -F xmlBatch=@jobs.xml.new $APIURL?${params}
41 | success=$(xmlstarlet sel -T -t -v "/result/@success" result.xml)
42 | if [ "true" != "$success" ] ; then
43 |     echo >&2 "FAIL: Server reported an error: "
44 |     xmlstarlet sel -T -t -m "/result/error/message" -v "." -n result.xml
45 |     exit 2
46 | fi
47 | 
48 | # Update the tag for this host.
49 | # -----------------------------
50 | echo "tagging this host as primary"
51 | APIURL="${RDURL}/api/1/resource"
52 | params="project=$project"
53 | $CURL -H "$AUTHHEADER" -o resource.xml $APIURL/$RD_NODE_NAME?$params
54 | xmlstarlet val -q resource.xml || {
55 |     echo >&2 "FAIL: Could not retrieve resource xml for $RD_NODE_NAME"
56 |     exit 2
57 | }
58 | 
59 | # Get existing tags for this server.
60 | tags=$(xmlstarlet sel -t -m "/project/node" -v @tags resource.xml)
61 | ntags=$(echo $tags | sed 's/secondary/primary/g')
62 | 
63 | xmlstarlet ed -u "/project/node/@tags" -v "$ntags" resource.xml > resource.xml.new
64 | 
65 | 
66 | # -----------------------
67 | 
68 | 
69 | # Update the monitor tool.
70 | # ------------------------
71 | 
72 | 
73 | # Update the load balancer.
74 | # ------------------------
75 | 
76 | exit $?
77 | #
78 | # Done.
--------------------------------------------------------------------------------
/primary-secondary-failover/failover/update-jobs.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Turn cron schedule off for the sync and check jobs
4 | # ----------------------
5 | 
6 | set -e
7 | set -u
8 | 
9 | 
10 | 
11 | # Process the command arguments.
12 | if [ $# -lt 3 ]
13 | then
14 |     echo >&2 "usage: update-jobs.sh url apikey project"
15 |     exit 1
16 | fi
17 | 
18 | 
19 | RDURL=$1
20 | API_KEY=$2
21 | PROJECT=$3
22 | 
23 | 
24 | AUTHHEADER="X-RunDeck-Auth-Token: $API_KEY"
25 | CURLOPTS="-s -S -L"
26 | CURL="curl $CURLOPTS"
27 | CURL_OUT=$(mktemp "/tmp/curl.out.XXXXX")
28 | 
29 | 
30 | # Remove the schedule for the failover jobs
31 | # -----------------------------------------
32 | 
33 | # Dump the job definitions for the failover job group.
34 | 35 | APIURL="${RDURL}/api/1/jobs/export" 36 | params="project=$PROJECT&groupPath=failover" 37 | 38 | $CURL -H "$AUTHHEADER" -o jobs.xml $APIURL?${params} 39 | xmlstarlet val -q jobs.xml 40 | 41 | 42 | # Remove the schedule elements. 43 | 44 | xmlstarlet ed -L -d //job/schedule jobs.xml 45 | 46 | 47 | # Reload the updated job definitions. 48 | 49 | APIURL="${RDURL}/api/1/jobs/import" 50 | params="dupeOption=update" 51 | $CURL -H "$AUTHHEADER" -o $CURL_OUT -F xmlBatch=@jobs.xml $APIURL?${params} 52 | 53 | if ! xmlstarlet sel -T -t -v "/result/@success" $CURL_OUT >/dev/null 54 | then 55 | printf >&2 "FAIL: API error: %s\n" "$APIURL" 56 | xmlstarlet sel -t -m "/result/error/message" -v "." $CURL_OUT 57 | exit 2 58 | fi 59 | 60 | echo "Removed schedule for failover jobs." 61 | 62 | 63 | # Take over scheduled jobs. 64 | 65 | 66 | # Lookup the primary's server UUID. 67 | 68 | APIURL="${RDURL}/api/3/resources" 69 | tags="rundeck+primary"; # encode the '+' char. 70 | params="project=$PROJECT&tags=${tags/+/%2B}" 71 | 72 | # Call the resources API 73 | $CURL -H "$AUTHHEADER" -o resources.xml $APIURL?${params} 74 | xmlstarlet val -q resources.xml 75 | 76 | count=$(xmlstarlet sel -T -t -v "count(/project/node)" resources.xml) 77 | if [ "$count" -ne 1 ] 78 | then 79 | echo >&2 "Could not locate primary. Count from query result: $count" 80 | exit 1; 81 | fi 82 | 83 | # Lookup primary's server-uuid and name. 84 | SVR_UUID=$(xmlstarlet sel -t -m /project/node -v @server-uuid resources.xml) 85 | SVR_NAME=$(xmlstarlet sel -t -m /project/node -v @name resources.xml) 86 | 87 | echo "Taking over scheduled jobs from $SVR_NAME. server-uuid: $SVR_UUID..." 88 | 89 | 90 | # See http://rundeck.org/docs/api/index.html#takeover-schedule-in-cluster-mode for the request body format. 91 | APIURL="${RDURL}/api/7/incubator/jobs/takeoverSchedule" 92 | $CURL -H "$AUTHHEADER" -H "Content-Type: application/xml" -o $CURL_OUT \ 93 | --data "<takeoverSchedule><server uuid='$SVR_UUID'/></takeoverSchedule>" -X PUT $APIURL 94 | 95 | xmlstarlet val -q $CURL_OUT 96 | 97 | if ! xmlstarlet sel -T -t -v "/result/@success" $CURL_OUT >/dev/null 98 | then 99 | printf >&2 "FAIL: API error: %s\n" "$APIURL" 100 | xmlstarlet sel -t -m "/result/error/message" -v "." $CURL_OUT 101 | exit 1 102 | fi 103 | 104 | declare -i successful failed 105 | successful=$(xmlstarlet sel -t -m "/result/takeoverSchedule/jobs/successful" -v @count $CURL_OUT) 106 | failed=$(xmlstarlet sel -t -m "/result/takeoverSchedule/jobs/failed" -v @count $CURL_OUT) 107 | if [ "$failed" -ne 0 ] 108 | then 109 | echo >&2 "FAIL: Not all jobs taken over: $failed out of $((successful+failed))" ; exit 3 110 | else 111 | echo "Took over schedule for $successful jobs" 112 | fi 113 | 114 | 115 | exit $? 116 | # 117 | # Done. -------------------------------------------------------------------------------- /primary-secondary-failover/failover/update-resources.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Update the tags for this host. 4 | # ----------------------------- 5 | 6 | set -e 7 | set -u 8 | 9 | 10 | # Process the command arguments. 11 | if [ $# -lt 4 ] 12 | then 13 | echo >&2 "usage: $(basename $0) url apikey project node" 14 | exit 1 15 | fi 16 | RDURL=$1 17 | API_KEY=$2 18 | PROJECT=$3 19 | NODE=$4 20 | 21 | 22 | 23 | AUTHHEADER="X-RunDeck-Auth-Token: $API_KEY" 24 | CURLOPTS="-s -S -L" 25 | CURL="curl $CURLOPTS" 26 | 27 | CURL_OUT=$(mktemp "/tmp/update-resources.sh.curl.out.XXXXX") 28 | RESOURCES=$(mktemp "/tmp/update-resources.sh.resources.xml.XXXXX") 29 | 30 | echo "tagging this host as primary."
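# A sketch of the resource-model XML this script manipulates (the exact
# attribute set is an assumption; only @name and @tags are used below):
#
#   <project>
#     <node name="rundeck2" tags="rundeck, secondary" ... />
#   </project>
#
# The steps below find any node still tagged "primary", strip that tag,
# then promote this node by rewriting "secondary" to "primary" in its tags.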
31 | 32 | # List the resources. 33 | # ------------------- 34 | 35 | # Search for the current primary node. 36 | 37 | APIURL="${RDURL}/api/3/resources" 38 | AUTHHEADER="X-RunDeck-Auth-Token: $API_KEY" 39 | qtags="rundeck+primary"; # encode the '+' char. 40 | params="project=$PROJECT&tags=${qtags/+/%2B}" 41 | 42 | $CURL -H "$AUTHHEADER" -o $CURL_OUT $APIURL?${params} 43 | xmlstarlet val -q $CURL_OUT 44 | 45 | # List the primaries. 46 | PRIMARIES=( $(xmlstarlet sel -t -m "/project/node" -v @name $CURL_OUT) ) 47 | 48 | # List all the resources. 49 | APIURL="${RDURL}/api/3/project/${PROJECT}/resources" 50 | $CURL -H "$AUTHHEADER" -o $CURL_OUT $APIURL 51 | 52 | # Remove the primary tag from any nodes tagged as such. 53 | for nodename in ${PRIMARIES[*]} 54 | do 55 | otags=$(xmlstarlet sel -t -m "/project/node[@name='$nodename']" -v @tags $CURL_OUT) 56 | ntags=$(echo $otags | sed 's/primary//g') 57 | # Use -L to edit the resources file in place. 58 | xmlstarlet ed -L -u "/project/node[@name='${nodename}']/@tags" -v "$ntags" $CURL_OUT 59 | echo >&2 "Removed tag: primary, from node: $nodename" 60 | done 61 | 62 | # Read the tags for the secondary rundeck. 63 | tags=$(xmlstarlet sel -t -m "/project/node[@name='${NODE}']" -v @tags $CURL_OUT) 64 | stags=$(echo $tags | sed 's/secondary/primary/g') 65 | 66 | 67 | # Update the resources. 68 | # --------------------- 69 | 70 | # Rewrite the tags for the two rundecks. 71 | xmlstarlet ed -u "/project/node[@name='${NODE}']/@tags" -v "$stags" $CURL_OUT > $RESOURCES 72 | 73 | # Post the updated resources.xml back to the secondary rundeck. 74 | $CURL -X POST -H "$AUTHHEADER" -H "Content-Type: text/xml" -d @$RESOURCES -o $CURL_OUT $APIURL 75 | 76 | success=$(xmlstarlet sel -T -t -v "/result/@success" $CURL_OUT) 77 | if [ "true" != "$success" ] ; then 78 | echo >&2 "FAIL: Server reported an error: " 79 | xmlstarlet sel -T -t -m "/result/error/message" -v "." -n $CURL_OUT 80 | exit 2 81 | fi 82 | 83 | 84 | exit $? 85 | # 86 | # Done. -------------------------------------------------------------------------------- /primary-secondary-failover/generate-apikey.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | set -u 4 | 5 | echo "Generating API token..." 6 | 7 | printf "Lookup credentials for login..." 8 | RDUSER=$(awk -F= '/framework.server.username/ {print $2}' /etc/rundeck/framework.properties| tr -d ' ') 9 | RDPASS=$(awk -F= '/framework.server.password/ {print $2}' /etc/rundeck/framework.properties| tr -d ' ') 10 | SVR_URL=$(awk -F= '/framework.server.url/ {print $2}' /etc/rundeck/framework.properties) 11 | 12 | 13 | CURLOPTS="-f -s -S -L -c cookies -b cookies" 14 | CURL="curl $CURLOPTS" 15 | 16 | # Authenticate 17 | printf "authenticating..." 18 | loginurl="${SVR_URL}/j_security_check" 19 | $CURL $loginurl > curl.out 20 | $CURL -X POST -d j_username=$RDUSER -d j_password=$RDPASS $loginurl > curl.out 21 | 22 | # Generate the API token. 23 | printf "Requesting token..." 24 | tokenurl="$SVR_URL/user/generateApiToken" 25 | $CURL $tokenurl?login=${RDUSER} > curl.out 26 | xmlstarlet fo -R -H curl.out > userprofile.html 2>/dev/null 27 | 28 | # Query the profile for the first apitoken. 29 | # 30 | token=$(xmlstarlet sel -t -m "//span[@class='apitoken']" -v . -n userprofile.html|head -1) 31 | 32 | if [ -z "$token" ] 33 | then 34 | echo >&2 "API token not found in the user profile." 
35 | exit 1 36 | fi 37 | echo "Generated token: $token" 38 | -------------------------------------------------------------------------------- /primary-secondary-failover/install-httpd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -u 5 | 6 | # Software install 7 | # ---------------- 8 | # 9 | # Utilities 10 | # Bootstrap a fedora repo to get lighttpd 11 | 12 | if ! rpm -q epel-release 13 | then 14 | rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm 15 | fi 16 | yum install -y httpd xmlstarlet 17 | 18 | # Apache httpd 19 | # ------------ 20 | 21 | # Create directory for takeover log messages 22 | mkdir -p /var/www/html/rundeck/takeovers 23 | chown apache:apache /var/www/html/rundeck/takeovers 24 | 25 | # Create directory for webdav lock files 26 | mkdir -p /var/lock/apache 27 | chown apache:apache /var/lock/apache 28 | 29 | # Create a login for accessing the webdav content. 30 | (echo -n "admin:DAV-upload:" && echo -n "admin:DAV-upload:admin" | 31 | md5sum | 32 | awk '{print $1}' ) >> /etc/httpd/webdav.passwd 33 | 34 | # Generate the configuration into the includes directory. 35 | cat > /etc/httpd/conf.d/webdav.conf< 41 | Dav On 42 | Order Allow,Deny 43 | Allow from all 44 | 45 | AuthType Digest 46 | AuthName DAV-upload 47 | 48 | # You can use the htdigest program to create the password database: 49 | # htdigest -c "/etc/httpd/webdav.passwd" DAV-upload admin 50 | AuthUserFile "/etc/httpd/webdav.passwd" 51 | AuthDigestProvider file 52 | 53 | # Allow universal read-access, but writes are restricted 54 | # to the admin user. 55 | 56 | require user admin 57 | 58 | 59 | 60 | EOF 61 | 62 | # Create subdirectories for webdav content. 63 | mkdir -p /var/www/html/dav 64 | cat > /var/www/html/dav/hi.txt<&2 "usage: $0 name mysqladdr rundeck_yum_repo rerun_yum_repo webdav_url" 12 | exit 1 13 | fi 14 | NAME=$1 15 | MYSQLADDR=$2 16 | RUNDECK_REPO_URL=$3 17 | RERUN_REPO_URL=${4} 18 | WEBDAV_URL=${5} 19 | 20 | # Software install 21 | # ---------------- 22 | # 23 | # Utilities 24 | # Bootstrap a fedora repo to get xmlstarlet 25 | 26 | curl -s http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm -o epel-release.rpm -z epel-release.rpm 27 | if ! rpm -q epel-release 28 | then 29 | rpm -Uvh epel-release.rpm 30 | fi 31 | yum -y install xmlstarlet coreutils rsync 32 | 33 | # 34 | # Rerun 35 | # 36 | if [ -n "${RERUN_REPO_URL:-}" ] 37 | then 38 | curl -# --fail -L -o /etc/yum.repos.d/rerun.repo "$RERUN_REPO_URL" || { 39 | echo "failed downloading rerun.repo config" 40 | exit 2 41 | } 42 | fi 43 | yum -y install rerun rerun-rundeck-admin 44 | 45 | # 46 | # JRE 47 | # 48 | yum -y install java-1.6.0 49 | # 50 | # Rundeck 51 | # 52 | if [ -n "$RUNDECK_REPO_URL" ] 53 | then 54 | curl -# --fail -L -o /etc/yum.repos.d/rundeck.repo "$RUNDECK_REPO_URL" || { 55 | echo "failed downloading rundeck.repo config" 56 | exit 2 57 | } 58 | else 59 | if ! rpm -q rundeck-repo 60 | then 61 | rpm -Uvh http://repo.rundeck.org/latest.rpm 62 | fi 63 | fi 64 | yum -y --skip-broken install rundeck 65 | 66 | # Retreive the webav-logstore file store plugin. 
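# Rundeck loads plugin jars placed in /var/lib/rundeck/libext. A quick
# sanity check after the download (any listing output confirms the jar
# is a well-formed archive):
#   unzip -l /var/lib/rundeck/libext/webdav-logstore-plugin.jar | head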
67 | curl -L -s -f -o /var/lib/rundeck/libext/webdav-logstore-plugin.jar \ 68 | "http://dl.bintray.com/ahonor/rundeck-plugins/rundeck-webdav-logstore-plugin-2.1.0.jar" 69 | chown rundeck:rundeck /var/lib/rundeck/libext/webdav-logstore-plugin.jar 70 | mkdir -p /var/lib/rundeck/libext/cache/webdav-logstore-plugin 71 | chown -R rundeck:rundeck /var/lib/rundeck/libext/cache 72 | 73 | 74 | 75 | # Reset the home directory permission as it comes group writeable. 76 | # This is needed for ssh requirements. 77 | chmod 755 ~rundeck 78 | # Add vagrant user to rundeck group. 79 | usermod -g rundeck vagrant 80 | 81 | # 82 | # Disable the firewall so we can easily access it from any host. 83 | service iptables stop 84 | # 85 | 86 | # Configure rundeck. 87 | # ----------------- 88 | 89 | # Replace the apitoken policy 90 | cp /vagrant/templates/apitoken.aclpolicy /etc/rundeck/apitoken.aclpolicy 91 | 92 | # 93 | # Configure the mysql connection and log file storage plugin. 94 | cd /etc/rundeck 95 | cat >rundeck-config.properties.new < framework.properties.new 113 | grep -q rundeck.server.uuid framework.properties.new || { 114 | UUID=$(uuidgen) 115 | cat >>framework.properties.new <&2 ".";# output message. 143 | else break; # successful message. 144 | fi 145 | let count=$count+1;# increment attempts count. 146 | [ $count -eq $max ] && { 147 | echo >&2 "FAIL: Execeeded max attemps " 148 | exit 1 149 | } 150 | sleep 10; # wait 10s before trying again. 151 | done 152 | } 153 | 154 | mkdir -p /var/log/vagrant 155 | success_msg="Connector@" 156 | 157 | if ! service rundeckd status 158 | then 159 | echo "Starting rundeck..." 160 | ( 161 | exec 0>&- # close stdin 162 | service rundeckd start 163 | ) &> /var/log/rundeck/service.log # redirect stdout/err to a log. 164 | 165 | wait_for_success_msg "$success_msg" 166 | 167 | fi 168 | 169 | echo "Rundeck started." 170 | 171 | 172 | # Done. 173 | exit $? 174 | -------------------------------------------------------------------------------- /primary-secondary-failover/jobs/jobs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | c1ddb601-bd19-474a-9769-09c1618d5a74 4 | 5 | 10 | INFO 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | Successfully check the current primary or take its place. 20 | Check-Or-Takeover 21 | 22 | examples 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 1 31 | false 32 | true 33 | ascending 34 | 35 | c1ddb601-bd19-474a-9769-09c1618d5a74 36 | 37 | 38 | secondary+rundeck 39 | 40 | 41 | failover 42 | 43 | 44 | fa8febd1-a8f8-43ad-ba14-cd8a431e5436 45 | 46 | 51 | INFO 52 | 53 | 54 | /var/lib/rundeck/scripts/failover/syncme.sh 55 | ${option.secondary} ${option.key} ${option.project} 56 | 57 | 58 | Synchronize the job state data to this failover instance. 
59 | Sync 60 | 61 | examples 62 | 63 | 66 | 69 | 72 | 73 | 74 | 75 | 1 76 | false 77 | true 78 | ascending 79 | 80 | fa8febd1-a8f8-43ad-ba14-cd8a431e5436 81 | 82 | 83 | secondary+rundeck 84 | 85 | 86 | failover 87 | 88 | 89 | 738a5664-bd5e-4de5-b96d-ebb18850df13 90 | INFO 91 | 92 | 93 | /var/lib/rundeck/scripts/failover/check.sh 94 | ${option.primary} ${option.key} ${option.timeout} 95 | 96 | 97 | check if primary healthy 98 | check 99 | 100 | examples 101 | 102 | 105 | 108 | 111 | 112 | 113 | 114 | 1 115 | false 116 | true 117 | ascending 118 | 119 | 738a5664-bd5e-4de5-b96d-ebb18850df13 120 | 121 | 122 | secondary+rundeck 123 | 124 | 125 | failover 126 | 127 | 128 | 98f8d2d7-3bbe-48ee-af53-274e44594d45 129 | INFO 130 | 131 | 132 | /var/lib/rundeck/scripts/failover/update-jobs.sh 133 | ${option.url} ${option.key} ${job.project} 134 | 135 | 136 | /var/lib/rundeck/scripts/failover/update-resources.sh 137 | ${option.url} ${option.key} ${job.project} ${node.name} 138 | 139 | 140 | /var/lib/rundeck/scripts/failover/do-switch.sh 141 | 142 | 143 | 144 | Take over as the primary rundeck server. 145 | takeover 146 | 147 | examples 148 | 149 | 152 | 155 | 156 | 157 | 98f8d2d7-3bbe-48ee-af53-274e44594d45 158 | failover 159 | 160 | 161 | -------------------------------------------------------------------------------- /primary-secondary-failover/load-jobs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -u 5 | 6 | [ $# -lt 1 ] && { 7 | echo >&2 'usage: load-jobs project [primary]' 8 | exit 2 9 | } 10 | 11 | PROJECT=$1 12 | PRIMARY=${2:-} 13 | 14 | 15 | echo "Get the API token for this server..." 16 | # GEt the API TOkens 17 | CURLOPTS="-s -S -L -c cookies -b cookies" 18 | CURL="curl $CURLOPTS" 19 | RDUSER=$(awk -F= '/framework.server.username/ {print $2}' /etc/rundeck/framework.properties| tr -d ' ') 20 | RDPASS=$(awk -F= '/framework.server.password/ {print $2}' /etc/rundeck/framework.properties| tr -d ' ') 21 | SVR_URL=$(awk -F= '/framework.server.url/ {print $2}' /etc/rundeck/framework.properties) 22 | 23 | # Authenticate to get the user profile 24 | loginurl="${SVR_URL}/j_security_check" 25 | $CURL $loginurl > curl.out 26 | $CURL -X POST -d j_username=$RDUSER -d j_password=$RDPASS $loginurl > curl.out 27 | 28 | # Get the user profile and format the html into well formed xml. 29 | tokenurl="${SVR_URL}/user/profile" 30 | $CURL $tokenurl?login=${RDUSER} > curl.out 31 | xmlstarlet fo -R -H curl.out > user.html 2>/dev/null 32 | 33 | # Query the profile for the first apitoken. 34 | # 35 | token=$(xmlstarlet sel -t -m "//span[@class='apitoken']" -v . -n user.html|head -1) 36 | if [ -z "$token" ] 37 | then 38 | echo >&2 "No API token found in the user profile." 39 | exit 1 40 | fi 41 | 42 | mkdir -p /var/lib/rundeck/scripts/failover 43 | cp /vagrant/failover/*.sh /var/lib/rundeck/scripts/failover 44 | chown -R rundeck:rundeck /var/lib/rundeck/scripts 45 | 46 | # Replace the token with the one here. 
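# The pipeline below assumes jobs.xml defines options shaped roughly like
# this (a sketch; the values shown are placeholders, and the option names
# match the XPath expressions used):
#
#   <context>
#     <options>
#       <option name="key" value="TOKEN"/>
#       <option name="project" value="examples"/>
#       <option name="primary" value="PRIMARY-HOST"/>
#     </options>
#   </context>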
47 | echo "Updating job definitions for this environment" 48 | sed -e 's,@SCRIPTDIR@,/var/lib/rundeck/scripts/failover,g' /vagrant/jobs/jobs.xml | 49 | xmlstarlet ed -u "//job/context/options/option[@name='key']/@value" -v "$token" | 50 | xmlstarlet ed -u "//job/context/options/option[@name='project']/@value" -v "$PROJECT" | 51 | xmlstarlet ed -u "//job/context/options/option[@name='primary']/@value" -v "${PRIMARY:-}" > /tmp/jobs.xml.new 52 | 53 | 54 | # Now load the jobs 55 | chown rundeck /tmp/jobs.xml.new 56 | su - rundeck -c "rd-jobs load -f /tmp/jobs.xml.new" 57 | 58 | exit $? 59 | -------------------------------------------------------------------------------- /primary-secondary-failover/ssh-copy-id.expect: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env expect 2 | 3 | set timeout 60 4 | 5 | if { $argc != 4 } { 6 | puts stderr "usage: ssh-copy-id.expect host user password keyfile" 7 | exit 2 8 | } 9 | 10 | set host [lindex $argv 0] 11 | set user [lindex $argv 1] 12 | set password [lindex $argv 2] 13 | set keyfile [lindex $argv 3] 14 | 15 | 16 | spawn ssh-copy-id -i $keyfile $user@$host 17 | 18 | expect "yes/no" { send "yes\n"; } 19 | expect "password:" { send "$password\n"; exp_continue } 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /primary-secondary-failover/templates/apitoken.aclpolicy: -------------------------------------------------------------------------------- 1 | description: API project level access control 2 | context: 3 | project: '.*' # all projects 4 | for: 5 | resource: 6 | - equals: 7 | kind: job 8 | allow: [create,delete] # allow create and delete jobs 9 | - equals: 10 | kind: node 11 | allow: [read,create,update,refresh] # allow refresh node sources 12 | - equals: 13 | kind: event 14 | allow: [read,create] # allow read/create events 15 | adhoc: 16 | - allow: [read,run,kill] # allow running/killing adhoc jobs and read output 17 | job: 18 | - allow: [create,read,update,delete,run,kill] # allow create/read/write/delete/run/kill of all jobs 19 | node: 20 | - allow: [read,run] # allow read/run for all nodes 21 | by: 22 | group: api_token_group 23 | 24 | --- 25 | 26 | description: API Application level access control 27 | context: 28 | application: 'rundeck' 29 | for: 30 | resource: 31 | - equals: 32 | kind: system 33 | allow: [read] # allow read of system info 34 | - equals: 35 | kind: job 36 | allow: [admin] # allow cluster schedule takeover 37 | project: 38 | - match: 39 | name: '.*' 40 | allow: [read] # allow view of all projects 41 | by: 42 | group: api_token_group 43 | -------------------------------------------------------------------------------- /tomcat6-mysql/README.md: -------------------------------------------------------------------------------- 1 | This is a multi-machine vagrant configuration that 2 | provisions a rundeck instance and mysql database. 3 | 4 | ## Vagrant configuration. 5 | 6 | The vagrant configuration defines the following virtual machines: 7 | 8 | * **mysql**: The mysql VM 9 | * **rundeck**: The rundeck VM 10 | 11 | 12 | ## Requirements 13 | 14 | * Internet access to download packages from public repositories. 15 | * [Vagrant](http://downloads.vagrantup.com) 16 | 17 | ## Startup 18 | 19 | Start up the VMs in the following order. 
20 | 21 | vagrant up mysql 22 | vagrant up rundeck 23 | 24 | 25 | You can access Rundeck via: 26 | 27 | * http://192.168.50.14:4440/rundeck 28 | 29 | 30 | Log in to Rundeck using user/pass: admin/admin 31 | 32 | -------------------------------------------------------------------------------- /tomcat6-mysql/Vagrantfile: -------------------------------------------------------------------------------- 1 | 2 | Vagrant.configure("2") do |config| 3 | config.vm.box = "CentOS-6.3-x86_64-minimal" 4 | config.vm.box_url = "https://dl.dropbox.com/u/7225008/Vagrant/CentOS-6.3-x86_64-minimal.box" 5 | 6 | PROJECT="examples" 7 | RUNDECK_VERSION="2.0.0" 8 | RUNDECK_IP="192.168.50.14" 9 | MYSQL_IP="192.168.50.16" 10 | 11 | config.vm.define :mysql do |mysql| 12 | mysql.vm.hostname = "mysql" 13 | mysql.vm.network :private_network, ip: "#{MYSQL_IP}" 14 | mysql.vm.provision :shell, :path => "install-mysql.sh", :args => "mysql" 15 | end 16 | 17 | config.vm.define :rundeck do |rundeck| 18 | rundeck.vm.hostname = "rundeck" 19 | rundeck.vm.network :private_network, ip: "#{RUNDECK_IP}" 20 | rundeck.vm.provision :shell, :path => "install-rundeck.sh", :args => "#{RUNDECK_VERSION} rundeck #{RUNDECK_IP} #{MYSQL_IP}" 21 | rundeck.vm.provision :shell, :path => "add-project.sh", :args => "#{PROJECT} rundeck #{RUNDECK_IP} rundeck,rundeck" 22 | end 23 | 24 | 25 | end 26 | 27 | -------------------------------------------------------------------------------- /tomcat6-mysql/add-project.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -u 5 | 6 | 7 | if [ $# -ne 4 ] 8 | then 9 | echo >&2 "usage: add-project project nodename nodeip tags" 10 | exit 1 11 | fi 12 | PROJECT=$1 13 | NODENAME=$2 14 | NODEIP=$3 15 | TAGS=$4 16 | 17 | export RDECK_BASE=/etc/tomcat6/rundeck 18 | PATH=$PATH:$RDECK_BASE/tools/bin 19 | 20 | 21 | echo Create project $PROJECT... 22 | # Create an example project as the rundeck user 23 | rd-project -a create -p $PROJECT 24 | 25 | # Run simple commands to double check the project. 26 | dispatch -p $PROJECT > /dev/null 27 | # Fire off a command. 28 | dispatch -p $PROJECT -f -- whoami 29 | 30 | 31 | echo "Project created. Update resource metadata for this host." 32 | keypath=$(awk -F= '/framework.ssh.keypath/ {print $2}' $RDECK_BASE/etc/framework.properties) 33 | # Update the resource metadata for this host. 34 | DIR=$RDECK_BASE/projects/$PROJECT/etc 35 | 36 | xmlstarlet ed -u "/project/node/@tags" -v "$TAGS" $DIR/resources.xml | 37 | xmlstarlet ed -u "/project/node/@name" -v "$NODENAME" | 38 | xmlstarlet ed -u "/project/node/@hostname" -v "$NODEIP" | 39 | xmlstarlet ed -i "/project/node" -t attr -n ssh-keypath -v ${keypath} > resources.xml.new 40 | mv resources.xml.new $DIR/resources.xml 41 | 42 | # Set ownership to the tomcat user (rundeck runs inside tomcat). 43 | chown -R tomcat:tomcat $RDECK_BASE/var 44 | 45 | 46 | # Run the node listing. 47 | echo "List the nodes tagged rundeck:" 48 | dispatch -p $PROJECT -v -I tags=rundeck 49 | 50 | 51 | exit $? -------------------------------------------------------------------------------- /tomcat6-mysql/generate-apikey.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Exit immediately on error or undefined variable. 4 | set -e 5 | set -u 6 | 7 | export RDECK_BASE=/etc/tomcat6/rundeck 8 | 9 | echo "Requesting API token..."
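# The steps below scrape the web UI for a token: establish a session
# cookie, POST the credentials to j_security_check (tomcat's
# container-managed login), then request /user/generateApiToken and pull
# the first span.apitoken element out of the returned profile page.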
10 | 11 | RDUSER=$(awk -F= '/framework.server.username/ {print $2}' $RDECK_BASE/etc/framework.properties| tr -d ' ') 12 | RDPASS=$(awk -F= '/framework.server.password/ {print $2}' $RDECK_BASE/etc/framework.properties| tr -d ' ') 13 | SVR_URL=$(awk -F= '/framework.rundeck.url/ {print $2}' $RDECK_BASE/etc/framework.properties|tr -d ' ') 14 | 15 | 16 | CURLOPTS="-f -s -S -L -c cookies -b cookies" 17 | CURL="curl $CURLOPTS" 18 | 19 | # Authenticate. 20 | # ------------- 21 | # Create session. For tomcat, make a request to get a redirect to the login page. 22 | $CURL ${SVR_URL} > curl.out; 23 | # Now post credentials to j_security_check with our cookie session info. 24 | $CURL -X POST -d j_username=$RDUSER -d j_password=$RDPASS "${SVR_URL}/j_security_check" > curl.out 25 | 26 | # Request the API token. 27 | # ----------------------- 28 | tokenurl="$SVR_URL/user/generateApiToken" 29 | $CURL $tokenurl?login=${RDUSER} > curl.out 30 | xmlstarlet fo -R -H curl.out > userprofile.html 2>/dev/null 31 | 32 | # Query the profile for the first apitoken. 33 | # 34 | token=$(xmlstarlet sel -N x="http://www.w3.org/1999/xhtml" -t -m "//x:span[@class='apitoken']" -v . -n userprofile.html|head -1) 35 | 36 | if [ -z "$token" ] 37 | then 38 | echo >&2 "API token not found in the user profile." 39 | exit 1 40 | fi 41 | echo "Obtained API token: $token" -------------------------------------------------------------------------------- /tomcat6-mysql/install-mysql.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Exit immediately on error or undefined variable. 4 | set -e 5 | set -u 6 | 7 | 8 | # 9 | # Install software. 10 | # ----------------- 11 | 12 | # 13 | # Mysql. 14 | # 15 | yum -y install mysql mysql-server mysql-devel 16 | chgrp -R mysql /var/lib/mysql 17 | chmod -R 770 /var/lib/mysql 18 | 19 | # Startup the server. 20 | # ------------------ 21 | service mysqld start 22 | 23 | sleep 20; # let it bootup. 24 | 25 | # 26 | # Create the database. 27 | # -------------------- 28 | 29 | # Create the rundeck database and grant access to any host. 30 | mysql --user root --password='' <&2 "usage: $0 rdversion hostname hostip mysqladdr" 11 | exit 1 12 | fi 13 | RUNDECK_VERSION=$1 14 | HOST_NAME=$2 15 | HOST_IP=$3 16 | MYSQLADDR=$4 17 | export RDECK_BASE=/etc/tomcat6/rundeck 18 | 19 | # Software install 20 | # ---------------- 21 | # 22 | # Utilities 23 | # Bootstrap a fedora repo to get xmlstarlet 24 | 25 | if ! rpm -q epel-release 26 | then 27 | rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm 28 | fi 29 | yum -y install xmlstarlet coreutils rsync unzip 30 | # 31 | # JRE 32 | # 33 | yum -y install java-1.6.0 34 | # 35 | # Tomcat. 36 | # 37 | yum install -y tomcat6 tomcat6-webapps tomcat6-admin-webapps 38 | 39 | # 40 | # Rundeck 41 | # 42 | mkdir -p $RDECK_BASE 43 | 44 | ## Rundeck WAR 45 | WAR=rundeck-${RUNDECK_VERSION}.war 46 | if test -f /vagrant/rundeck-*.war ; then 47 | WAR=`ls /vagrant/rundeck-*.war` 48 | else 49 | WAR_URL=http://download.rundeck.org/war/rundeck-${RUNDECK_VERSION}.war 50 | curl -f -s -L $WAR_URL -o ${WAR} -z ${WAR} 51 | 52 | fi 53 | mkdir -p /var/lib/tomcat6/webapps/rundeck 54 | unzip -qu ${WAR} -d /var/lib/tomcat6/webapps/rundeck 55 | 56 | ## Rundeck CLI. Extract the CLI tools from the rundeck-core.jar. 
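# rundeck-core-<version>.jar ships the CLI launcher scripts as resources
# under com/dtolabs/rundeck/core/cli/templates/. Unzipping the jar and
# copying those templates plus the jar's library dependencies gives a
# working rd-project/dispatch toolchain without a full launcher install.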
57 | core_jar=/var/lib/tomcat6/webapps/rundeck/WEB-INF/lib/rundeck-core-${RUNDECK_VERSION}.jar 58 | tmp_dir=/tmp/rundeck-core-templates 59 | mkdir -p $tmp_dir 60 | unzip -qu $core_jar -d $tmp_dir 61 | mkdir -p $RDECK_BASE/tools/{bin,lib} 62 | mv $tmp_dir/com/dtolabs/rundeck/core/cli/templates/* $RDECK_BASE/tools/bin 63 | chmod 755 $RDECK_BASE/tools/bin/* 64 | # Copy the CLI libraries. 65 | cp $core_jar $RDECK_BASE/tools/lib/ 66 | libs="ant-*.jar log4j-*.jar commons-codec-*.jar commons-beanutils-*.jar commons-collections-*.jar commons-logging-*.jar commons-lang-*.jar dom4j-*.jar commons-cli-*.jar jsch-*.jar snakeyaml-*.jar xercesImpl-*.jar jaxen-*.jar commons-httpclient-*.jar jdom-*.jar icu4j-*.jar xom-*.jar" 67 | (cd /var/lib/tomcat6/webapps/rundeck/WEB-INF/lib; cp $libs $RDECK_BASE/tools/lib) 68 | 69 | 70 | # 71 | # Configure Tomcat. 72 | # ------------------- 73 | 74 | http_port=4440 75 | https_port=4443 76 | 77 | # Generate the keystore. 78 | keystore_file=$RDECK_BASE/etc/truststore 79 | keystore_pass=password 80 | 81 | if [ ! -f "$keystore_file" ] 82 | then 83 | mkdir -p $RDECK_BASE/etc 84 | keytool -genkey -noprompt \ 85 | -alias tomcat \ 86 | -keyalg RSA \ 87 | -dname "CN=rundeck.org, OU=CA, O=RUNDECK, L=Rundeck, S=Rundeck, C=US" \ 88 | -keystore "$keystore_file" \ 89 | -storepass $keystore_pass \ 90 | -keypass $keystore_pass 91 | chmod 600 "$keystore_file" 92 | fi 93 | 94 | # Configure tomcat to use our ports and keystore. 95 | # Copy existing configuration to a backup file. 96 | if [ -f /etc/tomcat6/server.xml ] 97 | then cp /etc/tomcat6/server.xml /etc/tomcat6/server.xml.$(date +"%Y-%m-%d-%S") 98 | fi 99 | sed -e "s,@http_port@,$http_port,g" \ 100 | -e "s,@https_port@,$https_port,g" \ 101 | -e "s,@keystore_file@,$keystore_file,g" \ 102 | -e "s,@keystore_pass@,$keystore_pass,g" \ 103 | /vagrant/server.xml > /etc/tomcat6/server.xml 104 | 105 | # Replace tomcat-users with standard rundeck users and roles 106 | cp /etc/tomcat6/tomcat-users.xml /etc/tomcat6/tomcat-users.xml.$(date +"%Y-%m-%d-%S") 107 | cp /vagrant/tomcat-users.xml /etc/tomcat6/tomcat-users.xml 108 | 109 | chkconfig tomcat6 on 110 | 111 | # Configure Rundeck. 112 | # ------------------ 113 | 114 | server_url="https://$HOST_IP:$https_port/rundeck" 115 | 116 | if [ ! -f $RDECK_BASE/rundeck-config.properties ] 117 | then 118 | cat >rundeck-config.properties.new <> /etc/tomcat6/tomcat6.conf <&2 ".";# output message. 174 | else break; # found successful startup message. 175 | fi 176 | let count=$count+1;# increment attempts 177 | [ $count -eq $max ] && { 178 | echo >&2 "FAIL: Reached max attempts to find success message in log. Exiting." 179 | exit 1 180 | } 181 | sleep 10; # wait 10s before trying again. 182 | done 183 | fi 184 | 185 | echo "Rundeck started." 186 | 187 | 188 | # Replace references to localhost with this node's name. 189 | sed -e "s#framework.server.port=.*#framework.server.port=$http_port#g" \ 190 | -e "s#framework.rundeck.url=.*#framework.rundeck.url=http://$HOST_NAME:$http_port#g" \ 191 | $RDECK_BASE/etc/framework.properties >framework.properties.new 192 | mv framework.properties.new $RDECK_BASE/etc/framework.properties 193 | echo >&2 "Updated $RDECK_BASE/etc/framework.properties" 194 | egrep 'framework.server.port|framework.rundeck.url' $RDECK_BASE/etc/framework.properties 195 | 196 | # Done. 197 | exit $? 
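# A minimal post-provision smoke test (a sketch; assumes the ports and
# example project configured above):
#   curl -k -s -o /dev/null -w '%{http_code}\n' https://localhost:4443/rundeck
#   export RDECK_BASE=/etc/tomcat6/rundeck
#   PATH=$RDECK_BASE/tools/bin:$PATH dispatch -p examples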
-------------------------------------------------------------------------------- /tomcat6-mysql/server.xml: -------------------------------------------------------------------------------- (XML content stripped during extraction; not recoverable) -------------------------------------------------------------------------------- /tomcat6-mysql/tomcat-users.xml: -------------------------------------------------------------------------------- (XML content stripped during extraction; not recoverable) -------------------------------------------------------------------------------- /ubuntu/Vagrantfile: -------------------------------------------------------------------------------- 1 | 2 | Vagrant.configure("2") do |config| 3 | 4 | config.vm.box = "precise32" 5 | 6 | config.vm.network :forwarded_port, guest: 4440, host: 14440 7 | 8 | config.vm.provision :shell, :path => "bootstrap.sh" 9 | config.vm.provision :shell, :path => "add-project.sh" 10 | 11 | end 12 | -------------------------------------------------------------------------------- /ubuntu/add-project.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | die() { 4 | [[ $# -gt 1 ]] && { 5 | exit_status=$1 6 | shift 7 | } 8 | printf >&2 "ERROR: $*\n" 9 | 10 | exit ${exit_status:-1} 11 | } 12 | 13 | trap 'die $? "*** add-project failed. ***"' ERR 14 | set -o nounset -o pipefail 15 | 16 | # Create an example project. 17 | rd-project -a create -p example 18 | 19 | # Run simple commands to double check. 20 | # Print out the available nodes. 21 | # Fire off a command. 22 | dispatch -p example 23 | dispatch -p example -f -- whoami 24 | 25 | -------------------------------------------------------------------------------- /ubuntu/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | DEB=http://build.rundeck.org/job/candidate-1.5.1/lastSuccessfulBuild/artifact/packaging/rundeck-1.5.1-1-GA.deb 3 | 4 | die() { 5 | [[ $# -gt 1 ]] && { 6 | exit_status=$1 7 | shift 8 | } 9 | printf >&2 "ERROR: $*\n" 10 | 11 | exit ${exit_status:-1} 12 | } 13 | 14 | #trap 'die $? "*** bootstrap failed. ***"' ERR 15 | 16 | set -o nounset -o pipefail 17 | 18 | apt-get update 19 | # Install the JRE 20 | apt-get -y install openjdk-6-jre 21 | apt-get -y install curl 22 | 23 | # Install Rundeck core 24 | 25 | curl -s --fail $DEB -o rundeck.deb 26 | 27 | dpkg -i rundeck.deb 28 | sleep 10 29 | 30 | # Start up rundeck 31 | if ! /etc/init.d/rundeckd status 32 | then 33 | ( 34 | exec 0>&- # close stdin 35 | /etc/init.d/rundeckd start 36 | ) &> /var/log/rundeck/service.log # redirect stdout/err to a log. 37 | 38 | let count=0 39 | while true 40 | do 41 | if ! grep "Started SocketConnector@" /var/log/rundeck/service.log 42 | then printf >&2 ".";# progress output. 43 | else break; # successful message. 44 | fi 45 | let count=$count+1;# increment attempts 46 | [ $count -eq 18 ] && { 47 | echo >&2 "FAIL: Exceeded max attempts" 48 | exit 1 49 | } 50 | sleep 10 51 | done 52 | fi 53 | 54 | --------------------------------------------------------------------------------