├── .gitmodules ├── version.txt ├── elasticluster ├── share │ └── playbooks │ │ ├── roles │ │ ├── anaconda │ │ │ ├── library │ │ │ │ ├── conda.py │ │ │ │ └── ansible-conda │ │ │ │ │ ├── LICENSE.txt │ │ │ │ │ └── README.md │ │ │ ├── README.md │ │ │ ├── templates │ │ │ │ └── etc │ │ │ │ │ └── profile.d │ │ │ │ │ └── anaconda.sh.j2 │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── LICENSE │ │ ├── nis │ │ │ ├── templates │ │ │ │ └── etc │ │ │ │ │ ├── defaultdomain.j2 │ │ │ │ │ ├── yp.conf.j2 │ │ │ │ │ ├── ypserv.securenets.j2 │ │ │ │ │ └── default │ │ │ │ │ └── nis.j2 │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ ├── init-Debian.yml │ │ │ │ ├── ypserv.yml │ │ │ │ ├── ypbind.yml │ │ │ │ └── main.yml │ │ │ └── files │ │ │ │ └── etc │ │ │ │ └── sysconfig │ │ │ │ └── yppasswdd │ │ ├── postgresql │ │ │ ├── ansible.cfg │ │ │ ├── vars │ │ │ │ ├── empty.yml │ │ │ │ ├── Debian.yml │ │ │ │ └── RedHat.yml │ │ │ ├── .gitignore │ │ │ ├── tests │ │ │ │ ├── playbook.yml │ │ │ │ ├── vars.yml │ │ │ │ ├── Dockerfile-ubuntu14.04 │ │ │ │ ├── Dockerfile-centos6 │ │ │ │ └── idempotence_check.sh │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── vagrant-inventory │ │ │ ├── templates │ │ │ │ ├── etc_apt_preferences.d_apt_postgresql_org_pub_repos_apt.pref.j2 │ │ │ │ ├── etc_monit_conf.d_postgresql.j2 │ │ │ │ ├── etc_systemd_system_postgresql.service.d_custom.conf.j2 │ │ │ │ ├── HOWTO.postgresql.conf │ │ │ │ └── pg_hba.conf.j2 │ │ │ ├── tasks │ │ │ │ ├── monit.yml │ │ │ │ ├── extensions.yml │ │ │ │ ├── extensions │ │ │ │ │ ├── dev_headers.yml │ │ │ │ │ ├── postgis.yml │ │ │ │ │ └── contrib.yml │ │ │ │ ├── users_privileges.yml │ │ │ │ ├── users.yml │ │ │ │ ├── main.yml │ │ │ │ └── install_yum.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── Vagrantfile │ │ │ └── LICENSE │ │ ├── glusterfs-client │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── glusterfs-server │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── gridengine-exec │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ └── init-Debian.yml │ │ │ └── templates │ │ │ │ └── gridengine-execd.service.j2 │ │ ├── pbs+maui │ │ │ ├── templates │ │ │ │ └── var │ │ │ │ │ └── spool │ │ │ │ │ └── torque │ │ │ │ │ ├── server_name.j2 │ │ │ │ │ └── server_priv │ │ │ │ │ └── nodes.j2 │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── files │ │ │ │ └── qmgr.bootstrap │ │ │ └── tasks │ │ │ │ ├── maui.yml │ │ │ │ ├── clients.yml │ │ │ │ └── master.yml │ │ ├── gridengine-common │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ ├── init-Debian.yml │ │ │ │ └── main.yml │ │ │ └── templates │ │ │ │ └── copr-loveshack-sge.repo.j2 │ │ ├── gridengine-master │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ ├── allhosts.grp.conf.j2 │ │ │ │ ├── newhost.qconf.j2 │ │ │ │ └── gridengine-master.service.j2 │ │ │ ├── tasks │ │ │ │ ├── init-Debian.yml │ │ │ │ └── init-RedHat.yml │ │ │ └── files │ │ │ │ └── all.q.conf │ │ ├── hive │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ ├── hive-env.sh.j2 │ │ │ │ └── hive-site.xml.j2 │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── jupyterhub │ │ │ ├── 
meta │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── files │ │ │ │ └── etc │ │ │ │ └── supervisor │ │ │ │ └── conf.d │ │ │ │ └── jupyterhub.conf │ │ ├── jenkins │ │ │ └── handlers │ │ │ │ └── main.yml │ │ ├── spark-master │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── meta │ │ │ │ └── main.yml │ │ ├── ansible │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ └── inventory.j2 │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── glusterfs-common │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ ├── main.yml │ │ │ │ ├── rhel.yml │ │ │ │ └── debian.yml │ │ ├── hadoop-common │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ ├── master.j2 │ │ │ │ ├── slaves.j2 │ │ │ │ ├── mapred-env.sh │ │ │ │ ├── core-site.xml.j2 │ │ │ │ └── hdfs-site.xml.j2 │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── slurm-master │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── install-slurmctld.yml │ │ │ │ ├── install-slurmdbd.yml │ │ │ │ ├── init-Debian.yml │ │ │ │ ├── init-RedHat.yml │ │ │ │ └── main.yml │ │ │ └── meta │ │ │ │ └── main.yml │ │ ├── spark-worker │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── yarn-master │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── yarn-worker │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── hdfs-datanode │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── hdfs-namenode │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── slurm-common │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── files │ │ │ │ └── etc │ │ │ │ │ └── munge │ │ │ │ │ └── munge.key │ │ │ ├── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ ├── init-Debian.yml │ │ │ │ └── mkdir.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ └── copr-slurm.repo.j2 │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── ntpd │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ ├── init-Debian.yml │ │ │ │ └── main.yml │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── README.rst │ │ ├── easybuild │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── deps-Debian.yml │ │ │ │ └── deps-RedHat.yml │ │ │ └── templates │ │ │ │ └── etc │ │ │ │ └── easybuild.cfg.j2 │ │ ├── nfs-server │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ ├── init-Debian.yml │ │ │ │ └── main.yml │ │ ├── ansible.yml │ │ ├── mcr │ │ │ ├── files │ │ │ │ ├── MCRInstaller.zip │ │ │ │ └── install │ │ │ └── templates │ │ │ │ ├── profile.sh.j2 │ │ │ │ └── input.txt.j2 │ │ ├── spark-common │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── files │ │ │ │ └── etc │ │ │ │ └── profile.d │ │ │ │ └── pyspark.sh │ │ ├── common │ │ │ ├── templates │ │ │ │ └── etc │ │ │ │ │ ├── ssh │ │ │ │ │ ├── shosts.equiv.j2 │ │ │ │ │ └── ssh_known_hosts.j2 │ │ │ │ │ ├── exports.j2 │ │ │ │ │ └── hosts.j2 │ │ │ ├── tasks │ │ │ │ ├── hosts.yml │ │ │ │ ├── main.yml │ │ │ │ ├── init-RedHat.yml │ │ │ │ ├── gc3repo.yml │ │ │ │ ├── software-Debian.yml │ │ │ │ ├── ssh_host_based_authentication.yml │ │ │ │ ├── software-RedHat.yml │ │ │ │ └── hostname.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── files │ │ │ │ └── etc │ │ │ │ └── yum.repos.d │ │ │ │ └── gc3.repo.j2 │ │ ├── htcondor │ │ │ ├── 
handlers │ │ │ │ └── common.yml │ │ │ ├── templates │ │ │ │ ├── etc │ │ │ │ │ ├── apt │ │ │ │ │ │ └── sources.list.d │ │ │ │ │ │ │ └── htcondor.list.j2 │ │ │ │ │ └── condor │ │ │ │ │ │ └── condor_config.local.j2 │ │ │ │ └── htcondor.debconf.j2 │ │ │ └── tasks │ │ │ │ └── common.yml │ │ ├── r │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ └── etc │ │ │ │ │ └── R │ │ │ │ │ └── Rprofile.site.j2 │ │ │ └── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ └── main.yml │ │ ├── jupyterhub.yml │ │ ├── slurm-client │ │ │ └── meta │ │ │ │ └── main.yml │ │ ├── slurm-worker │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── main.yml │ │ │ │ ├── init-RedHat.yml │ │ │ │ └── init-Debian.yml │ │ │ └── files │ │ │ │ └── usr │ │ │ │ └── lib │ │ │ │ └── systemd │ │ │ │ └── system │ │ │ │ └── slurmd.service │ │ ├── ceph │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── mds.yml │ │ │ │ ├── main.yml │ │ │ │ └── osd.yml │ │ │ └── templates │ │ │ │ └── etc │ │ │ │ └── ceph │ │ │ │ └── ceph.conf.j2 │ │ ├── lua │ │ │ ├── vars │ │ │ │ └── main.yml │ │ │ ├── templates │ │ │ │ └── etc │ │ │ │ │ └── profile.d │ │ │ │ │ └── lua.sh.j2 │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── jenkins.yml │ │ ├── hive-server │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── ganglia │ │ │ ├── files │ │ │ │ └── etc │ │ │ │ │ └── httpd │ │ │ │ │ └── conf.d │ │ │ │ │ └── ganglia.conf │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── client.yml │ │ ├── bigtop │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── pdsh │ │ │ ├── files │ │ │ │ └── etc │ │ │ │ │ └── profile.d │ │ │ │ │ └── pdsh.sh │ │ │ ├── templates │ │ │ │ └── etc │ │ │ │ │ └── genders.j2 │ │ │ └── tasks │ │ │ │ └── main.yml │ │ ├── lmod │ │ │ ├── files │ │ │ │ └── etc │ │ │ │ │ └── profile.d │ │ │ │ │ └── 000_user_is_root.sh │ │ │ ├── templates │ │ │ │ └── etc │ │ │ │ │ └── profile.d │ │ │ │ │ ├── z80_StdEnv.csh.j2 │ │ │ │ │ └── z80_StdEnv.sh.j2 │ │ │ ├── tasks │ │ │ │ └── main.yml │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── jupyter │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── bash.yml │ │ │ │ ├── main.yml │ │ │ │ ├── irkernel.yml │ │ │ │ └── python.yml │ │ │ ├── templates │ │ │ │ └── pyspark.kernel.json.j2 │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── files │ │ │ │ └── usr │ │ │ │ └── local │ │ │ │ └── share │ │ │ │ └── jupyter │ │ │ │ └── kernels │ │ │ │ ├── pyspark2 │ │ │ │ └── startup.py │ │ │ │ └── pyspark3 │ │ │ │ └── startup.py │ │ ├── iptables │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ │ ├── init-RedHat.yml │ │ │ │ └── init-Debian.yml │ │ │ └── templates │ │ │ │ └── etc │ │ │ │ └── iptables │ │ │ │ └── rules.v4.j2 │ │ ├── nfs-client │ │ │ ├── tasks │ │ │ │ ├── nfsmount.yml │ │ │ │ └── main.yml │ │ │ └── defaults │ │ │ │ └── main.yml │ │ ├── ganglia.yml │ │ ├── glusterfs.yml │ │ ├── hadoop.yml │ │ ├── cluster │ │ │ └── tasks │ │ │ │ └── packages.yml │ │ ├── pbs+maui.yml │ │ ├── htcondor.yml │ │ ├── gridengine.yml │ │ ├── slurm.yml │ │ └── pvfs2 │ │ │ └── files │ │ │ └── pvfs2.debian.init │ │ ├── private_vars │ │ └── ldap │ │ ├── examples │ │ ├── nfsexport.yml │ │ ├── variables.yml │ │ ├── variables.j2 │ │ ├── nestedvars.yml │ │ └── drbd.yml │ │ ├── after.yml │ │ ├── group_vars │ │ └── jenkins │ │ ├── before.yml │ │ └── site.yml └── __init__.py ├── setup.cfg ├── docs ├── presentations 
│ ├── chosug-20131206 │ │ ├── elasticluster.toc │ │ ├── elasticluster.pdf │ │ └── uzh_logo_e_pos.pdf │ ├── egiTF2013 │ │ ├── uzh_logo_e_pos.pdf │ │ └── elasticluster_egi_TF_2013.pdf │ └── euroscipy2013 │ │ └── elasticluster_euroscipy_2013.pdf ├── api │ ├── elasticluster │ │ ├── utils.rst │ │ ├── conf.rst │ │ ├── main.rst │ │ ├── cluster.rst │ │ ├── exceptions.rst │ │ ├── providers.rst │ │ ├── repository.rst │ │ ├── subcommands.rst │ │ ├── gc3pie_config.rst │ │ └── providers │ │ │ ├── gce.rst │ │ │ ├── ec2_boto.rst │ │ │ ├── openstack.rst │ │ │ └── ansible_provider.rst │ └── index.rst └── customize.rst ├── MANIFEST.in ├── tox.ini ├── .travis.yml ├── tests ├── Dockerfile └── _helpers │ └── __init__.py ├── .gitignore └── update_storage.py /.gitmodules: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /version.txt: -------------------------------------------------------------------------------- 1 | 1.3.dev 2 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/anaconda/library/conda.py: -------------------------------------------------------------------------------- 1 | ansible-conda/conda.py -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/templates/etc/defaultdomain.j2: -------------------------------------------------------------------------------- 1 | {{NIS_DOMAIN}} 2 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = ../ -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [tool:pytest] 2 | norecursedirs = docs elasticluster/share .* build dist *.egg 3 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/private_vars/ldap: -------------------------------------------------------------------------------- 1 | U2FsdGVkX1/OOsq0AAAAAB84/prniRasGIoEKh90qFCZm0klo9z2Pb+BRiAPu1RA 2 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/glusterfs-client/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - glusterfs-common 5 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/glusterfs-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - glusterfs-common 5 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-exec/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | - gridengine-common 5 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pbs+maui/templates/var/spool/torque/server_name.j2: -------------------------------------------------------------------------------- 1 | {{ groups['pbs_master'][0] }} 2 | 
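The `server_name.j2` template above renders to a single line: the inventory name of the first host in the `pbs_master` group, which TORQUE then uses as its server name. A minimal sketch of how the expression resolves, assuming a hypothetical inventory (the host names below are illustrative and not part of this repository):

    # hypothetical Ansible inventory
    [pbs_master]
    frontend001
    [pbs_clients]
    compute001
    compute002

    # with that inventory, {{ groups['pbs_master'][0] }} renders
    # /var/spool/torque/server_name as:
    frontend001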
--------------------------------------------------------------------------------
/docs/presentations/chosug-20131206/elasticluster.toc:
--------------------------------------------------------------------------------
1 | \beamer@endinputifotherversion {3.24pt}
2 | \select@language {english}
3 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/gridengine-common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # SGE cell name
4 | SGE_CELL: 'default'
5 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/gridengine-master/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | dependencies:
4 |   - gridengine-common
5 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/vars/empty.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This file intentionally does not define any variables.
3 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | recursive-include elasticluster/share *
2 | recursive-include docs *
3 | include elasticluster README.rst version.txt
4 | 
5 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hive/meta/main.yml:
--------------------------------------------------------------------------------
1 | # hive/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: bigtop }
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jupyterhub/meta/main.yml:
--------------------------------------------------------------------------------
1 | # jupyterhub/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - jupyter
6 | 
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py26,py27
3 | 
4 | [testenv]
5 | deps =
6 |     mock
7 |     pytest
8 | commands = py.test {posargs}
9 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jenkins/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart jenkins
3 |   action: service name=jenkins state=restarted
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/spark-master/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | spark_history_storage_uri: 'file:////var/log/spark/apps'
4 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ansible/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # the version of Ansible to install
4 | ANSIBLE_VERSION: 'LATEST'
5 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/glusterfs-common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # version of the SW to install
4 | glusterfs_version: '3.8'
5 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hadoop-common/meta/main.yml:
--------------------------------------------------------------------------------
1 | # hadoop-common/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: bigtop }
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | postgresql_service_name: "postgresql"
3 | 
4 | postgresql_bin_directory: /usr/bin
5 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-master/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | SLURM_ACCOUNTING_HOST: '{{groups.slurm_master[0]|default("slurmdbd")}}'
4 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/spark-master/meta/main.yml:
--------------------------------------------------------------------------------
1 | # spark-master/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: spark-common }
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/spark-worker/meta/main.yml:
--------------------------------------------------------------------------------
1 | # spark-worker/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: spark-common }
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/yarn-master/meta/main.yml:
--------------------------------------------------------------------------------
1 | # yarn-master/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: hadoop-common }
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/yarn-worker/meta/main.yml:
--------------------------------------------------------------------------------
1 | # yarn-worker/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: hadoop-common }
6 | 
--------------------------------------------------------------------------------
/docs/presentations/egiTF2013/uzh_logo_e_pos.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/googlegenomics/elasticluster/master/docs/presentations/egiTF2013/uzh_logo_e_pos.pdf
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/gridengine-common/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | dependencies:
4 |   - role: iptables
5 |     default_input_policy: 'ACCEPT'
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hdfs-datanode/meta/main.yml:
--------------------------------------------------------------------------------
1 | # hdfs-datanode/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: hadoop-common }
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hdfs-namenode/meta/main.yml:
--------------------------------------------------------------------------------
1 | # hdfs-namenode/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: hadoop-common }
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-common/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: restart munge
4 |   service:
5 |     name=munge
6 |     state=restarted
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ntpd/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: restart ntpd
4 |   service:
5 |     name='{{ntpd_service}}'
6 |     state=restarted
7 | 
--------------------------------------------------------------------------------
/docs/presentations/chosug-20131206/elasticluster.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/googlegenomics/elasticluster/master/docs/presentations/chosug-20131206/elasticluster.pdf
--------------------------------------------------------------------------------
/docs/presentations/chosug-20131206/uzh_logo_e_pos.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/googlegenomics/elasticluster/master/docs/presentations/chosug-20131206/uzh_logo_e_pos.pdf
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/easybuild/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # directory where to download and run the bootstrap script
4 | easybuild_build_dir: '/var/tmp'
5 | 
--------------------------------------------------------------------------------
/docs/presentations/egiTF2013/elasticluster_egi_TF_2013.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/googlegenomics/elasticluster/master/docs/presentations/egiTF2013/elasticluster_egi_TF_2013.pdf
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/nfs-server/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | NFS_EXPORTS:
4 |   - path: '/home'
5 |     clients: 'localhost'
6 |     options: 'rw,no_root_squash'
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ansible.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Ansible
3 |   hosts: ansible
4 | 
5 |   roles:
6 |     - ansible
7 | 
8 |   tags:
9 |     - ansible
10 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/gridengine-exec/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart execd
3 |   service:
4 |     name='{{gridengine_exec_service}}'
5 |     state=restarted
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .AppleDouble
3 | .LSOverride
4 | Icon
5 | ._*
6 | .Spotlight-V100
7 | .Trashes
8 | .vagrant
9 | test
10 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-master/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: restart slurmdbd
4 |   service:
5 |     name='{{slurmdbd_service_name}}'
6 |     state=restarted
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/mcr/files/MCRInstaller.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/googlegenomics/elasticluster/master/elasticluster/share/playbooks/roles/mcr/files/MCRInstaller.zip
--------------------------------------------------------------------------------
/docs/presentations/euroscipy2013/elasticluster_euroscipy_2013.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/googlegenomics/elasticluster/master/docs/presentations/euroscipy2013/elasticluster_euroscipy_2013.pdf
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/spark-common/meta/main.yml:
--------------------------------------------------------------------------------
1 | # spark-common/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - { role: anaconda }
6 |   - { role: bigtop }
7 |   - { role: r }
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/glusterfs-common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - include: debian.yml
4 |   when: is_debian_compatible
5 | 
6 | - include: rhel.yml
7 |   when: is_rhel_compatible
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/gridengine-master/templates/allhosts.grp.conf.j2:
--------------------------------------------------------------------------------
1 | group_name @allhosts
2 | hostlist {% for host in groups['gridengine_worker'] %}
3 | {{ host }}
4 | {%- endfor %}
5 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/common/templates/etc/ssh/shosts.equiv.j2:
--------------------------------------------------------------------------------
1 | {% for host in groups.all %}
2 | {{ host }}
3 | {{ hostvars[host].ansible_default_ipv4.address | default('') }}
4 | {% endfor %}
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/htcondor/handlers/common.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: mount home
3 |   action: shell mount /home
4 | 
5 | - name: restart HTCondor
6 |   action: service name=condor state=restarted
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-common/files/etc/munge/munge.key:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/googlegenomics/elasticluster/master/elasticluster/share/playbooks/roles/slurm-common/files/etc/munge/munge.key
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/common/tasks/hosts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set /etc/hosts from Ansible hostgroups
3 |   template:
4 |     dest=/etc/hosts
5 |     src=etc/hosts.j2
6 |     owner=root
7 |     group=root
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/tests/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - hosts: all
4 |   remote_user: root
5 |   become: yes
6 |   vars_files:
7 |     - ./vars.yml
8 |   roles:
9 |     - postgresql
10 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/r/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # r/defaults/main.yml
2 | ---
3 | 
4 | # preferred CRAN mirror, see: https://cran.r-project.org/mirrors.html
5 | r_cran_mirror_url: 'http://cloud.r-project.org/'
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jupyterhub.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: JupyterHub playbook
3 |   hosts: jupyterhub
4 |   roles:
5 |     - jupyter
6 |     - jupyterhub
7 |   tags:
8 |     - jupyter
9 |     - jupyterhub
10 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/handlers/main.yml:
--------------------------------------------------------------------------------
1 | # file: postgresql/handlers/main.yml
2 | 
3 | - name: restart postgresql
4 |   service:
5 |     name: "{{ postgresql_service_name }}"
6 |     state: restarted
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/vagrant-inventory:
--------------------------------------------------------------------------------
1 | [anxs]
2 | anxs.local ansible_ssh_host=192.168.88.22 ansible_ssh_port=22 ansible_ssh_user=vagrant ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key
3 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-client/meta/main.yml:
--------------------------------------------------------------------------------
1 | # slurm-client/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - role: slurm-common
6 |     SLURM_ACCOUNTING_HOST: '{{groups.slurm_master[0]|default("slurmdbd")}}'
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-common/tasks/init-RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: Set SLURM common playbook params (RHEL 7.x compatible)
4 |   set_fact:
5 |     slurm_pid_dir: /var/run
6 |   when: 'is_rhel_compatible'
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-master/tasks/install-slurmctld.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: Install SLURM master packages
4 |   package:
5 |     pkg='{{item}}'
6 |     state=latest
7 |   with_items: '{{ slurmctld_packages }}'
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jupyterhub/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # see: http://www.onurguzel.com/supervisord-restarting-and-reloading/
4 | - name: reread supervisord configuration
5 |   command: |
6 |     supervisorctl reread
7 | 
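The `reread` handler above only asks supervisord to re-scan its configuration files; it does not by itself apply the changes to running programs. As a usage sketch only (this companion handler is an assumption and is not defined in the jupyterhub role), the standard follow-up step with supervisord would be:

    # hypothetical companion handler: `supervisorctl update` restarts
    # exactly those programs whose configuration changed since `reread`
    - name: apply supervisord configuration
      command: |
        supervisorctl update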
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ntpd/tasks/init-RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: Set NTPd common playbook params (RHEL/CentOS)
4 |   set_fact:
5 |     ntpd_service: ntpd
6 |     ntpd_package: ntp
7 |   when:
8 |     '{{is_rhel_compatible}}'
9 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ntpd/tasks/init-Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: Set NTPd common playbook params (Debian/Ubuntu)
4 |   set_fact:
5 |     ntpd_service: ntp
6 |     ntpd_package: ntp
7 |   when:
8 |     '{{is_debian_compatible}}'
9 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-common/tasks/init-Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: Set SLURM common playbook params (Debian/Ubuntu)
4 |   set_fact:
5 |     slurm_pid_dir: /var/run/slurm-llnl
6 |   when:
7 |     '{{is_debian_compatible}}'
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/templates/etc_apt_preferences.d_apt_postgresql_org_pub_repos_apt.pref.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | 
3 | Package: *
4 | Pin: release o=apt.postgresql.org
5 | Pin-Priority: {{ postgresql_apt_pin_priority }}
6 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-master/meta/main.yml:
--------------------------------------------------------------------------------
1 | # slurm-master/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - role: slurm-common
6 |     SLURM_ACCOUNTING_HOST: '{{groups.slurm_master[0]|default("slurmdbd")}}'
7 |   - role: slurm-client
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-worker/meta/main.yml:
--------------------------------------------------------------------------------
1 | # slurm-worker/meta/main.yml
2 | ---
3 | 
4 | dependencies:
5 |   - role: slurm-common
6 |     SLURM_ACCOUNTING_HOST: '{{groups.slurm_master[0]|default("slurmdbd")}}'
7 |   - role: slurm-client
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ceph/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: makecephs
3 |   action: shell mkcephfs -a -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.keyring
4 |   notify: restart ceph
5 | 
6 | - name: restart ceph
7 |   action: service name=ceph state=restarted
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/tasks/monit.yml:
--------------------------------------------------------------------------------
1 | # file: postgresql/tasks/monit.yml
2 | 
3 | - name: PostgreSQL | (Monit) Copy the postgresql monit service file
4 |   template:
5 |     src: etc_monit_conf.d_postgresql.j2
6 |     dest: /etc/monit/conf.d/postgresql
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/lua/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # Lmod's Lua distribution uses a custom non-GNU makefile which does not allow
4 | # out-of-tree build, so this *has* to be the same as the source files directory
5 | lua_build_dir: '{{lua_source_dir}}/lua-{{LUA_VERSION}}'
6 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | python:
3 |   - "2.6"
4 |   - "2.7"
5 | install:
6 |   - "pip install --upgrade 'setuptools' 'pip>=8.1.2'"
7 |   - "pip install 'pytest>=2.10' 'pytest-cov' 'mock' 'tox'"
8 |   - "pip install ."
9 | script: py.test --cov=elasticluster
10 | sudo: false
11 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/spark-common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # spark-common/defaults/main.yml
2 | ---
3 | 
4 | # Apache BigTop uses `/etc/spark/conf.*` directories and then
5 | # symlinks `/etc/spark/conf` to the actual configuration
6 | SPARK_CONF_DIR: '/etc/spark/conf.elasticluster'
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hadoop-common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # hadoop-common/defaults/main.yml
2 | ---
3 | 
4 | # Apache BigTop uses `/etc/hadoop/conf.*` directories and then
5 | # symlinks `/etc/hadoop/conf` to the actual configuration
6 | HADOOP_CONF_DIR: '/etc/hadoop/conf.elasticluster'
7 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ntpd/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # by default, no NTP server in the cluster, sync time with public NTP hosts
4 | ntp_server: "{{groups.ntp_server|default([])}}"
5 | 
6 | # by default, peer with every other host in the cluster
7 | ntp_peers: "{{groups.all}}"
8 | 
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-common/meta/main.yml:
--------------------------------------------------------------------------------
1 | # slurm-common/meta/main.yml
2 | ---
3 | 
4 | # Install the GlusterFS client software, to allow to easily use
5 | # GlusterFS with a SLURM compute cluster
6 | 
7 | dependencies:
8 |   - role: glusterfs-client
9 |     GLUSTERFS_MOUNTS: []
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jenkins.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Jenkins CIS Playbook
3 |   hosts: jenkins
4 |   tasks:
5 |     - include: common/tasks/ssh_host_based_authentication.yml hosts=${groups.all}
6 |     - include: jenkins/tasks/main.yml
7 |   handlers:
8 |     - include: jenkins/handlers/main.yml
--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/nis/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | NIS_DOMAIN: elasticluster
4 | 
5 | NIS_MASTER: "{{(groups.nis_master|default(['ypserv']))[0]}}"
6 | 
7 | NIS_SLAVES: "{{groups.nis_slaves|default([])}}"
8 | 
9 | NIS_CLIENTS: "{{groups.nis_clients|default(groups.all)}}"
10 | 
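The NIS defaults above are derived from Ansible inventory groups, with the `default()` filter supplying a fallback when a group is not defined. A minimal sketch of how `NIS_MASTER` resolves in the two cases (host and group contents below are hypothetical):

    # if the inventory has a `nis_master` group containing `frontend001`,
    # then groups.nis_master == ['frontend001'] and:
    NIS_MASTER: 'frontend001'

    # if no `nis_master` group exists, default(['ypserv']) supplies the
    # fallback list, whose first element is taken, so:
    NIS_MASTER: 'ypserv'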
-------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hive-server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # hive-server/defaults/main.yml 2 | --- 3 | 4 | # Version of the Hive DB schema to load. This goes hand-in-hand with 5 | # the Hive package version, but not quite: Hive 1.2.1 uses 6 | # schema version 1.2.0 ... 7 | hive_schema_version: '1.2.0' 8 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pbs+maui/templates/var/spool/torque/server_priv/nodes.j2: -------------------------------------------------------------------------------- 1 | {% for host in groups['pbs_clients'] %} 2 | {{ host }}{% if 1 < hostvars[host].ansible_processor_count|int %} np={{ hostvars[host].ansible_processor_count }} num_node_boards=1{% else %} np=1{% endif %} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /docs/api/elasticluster/utils.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.utils` 8 | ===================== 9 | .. automodule:: elasticluster.utils 10 | :members: 11 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ganglia/files/etc/httpd/conf.d/ganglia.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Ganglia monitoring system php web frontend 3 | # 4 | 5 | Alias /ganglia /usr/share/ganglia 6 | 7 | 8 | AllowOverride All 9 | Order allow,deny 10 | Allow from all 11 | Deny from none 12 | 13 | -------------------------------------------------------------------------------- /docs/api/elasticluster/conf.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.conf` 8 | ==================== 9 | .. automodule:: elasticluster.conf 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /docs/api/elasticluster/main.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.main` 8 | ==================== 9 | .. automodule:: elasticluster.main 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hive/templates/hive-env.sh.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # this is needed for hive to see the PostgreSQL JDBC connector 7 | export HIVE_AUX_JARS_PATH=/usr/share/java/ 8 | -------------------------------------------------------------------------------- /docs/api/elasticluster/cluster.rst: -------------------------------------------------------------------------------- 1 | .. 
Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.cluster` 8 | ======================= 9 | .. automodule:: elasticluster.cluster 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /docs/customize.rst: -------------------------------------------------------------------------------- 1 | .. Hey, Emacs this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | .. include:: global.inc 7 | 8 | ============================= 9 | Customizing elasticluster 10 | ============================= 11 | 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/templates/etc/exports.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ANSIBLE 2 | # any local modifications will be overwritten! 3 | # 4 | 5 | /home {{hostvars[inventory_hostname].ansible_default_ipv4.network}}/{{hostvars[inventory_hostname].ansible_default_ipv4.netmask}}(rw,async,no_subtree_check,no_root_squash) 6 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-master/templates/newhost.qconf.j2: -------------------------------------------------------------------------------- 1 | hostname {{ item }} 2 | load_scaling NONE 3 | complex_values slots={{ hostvars[item].ansible_processor_count|default(2) }} 4 | user_lists NONE 5 | xuser_lists NONE 6 | projects NONE 7 | xprojects NONE 8 | usage_scaling NONE 9 | report_variables NONE 10 | -------------------------------------------------------------------------------- /docs/api/elasticluster/exceptions.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.exceptions` 8 | ========================== 9 | .. automodule:: elasticluster.exceptions 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /docs/api/elasticluster/providers.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.providers` 8 | ========================= 9 | .. automodule:: elasticluster.providers 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /docs/api/elasticluster/repository.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.repository` 8 | ========================== 9 | .. automodule:: elasticluster.repository 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/mcr/files/install: -------------------------------------------------------------------------------- 1 | #! 
/bin/sh 2 | 3 | cat <<__EOF__ 4 | This is a placeholder installation script that does nothing. 5 | 6 | In order to get this playbook working, you should replace 7 | the 'MCRInstaller.zip' file with the one copied over from 8 | a MATLAB installation. 9 | __EOF__ 10 | 11 | exit 1 12 | -------------------------------------------------------------------------------- /docs/api/elasticluster/subcommands.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.subcommands` 8 | =========================== 9 | .. automodule:: elasticluster.subcommands 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/anaconda/README.md: -------------------------------------------------------------------------------- 1 | The code from this role is a minor modification of the 2 | [ansible-anaconda][1] playbook written by Andrew Rothstein, and as 3 | such maintains the original distribution license. See the 4 | accompanying `LICENSE` file for details. 5 | 6 | [1]: https://github.com/andrewrothstein/ansible-anaconda 7 | -------------------------------------------------------------------------------- /docs/api/elasticluster/gc3pie_config.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.gc3pie_config` 8 | ============================= 9 | .. automodule:: elasticluster.gc3pie_config 10 | :members: 11 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/spark-common/files/etc/profile.d/pyspark.sh: -------------------------------------------------------------------------------- 1 | # Additional settings for PySpark 2 | 3 | # force PySpark to use the Hadoop/YARN cluster 4 | # (default is to use a local executor) 5 | export MASTER=yarn-client 6 | 7 | # by default, calling `pyspark` starts an IPython interpreter 8 | export PYSPARK_DRIVER_PYTHON=ipython 9 | -------------------------------------------------------------------------------- /docs/api/elasticluster/providers/gce.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.providers.gce` 8 | ============================= 9 | .. 
automodule:: elasticluster.providers.gce 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/bigtop/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # bigtop/defaults/main.yml 2 | --- 3 | 4 | # What release of Apache Bigtop to get packages from 5 | bigtop_release: '1.1.0' 6 | 7 | # identifier for GnuPG key(s) used to sign packages from the Apache Bigtop repo 8 | bigtop_signing_key: 9 | - '13971DA39475BD5D' 10 | - '3A367EA0FA08B173' 11 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/extensions.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/extensions.yml 2 | 3 | - include: extensions/contrib.yml 4 | when: postgresql_ext_install_contrib 5 | - include: extensions/dev_headers.yml 6 | when: postgresql_ext_install_dev_headers 7 | - include: extensions/postgis.yml 8 | when: postgresql_ext_install_postgis 9 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pdsh/files/etc/profile.d/pdsh.sh: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # PDSH defaults to using `rsh` for executing remote commands, but SSH 7 | # is actually a much better default choice... 8 | export PDSH_RCMD_TYPE='ssh' 9 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart NIS master services 4 | service: 5 | name: '{{item}}' 6 | state: restarted 7 | with_items: '{{nis_master_services}}' 8 | 9 | 10 | - name: restart ypbind 11 | service: 12 | name: '{{item}}' 13 | state: restarted 14 | with_items: '{{nis_client_services}}' 15 | -------------------------------------------------------------------------------- /docs/api/elasticluster/providers/ec2_boto.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.providers.ec2_boto` 8 | ================================== 9 | .. automodule:: elasticluster.providers.ec2_boto 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /docs/api/elasticluster/providers/openstack.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.providers.openstack` 8 | =================================== 9 | .. 
automodule:: elasticluster.providers.openstack 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hadoop-common/templates/master.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # 7 | # The VM(s) below will run HDFS NameNode and YARN ResourceManager services. 8 | # 9 | {% for fqdn in groups.hadoop_master %} 10 | {{fqdn}} 11 | {% endfor %} 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | is_nis_master_server: '(ansible_hostname == NIS_MASTER)' 4 | 5 | is_nis_slave_server: '(ansible_hostname in NIS_SLAVES)' 6 | 7 | is_nis_server: '(({{is_nis_master_server}}) or ({{is_nis_slave_server}}))' 8 | 9 | is_nis_client: '((ansible_hostname in (NIS_CLIENTS + NIS_SLAVES)) and not ({{is_nis_master_server}}))' 10 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pbs+maui/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart pbs_mom 3 | action: service name=pbs_mom state=restarted 4 | when: is_centos 5 | 6 | - name: restart pbs_server 7 | action: service name=pbs_server state=restarted 8 | when: is_centos 9 | 10 | - name: restart maui.d 11 | action: service name=maui.d state=restarted 12 | when: is_centos 13 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hadoop-common/templates/slaves.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # 7 | # All hosts below will run the HDFS DataNode and YARN NodeManager 8 | # services. 9 | # 10 | {% for fqdn in groups.hadoop_worker %} 11 | {{fqdn}} 12 | {% endfor %} 13 | -------------------------------------------------------------------------------- /docs/api/elasticluster/providers/ansible_provider.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | `elasticluster.providers.ansible_provider` 8 | ========================================== 9 | .. automodule:: elasticluster.providers.ansible_provider 10 | :members: 11 | 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/lua/templates/etc/profile.d/lua.sh.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | 5 | 6 | # Use the ElastiCluster-installed Lua interpreter as the first one in $PATH, 7 | # instead of the one (possibly) provided by the Linux distribution. 
8 | export PATH="{{ lua_install_dir }}/bin:${PATH}" 9 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/r/templates/etc/R/Rprofile.site.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # see: https://www.r-bloggers.com/permanently-setting-the-cran-repository/ 7 | local({ 8 | r <- getOption("repos") 9 | r["CRAN"] <- "{{r_cran_mirror_url}}" 10 | options(repos = r) 11 | }) 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/lmod/files/etc/profile.d/000_user_is_root.sh: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # the `USER_IS_ROOT` env. var. is used in Lmod's 7 | # initialization file; see: https://github.com/TACC/Lmod/issues/26 8 | if [ $(id -u) -eq 0 ]; then 9 | export USER_IS_ROOT=1 10 | fi 11 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/extensions/dev_headers.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/extensions/dev_headers.yml 2 | 3 | - name: PostgreSQL | Extensions | Make sure the development headers are installed 4 | apt: 5 | name: libpq-dev 6 | state: present 7 | update_cache: yes 8 | cache_valid_time: "{{apt_cache_valid_time | default (3600)}}" 9 | notify: 10 | - restart postgresql 11 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pbs+maui/files/qmgr.bootstrap: -------------------------------------------------------------------------------- 1 | # 2 | # Create queues and set their attributes. 3 | # 4 | # 5 | # Create and define queue default 6 | # 7 | create queue default 8 | set queue default queue_type = Execution 9 | set queue default enabled = True 10 | set queue default started = True 11 | # 12 | # Set server attributes. 
13 | # 14 | set server scheduling = True 15 | set server default_queue = default 16 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/templates/etc_monit_conf.d_postgresql.j2: -------------------------------------------------------------------------------- 1 | check process postgresql with pidfile /var/run/postgresql/{{postgresql_version}}-{{postgresql_cluster_name}}.pid 2 | group database 3 | start program = "/etc/init.d/postgresql start" 4 | stop program = "/etc/init.d/postgresql stop" 5 | if failed host localhost port 5432 protocol pgsql then restart 6 | if 5 restarts within 5 cycles then timeout 7 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tests/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | postgresql_version: 9.5 4 | 5 | postgresql_databases: 6 | - name: foobar 7 | owner: baz 8 | 9 | postgresql_users: 10 | - name: baz 11 | pass: pass 12 | 13 | - name: zab 14 | pass: md51a1dc91c907325c69271ddf0c944bc72 15 | encrypted: yes 16 | 17 | - name: zabaz 18 | 19 | postgresql_user_privileges: 20 | - name: baz 21 | db: foobar 22 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Provide workaround for YAML syntax error in lines containing colon+space 4 | set_fact: 5 | __colon__: ':' 6 | tags: 7 | - cloud-init 8 | 9 | 10 | - include: 'init-{{ansible_os_family}}.yml' 11 | - include: hosts.yml hosts={{groups.all}} 12 | - include: hostname.yml 13 | - include: 'software-{{ansible_os_family}}.yml' 14 | - include: ssh_host_based_authentication.yml 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/jupyter/meta/main.yml: -------------------------------------------------------------------------------- 1 | # jupyter/meta/main.yml 2 | --- 3 | 4 | dependencies: 5 | # install Anaconda Python3 to run the Jupyter Notebook itself 6 | - role: anaconda 7 | anaconda_python_version: '3' 8 | anaconda_in_path: true 9 | # ...but also install Anaconda Python2 because Py2 is still more widely used 10 | - role: anaconda 11 | anaconda_python_version: '2' 12 | anaconda_in_path: false 13 | - role: r 14 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # slurm-worker/tasks/main.yml 2 | --- 3 | 4 | - name: Load distribution-specific parameters 5 | include: 'init-{{ansible_os_family}}.yml' 6 | 7 | - name: Install SLURM worker packages 8 | package: 9 | name={{item}} 10 | state=latest 11 | with_items: '{{slurmd_packages}}' 12 | 13 | - name: Ensure SLURMd starts at boot 14 | service: 15 | name='{{slurmd_service}}' 16 | enabled=yes 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/iptables/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Policy to apply to incoming packets in the INPUT queue, if they are 4 | # not matched by an earlier rule. Applies to both IPv4 and IPv6 rules. 
5 | default_input_policy: DROP 6 | 7 | # Policy to apply to incoming packets in the FORWARD queue, if they are 8 | # not matched by an earlier rule. Applies to both IPv4 and IPv6 rules. 9 | # Only relevant for gateway hosts. 10 | default_forward_policy: DROP -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/meta/main.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/meta/main.yml 2 | 3 | galaxy_info: 4 | author: pjan vandaele 5 | company: ANXS 6 | description: "Install and configure PostgreSQL, dependencies, extensions, databases and users." 7 | min_ansible_version: 1.9.4 8 | license: MIT 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - all 13 | categories: 14 | - database 15 | - database:sql 16 | 17 | dependencies: [] 18 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/r/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set R common playbook params (CentOS/RHEL) 4 | set_fact: 5 | r_packages: 6 | - R 7 | - R-devel 8 | # dependency packages for Jupyter's IRkernel, 9 | # see: https://irkernel.github.io/installation/#source-panel 10 | - czmq-devel 11 | # more dependencies, as appear in R pkg compile logs 12 | - libssh2-devel 13 | when: 14 | '{{is_rhel_compatible}}' 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/r/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # r/tasks/main.yml 2 | --- 3 | 4 | - name: Load distribution-specific parameters 5 | include: 'init-{{ansible_os_family}}.yml' 6 | 7 | 8 | - name: Install R packages 9 | package: 10 | name='{{item}}' 11 | state=present 12 | with_items: '{{r_packages}}' 13 | 14 | 15 | - name: Set default CRAN repository 16 | template: 17 | dest: '/etc/R/Rprofile.site' 18 | src: 'etc/R/Rprofile.site.j2' 19 | mode: 0444 20 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-exec/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for RHEL/CentOS variants 4 | # 5 | 6 | - name: Set GridEngine execd variables (RHEL/CentOS) 7 | tags: 8 | - gridengine 9 | - gridengine-exec 10 | set_fact: 11 | # execd service name 12 | gridengine_execd_service: 'sgeexecd_{{SGE_CELL}}' 13 | # packages to install 14 | gridengine_execd_packages: 15 | - gridengine-execd 16 | when: 17 | 'is_rhel_compatible' 18 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-common/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for RHEL/CentOS variants 4 | # 5 | 6 | - name: Set GridEngine variables (RHEL/CentOS) 7 | tags: 8 | - gridengine 9 | - gridengine-common 10 | set_fact: 11 | # SGE packages from Dave Love use the "traditional" packaging 12 | # with everything being written to subdirectories of `$SGE_ROOT` 13 | SGE_ROOT: '/opt/sge' 14 | SGE_VAR: '/opt/sge' 15 | when: 'is_rhel_compatible' 16 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/tasks/init-RedHat.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set NIS common playbook params (CentOS/RHEL) 4 | set_fact: 5 | nis_common_packages: 6 | - yp-tools 7 | nis_client_packages: 8 | - ypbind 9 | nis_client_services: 10 | - rpcbind 11 | - ypbind 12 | nis_master_packages: 13 | - ypserv 14 | nis_master_services: 15 | - rpcbind 16 | - yppasswdd 17 | - ypserv 18 | nis_securenets_path: '/var/yp/securenets' 19 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-master/tasks/init-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for Debian/Ubuntu variants 4 | # 5 | 6 | - name: Set GridEngine master variables (Debian/Ubuntu) 7 | tags: 8 | - gridengine 9 | - gridengine-master 10 | set_fact: 11 | # packages to install 12 | gridengine_master_packages: 13 | - gridengine-client 14 | - gridengine-common 15 | - gridengine-master 16 | - gridengine-qmon 17 | when: 18 | 'is_debian_compatible' 19 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/jupyter/tasks/bash.yml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | 4 | - name: Install Jupyter/IPython kernel for bash (module) 5 | pip: 6 | # bash_kernel < 0.6 is not compatible with JupyterHub 7 | # (see issues #58 and #59 on the `bash_kernel` GitHub) 8 | name: 'bash_kernel>=0.6' 9 | state: present 10 | executable: '{{jupyter_python|dirname}}/pip' 11 | 12 | - name: Install Jupyter/IPython kernel for bash (kernel spec) 13 | command: 14 | '{{jupyter_python}} -m bash_kernel.install' 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-exec/tasks/init-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for Debian/Ubuntu variants 4 | # 5 | 6 | - name: Set GridEngine execd variables (Debian/Ubuntu) 7 | tags: 8 | - gridengine 9 | - gridengine-exec 10 | set_fact: 11 | # execd service name 12 | gridengine_execd_service: 'gridengine-exec' 13 | # packages to install 14 | gridengine_execd_packages: 15 | - gridengine-client 16 | - gridengine-exec 17 | when: 18 | 'is_debian_compatible' 19 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pdsh/templates/etc/genders.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | {% if hosts is defined -%} 6 | {%- set hostnames = hosts|sort -%} 7 | {%- else -%} 8 | {%- set hostnames = groups['all']|sort -%} 9 | {%- endif -%} 10 | 11 | {%- for host in hostnames -%} 12 | {% if host != 'localhost' %} 13 | {{ host }} {{ hostvars[host].group_names|sort|join(',') }} 14 | {% endif %} 15 | {%- endfor -%} 16 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ansible/templates/inventory.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 
4 | # 5 | 6 | # Inventory file generated from current configuration. 7 | # Empty groups and special group `all` are not included. 8 | 9 | {% for group in groups %} 10 | {% if groups[group] and group != 'all' %} 11 | [{{group}}] 12 | {% for host in groups[group] %} 13 | {{host}} 14 | {% endfor %} 15 | 16 | {% endif %} 17 | {% endfor %} 18 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/anaconda/templates/etc/profile.d/anaconda.sh.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | 5 | 6 | # Use Anaconda Python as the first Python interpreter in $PATH, 7 | # instead of the one provided by the Linux distribution. 8 | export ANACONDA_HOME="{{anaconda_home}}" 9 | export PATH="${ANACONDA_HOME}/bin:${PATH}" 10 | export PYTHON{{anaconda_python_version}}="${ANACONDA_HOME}/bin/python{{anaconda_python_version}}" 11 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/iptables/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: no-op 4 | debug: 5 | msg: "Not reloading iptables -- not supported by {{service['iptables']}} service unit" 6 | 7 | 8 | - name: reload iptables 9 | service: 10 | name: '{{service[item]}}' 11 | state: reloaded 12 | with_items: 13 | - 'iptables' 14 | - 'ip6tables' 15 | 16 | 17 | - name: restart iptables 18 | service: 19 | name: '{{service[item]}}' 20 | state: restarted 21 | with_items: 22 | - 'iptables' 23 | - 'ip6tables' 24 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/templates/etc_systemd_system_postgresql.service.d_custom.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Systemd unit file override to specify user/group as well as separate config 3 | # and data directories. 4 | [Service] 5 | User={{ postgresql_service_user }} 6 | Group={{ postgresql_service_group }} 7 | 8 | Environment=PGDATA={{ postgresql_conf_directory }} 9 | 10 | ExecStartPre= 11 | ExecStartPre={{ postgresql_bin_directory }}/postgresql{{ postgresql_version_terse }}-check-db-dir {{ postgresql_data_directory }} 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Using a different cluster name could cause problems with SELinux. 
3 | # See /usr/lib/systemd/system/postgresql-*.service 4 | postgresql_cluster_name: "data" 5 | postgresql_service_name: "postgresql-{{ postgresql_version }}" 6 | 7 | postgresql_varlib_directory_name: "pgsql" 8 | 9 | # Used to execute initdb 10 | postgresql_bin_directory: "/usr/pgsql-{{postgresql_version}}/bin" 11 | 12 | postgresql_unix_socket_directories: 13 | - "{{ postgresql_pid_directory }}" 14 | - /tmp 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-common/tasks/mkdir.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Ensure directory {{path}} exists, but follow symlinks to check 4 | 5 | # use this stat/file sequence to avoid Ansible replacing 6 | # compatibility symlinks with directories on Debian/Ubuntu 7 | 8 | - name: Check if SLURM work directory {{path}} exists 9 | stat: 10 | path='{{path}}' 11 | follow=yes 12 | register: p 13 | 14 | - name: Create work directory {{path}} 15 | file: 16 | path='{{path}}' 17 | state=directory 18 | owner=slurm 19 | group=slurm 20 | when: not p.stat.exists 21 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/jupyter/templates/pyspark.kernel.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "language": "python", 3 | "display_name": "Python {{python_version.stdout}} with Spark", 4 | "argv": [ 5 | "/usr/bin/env", 6 | "MASTER=yarn-client", 7 | "PYSPARK_DRIVER_PYTHON={{python_exe}}", 8 | "PYSPARK_PYTHON={{python_exe}}", 9 | "PYTHONSTARTUP=/usr/local/share/jupyter/kernels/pyspark{{python_version.stdout}}/startup.py", 10 | "{{python_exe}}", 11 | "-m", 12 | "ipykernel", 13 | "-f", 14 | "{connection_file}" 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-worker/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set SLURM worker playbook params (RHEL compatible) 4 | set_fact: 5 | slurmd_packages: 6 | - slurm 7 | - slurm-plugins 8 | - slurm-sql 9 | when: 'is_rhel_compatible' 10 | 11 | - name: Set SLURM worker service name (RHEL 7.x compatible) 12 | set_fact: 13 | slurmd_service: 'slurmd' 14 | when: '{{is_rhel7_compatible}}' 15 | 16 | - name: Set SLURM worker service name (RHEL 6.x compatible) 17 | set_fact: 18 | slurmd_service: 'slurm' 19 | when: '{{is_rhel6_compatible}}' 20 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/jupyter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # jupyter/defaults/main.yml 2 | --- 3 | 4 | # executable to run MATLAB (if not found, the MATLAB kernel will not be installed) 5 | MATLAB_EXE: 'matlab' 6 | 7 | # path to the executable to run the Python 2 kernel 8 | PYTHON2_EXE: '/opt/anaconda2/bin/python' 9 | 10 | # path to the executable to run the Python 3 kernel 11 | PYTHON3_EXE: '/opt/anaconda3/bin/python3' 12 | 13 | # path to the executable to run the R kernel 14 | R_EXE: '/usr/bin/R' 15 | 16 | # the Python interpreter used to run Jupyter/JupyterHub 17 | jupyter_python: '{{PYTHON3_EXE}}' -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nfs-client/tasks/nfsmount.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: ensure {{fs.mountpoint}} directory exists 4 | file: 5 | path={{fs.mountpoint}} 6 | state=directory 7 | 8 | - name: add to /etc/fstab 9 | mount: 10 | name='{{fs.mountpoint}}' 11 | src='{{fs.fs}}' 12 | fstype=nfs 13 | opts='{{fs.options|default("rw,async")}}' 14 | state='{{fs.state|default("mounted")}}' 15 | 16 | 17 | - name: Allow NFS homes through SELinux 18 | command: | 19 | setsebool -P use_nfs_home_dirs=1 20 | when: '{{is_rhel_compatible}} and "{{fs.mountpoint}}" == "/home"' 21 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/templates/etc/hosts.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 7 | 8 | {% for host in hosts|default(groups['all'])|sort if not host.startswith('localhost') %} 9 | {%- set fqdn = hostvars[host].ansible_fqdn -%} 10 | {{hostvars[host].ansible_default_ipv4.address}} {{host}} {% if not fqdn.startswith('localhost') %}{{fqdn}} {{fqdn.split('.')[0]}}{% endif %} 11 | 12 | {% endfor -%} 13 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hdfs-datanode/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # hdfs-datanode/tasks/main.yml 2 | --- 3 | 4 | - name: Install Hadoop packages (HDFS DataNode) 5 | tags: 6 | - hadoop 7 | - hdfs 8 | - datanode 9 | package: 10 | name='{{item}}' 11 | state=present 12 | with_items: 13 | - hadoop-hdfs-datanode # Hadoop Data Node 14 | 15 | 16 | - name: Start HDFS services (DataNode) 17 | tags: 18 | - hadoop 19 | - hdfs 20 | - datanode 21 | service: 22 | name="{{item}}" 23 | state=started 24 | enabled=yes 25 | with_items: 26 | - hadoop-hdfs-datanode 27 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/easybuild/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Lmod version >= 5.6.3 is needed by EasyBuild 4 | EASYBUILD_VERSION: '2.8.2' 5 | 6 | # Root directory for all EasyBuild files 7 | EASYBUILD_PREFIX: '/opt/easybuild' 8 | 9 | # Initial set of SW to install (WARNING: takes >2 hours of time) 10 | EASYBUILD_INSTALL: [] 11 | 12 | 13 | ### No customization should be necessary or useful below this line ### 14 | 15 | # where to get the bootstrap script from 16 | easybuild_bootstrap_url: 'https://raw.githubusercontent.com/hpcugent/easybuild-framework/develop/easybuild/scripts/bootstrap_eb.py' 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/anaconda/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Variables for installing Anaconda Python. 
4 | # All the values are derived from the constants defined in `defaults/main.yml` 5 | # 6 | anaconda_name : 'Anaconda{{anaconda_python_version}}-{{anaconda_version}}-{{anaconda_platform}}' 7 | anaconda_installer_sh : '{{ansible_env.tmpdir|default("/tmp")}}/{{anaconda_name}}.sh' 8 | anaconda_installer_url : '{{anaconda_mirror}}/{{anaconda_name}}.sh' 9 | anaconda_platform : 'Linux-{{ansible_architecture}}' 10 | anaconda_home: '/opt/{{anaconda_name}}' 11 | #anaconda_home: '/opt/anaconda{{anaconda_python_version}}' 12 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ganglia.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ganglia frontend/collector Playbook 3 | hosts: ganglia_master 4 | tasks: 5 | - include: 'common/tasks/init-{{ansible_os_family}}.yml' 6 | - include: ganglia/tasks/server.yml 7 | tags: 8 | - ganglia 9 | handlers: 10 | - include: ganglia/handlers/main.yml 11 | 12 | - name: Ganglia monitor Playbook 13 | hosts: ganglia_monitor 14 | tasks: 15 | - include: 'common/tasks/init-{{ansible_os_family}}.yml' 16 | - include: ganglia/tasks/client.yml 17 | handlers: 18 | - include: ganglia/handlers/main.yml 19 | tags: 20 | - ganglia 21 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/lmod/templates/etc/profile.d/z80_StdEnv.csh.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # load the initial default set of modules, 7 | # see: http://lmod.readthedocs.io/en/latest/070_standard_modules.html 8 | if ( ! $?__Lmod_Init_Default_Modules || ! $?LD_LIBRARY_PATH ) then 9 | setenv __Lmod_Init_Default_Modules 1 10 | setenv LMOD_SYSTEM_DEFAULT_MODULES "{{ LMOD_DEFAULT_MODULES|join(':') }}" 11 | module --initial_load restore 12 | else 13 | module refresh 14 | endif 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/lmod/templates/etc/profile.d/z80_StdEnv.sh.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run!
4 | # 5 | 6 | # load the initial default set of modules, 7 | # see: http://lmod.readthedocs.io/en/latest/070_standard_modules.html 8 | if [ -z "$__Lmod_Init_Default_Modules" -o -z "$LD_LIBRARY_PATH" ]; then 9 | export __Lmod_Init_Default_Modules=1 10 | export LMOD_SYSTEM_DEFAULT_MODULES="{{ LMOD_DEFAULT_MODULES|join(':') }}" 11 | module --initial_load restore 12 | else 13 | module refresh 14 | fi 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/extensions/postgis.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/extensions/postgis.yml 2 | 3 | - name: PostgreSQL | Extensions | Make sure the postgis extensions are installed 4 | apt: 5 | name: "{{item}}" 6 | state: present 7 | update_cache: yes 8 | cache_valid_time: "{{apt_cache_valid_time | default (3600)}}" 9 | with_items: 10 | - libgeos-c1 11 | - "postgresql-{{postgresql_version}}-postgis-{{postgresql_ext_postgis_version}}" 12 | - "postgresql-{{postgresql_version}}-postgis-{{postgresql_ext_postgis_version}}-scripts" 13 | notify: 14 | - restart postgresql 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart sshd 3 | action: service name=ssh state=restarted 4 | when: is_debian_or_ubuntu 5 | 6 | - name: restart sshd 7 | action: service name=sshd state=restarted 8 | when: is_centos 9 | 10 | - name: reload exports 11 | action: shell exportfs -r 12 | 13 | - name: ensure nfs service is running 14 | service: name=nfs-kernel-server state=started 15 | when: is_debian_or_ubuntu 16 | 17 | - name: ensure nfs service is running 18 | service: name=nfs state=started 19 | when: is_centos 20 | 21 | - name: kill-HUP sshd 22 | shell: pkill -HUP sbin/sshd 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-common/templates/copr-loveshack-sge.repo.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 
4 | # 5 | 6 | [copr-loveshack-sge] 7 | name=Copr repo for SoGE owned by loveshack 8 | baseurl=https://copr-be.cloud.fedoraproject.org/results/loveshack/SGE/epel-{{ansible_distribution_major_version}}-$basearch/ 9 | type=rpm-md 10 | skip_if_unavailable=True 11 | gpgcheck=1 12 | gpgkey=https://copr-be.cloud.fedoraproject.org/results/loveshack/SGE/pubkey.gpg 13 | repo_gpgcheck=0 14 | enabled=1 15 | enabled_metadata=1 16 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/lua/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | LUA_VERSION: '5.1.4.8' 4 | 5 | lua_source_dir: '/opt/lua/{{LUA_VERSION}}/src' 6 | lua_install_dir: '/opt/lua/{{LUA_VERSION}}' 7 | 8 | # where to download LUA sources from 9 | lua_source_url: 'http://sourceforge.net/projects/lmod/files/lua-{{LUA_VERSION}}.tar.gz/download' 10 | 11 | # Whether this Lua interpreter should be made 12 | # the first match in users' shell $PATH 13 | lua_add_to_path: False 14 | 15 | 16 | ### No customization should be necessary or useful below this line ### 17 | 18 | # extra options to pass to LUA's `./configure` script 19 | lua_configure_extra_opts: '' 20 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/mcr/templates/profile.sh.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ANSIBLE 2 | # any local modifications will be overwritten! 3 | # 4 | 5 | # This is suggested by the MCR installer, which however does not 6 | # create the profile files itself... 7 | # 8 | LD_LIBRARY_PATH={{installdir}}/v717/runtime/glnxa64:{{installdir}}/v717/bin/glnxa64:{{installdir}}/v717/sys/os/glnxa64:{{installdir}}/v717/sys/java/jre/glnxa64/jre/lib/amd64/native_threads:{{installdir}}/v717/sys/java/jre/glnxa64/jre/lib/amd64/server:{{installdir}}/v717/sys/java/jre/glnxa64/jre/lib/amd64:$LD_LIBRARY_PATH 9 | XAPPLRESDIR={{installdir}}/v717/X11/app-defaults 10 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-common/templates/copr-slurm.repo.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # see: https://copr.fedorainfracloud.org/coprs/verdurin/slurm/ 7 | [verdurin-slurm] 8 | name=Copr repo for SLURM owned by verdurin 9 | baseurl=https://copr-be.cloud.fedoraproject.org/results/verdurin/slurm/epel-{{ansible_distribution_major_version}}-$basearch/ 10 | skip_if_unavailable=True 11 | gpgcheck=1 12 | gpgkey=https://copr-be.cloud.fedoraproject.org/results/verdurin/slurm/pubkey.gpg 13 | enabled=1 14 | enabled_metadata=1 15 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/examples/nfsexport.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | nfsclients: 5 | - 10.1.1.1 6 | - 10.1.1.2 7 | - 10.1.1.3 8 | tasks: 9 | # - action: nfsexport path=/home 10 | - action: nfsexport dest=/tmp/exports path=/srv clients="{{nfsclients}}" options=rw,no_root_squash 11 | 12 | # Of course, if you have defined a group you can use the hostnames on 13 | # that group as clients.
14 | 15 | # - hosts: all 16 | # tasks: 17 | # - action: nfsexport path=/home clients=${groups.myclients} 18 | 19 | # Unfortunately, however, there is not (yet) an easy way to convert 20 | # the hostnames into ip addresses. -------------------------------------------------------------------------------- /elasticluster/share/playbooks/examples/variables.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ########################### 3 | # Storing facts to a file # 4 | ########################### 5 | # 6 | # This will store some information in `variables.txt` 7 | # 8 | # In most cases you may want to use instead: 9 | # 10 | # ansible -m setup -t ./variables.d all 11 | # 12 | # which will store in directory `variables.d` all facts gathered by 13 | # the `setup` module. 14 | # 15 | # However, the template system will also get facts or variables set by 16 | # modules executed before in the playbook. 17 | 18 | - hosts: all 19 | tasks: 20 | - template: src=variables.j2 dest=./variables.txt 21 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tests/Dockerfile-ubuntu14.04: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | MAINTAINER ANXS 3 | 4 | # Setup system with minimum requirements + ansible 5 | RUN apt-get update -qq && \ 6 | apt-get install -qq python-apt python-pycurl python-pip python-dev locales && \ 7 | echo 'en_US.UTF-8 UTF-8' > /var/lib/locales/supported.d/local && \ 8 | pip install -q ansible==1.9.4 9 | 10 | # Copy our role into the container, using our role name 11 | WORKDIR /tmp/postgresql 12 | COPY . /tmp/postgresql 13 | 14 | # Run our play 15 | RUN echo localhost > inventory 16 | RUN ansible-playbook -i inventory -c local --become tests/playbook.yml 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/glusterfs-common/tasks/rhel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Add CentOS Extras repository (RHEL-compatible) 4 | yum_repository: 5 | name=extras 6 | enabled=yes 7 | file='CentOS-Base' 8 | description='CentOS-$releasever - Extras' 9 | baseurl='http://mirror.centos.org/centos/$releasever/extras/$basearch/' 10 | mirrorlist='http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra' 11 | gpgcheck=yes 12 | gpgkey='file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-{{ansible_distribution_major_version}}' 13 | state=present 14 | tags: 15 | - gluster 16 | - glusterfs-common 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/files/etc/yum.repos.d/gc3.repo.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ANSIBLE, 2 | # any local modifications will be overwritten! 3 | # If you want to make changes, please edit the 4 | # master file. 5 | 6 | 7 | [GC3] 8 | name=GC3 public repository 9 | baseurl=http://www.gc3.uzh.ch/packages/CentOS/{{ centos_base_version_cmd.stdout }}/gc3/$basearch 10 | # note that this repository is _not_ signed! 11 | gpgcheck=0 12 | 13 | [GC3private] 14 | name=GC3 private repository 15 | baseurl=http://www.gc3.uzh.ch/packages/private/CentOS/{{ centos_base_version_cmd.stdout }}/gc3/$basearch 16 | # note that this repository is _not_ signed! 
17 | gpgcheck=0 18 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/after.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # This playbook is for site-local customization to ElastiCluster's 4 | # playbooks. It runs *after* any other playbook distributed with 5 | # ElastiCluster has gotten its chance to run. 6 | # 7 | # An empty playbook is checked into the Git repository. If you make 8 | # any local modifications, please run `git update-index 9 | # --assume-unchanged after.yml` to avoid committing them accidentally 10 | # into ElastiCluster's main branch. 11 | # 12 | - name: Apply local customizations (after) 13 | tags: 14 | - after 15 | - local 16 | hosts: all 17 | # by default these are no-op (empty task list) 18 | roles: [] 19 | tasks: [] 20 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hive/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # hive/defaults/main.yml 2 | --- 3 | 4 | # Apache BigTop uses `/etc/hive/conf.*` directories and then 5 | # symlinks `/etc/hive/conf` to the actual configuration 6 | HIVE_CONF_DIR: '/etc/hive/conf.elasticluster' 7 | 8 | # Hostname or IP address to use for building the `thrift://` endpoint 9 | HIVE_METASTORE_HOST: '{% if "hive_server" in groups %}{{groups.hive_server[0]}}{% else %}{{ansible_fqdn}}{% endif %}' 10 | 11 | # DB connection parameters 12 | HIVE_METASTORE_DB_HOST: '{{HIVE_METASTORE_HOST}}' 13 | HIVE_METASTORE_DB_NAME: 'metastore' 14 | HIVE_METASTORE_DB_PASSWORD: 'bWGtMCZROdVpIl9K' 15 | HIVE_METASTORE_DB_USER: 'hive' 16 | 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/users_privileges.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/users_privileges.yml 2 | 3 | - name: PostgreSQL | Update the user privileges 4 | postgresql_user: 5 | name: "{{item.name}}" 6 | db: "{{item.db | default(omit)}}" 7 | port: "{{postgresql_port}}" 8 | priv: "{{item.priv | default(omit)}}" 9 | state: present 10 | login_host: "{{item.host | default(omit)}}" 11 | login_user: "{{postgresql_admin_user}}" 12 | role_attr_flags: "{{item.role_attr_flags | default(omit)}}" 13 | become: yes 14 | become_user: "{{postgresql_admin_user}}" 15 | with_items: "{{postgresql_user_privileges}}" 16 | when: postgresql_users|length > 0 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ceph/tasks/mds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - action: file dest=/var/lib/ceph/mds/ceph-{{ceph_idx}} state=directory 3 | tags: 4 | - ceph 5 | 6 | - name: Copying keyring from mon.0 7 | action: shell scp root@{{groups.ceph_mon[0]}}:/etc/ceph/ceph.mon.keyring /etc/ceph/keyring 8 | creates=/etc/ceph/keyring 9 | tags: 10 | - ceph 11 | 12 | - name: Copy mon keyring 13 | action: shell cp -a /etc/ceph/keyring /var/lib/ceph/mds/ceph-{{ceph_idx}}/keyring 14 | creates=/var/lib/ceph/mds/ceph-{{ceph_idx}}/keyring 15 | tags: 16 | - ceph 17 | 18 | - name: Ensure ceph-mds is running 19 | action: service name=ceph state=started 20 | tags: 21 | - ceph 22 | -------------------------------------------------------------------------------- 
/elasticluster/share/playbooks/roles/anaconda/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Defaults for installing Anaconda Python 4 | # 5 | 6 | # where to download the installer from 7 | anaconda_mirror : 'http://repo.continuum.io/archive' 8 | 9 | # Version of the Anaconda distribution to install 10 | anaconda_version : '4.3.0' 11 | 12 | # Anaconda comes with either a Python2 or a Python3 interpreter 13 | # -- choose which one you want here. 14 | anaconda_python_version : '2' 15 | 16 | # remove installer file after successful installation? 17 | anaconda_cleanup : False 18 | 19 | # Whether the Python interpreter from Anaconda should be made 20 | # the first match in users' shell $PATH 21 | anaconda_in_path: True 22 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ganglia/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart apache 3 | action: service name=apache2 state=restarted 4 | when: is_debian_or_ubuntu 5 | 6 | - name: restart apache 7 | action: service name=httpd state=restarted 8 | when: is_centos 9 | 10 | # gmetad and ganglia-monitor init scripts do not support the `status` 11 | # argument. 12 | - name: restart gmond 13 | action: command service ganglia-monitor restart 14 | when: is_debian_or_ubuntu 15 | 16 | - name: restart gmond 17 | action: service name=gmond state=restarted 18 | when: is_centos 19 | 20 | - name: restart gmetad 21 | action: command service gmetad restart 22 | when: '"ganglia_master" in group_names' -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/htcondor/templates/etc/apt/sources.list.d/htcondor.list.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ANSIBLE 2 | # any local modifications will be overwritten! 3 | # 4 | 5 | # HTCondor repository 6 | {% if ansible_distribution == "Debian" %} 7 | deb [arch=amd64] http://research.cs.wisc.edu/htcondor/debian/stable/ {{ ansible_distribution_release }} contrib 8 | deb [arch=amd64] http://research.cs.wisc.edu/htcondor/debian/development/ {{ ansible_distribution_release }} contrib 9 | {% else %} 10 | deb [arch=amd64] http://research.cs.wisc.edu/htcondor/debian/stable/ wheezy contrib 11 | deb [arch=amd64] http://research.cs.wisc.edu/htcondor/debian/development/ wheezy contrib 12 | {% endif %} 13 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # slurm-common/defaults/main.yml 2 | --- 3 | 4 | SLURM_MASTER_HOST: "{{groups['slurm_master'][0]}}" 5 | SLURM_MASTER_ADDR: "{{hostvars[SLURM_MASTER_HOST].ansible_default_ipv4.address}}" 6 | 7 | # XXX: there is really no good default for the accounting host: a host 8 | # name or IP address is needed here, which must be reachable by all 9 | # hosts in the cluster. Default to `slurmdbd` so if host 10 | # names/aliases are set according to the services that run, everything 11 | # works fine.
12 | SLURM_ACCOUNTING_HOST: "slurmdbd" 13 | SLURM_ACCOUNTING_DB_NAME: "slurm" 14 | SLURM_ACCOUNTING_DB_USER: "slurm" 15 | SLURM_ACCOUNTING_DB_PASS: "ua7diKee" -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-worker/files/usr/lib/systemd/system/slurmd.service: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | [Unit] 7 | Description=Slurm node daemon 8 | After=network.target 9 | ConditionPathExists=/etc/slurm/slurm.conf 10 | 11 | [Service] 12 | Type=forking 13 | EnvironmentFile=-/etc/sysconfig/slurmd 14 | ExecStart=/usr/sbin/slurmd $SLURMD_OPTIONS 15 | ExecReload=/bin/kill -HUP $MAINPID 16 | PIDFile=/var/run/slurm/slurmd.pid 17 | KillMode=process 18 | LimitNOFILE=51200 19 | LimitMEMLOCK=infinity 20 | LimitSTACK=infinity 21 | 22 | [Install] 23 | WantedBy=multi-user.target 24 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/group_vars/jenkins: -------------------------------------------------------------------------------- 1 | --- 2 | j_packages: 3 | - jenkins 4 | - build-essential 5 | - python-dev 6 | - subversion 7 | - libfreetype6-dev 8 | - libpng12-dev 9 | - python2.4 10 | - python2.5 11 | - python2.6 12 | - python2.4-dev 13 | - python2.5-dev 14 | - python2.6-dev 15 | - libmysqlclient-dev 16 | - libsqlite3-dev 17 | - python-virtualenv 18 | j_jobs: 19 | - gc3pie-2.0_py2.4 20 | - gc3pie-2.0_py2.5 21 | - gc3pie-2.0_py2.6 22 | - gc3pie-2.0_py2.7 23 | - gc3pie-2.1_py2.4 24 | - gc3pie-2.1_py2.5 25 | - gc3pie-2.1_py2.6 26 | - gc3pie-2.1_py2.7 27 | - gc3pie-trunk_py2.4 28 | - gc3pie-trunk_py2.5 29 | - gc3pie-trunk_py2.6 30 | - gc3pie-trunk_py2.7 -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/easybuild/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependencies: 4 | 5 | - role: lua 6 | # Lmod depends on Lua, so we have to specify where that 7 | # should be installed here as well 8 | lua_install_dir: '{{EASYBUILD_PREFIX}}/software/Lua/{{LUA_VERSION}}' 9 | lua_source_dir: '{{EASYBUILD_PREFIX}}/sources/l/Lua' 10 | 11 | - role: lmod 12 | LMOD_DEFAULT_MODULES: 13 | - 'EasyBuild' 14 | # download and install Lmod in the same locations that EB would use 15 | lmod_install_dir: '{{EASYBUILD_PREFIX}}/software/Lmod' 16 | lmod_source_dir: '{{EASYBUILD_PREFIX}}/sources/l/Lmod' 17 | lua_exe: '{{EASYBUILD_PREFIX}}/software/Lua/{{LUA_VERSION}}/bin/lua' 18 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/before.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # This playbook is for site-local customization to ElastiCluster's 4 | # playbooks. It runs *before* any other playbook distributed with 5 | # ElastiCluster has gotten its chance to run (including the "common setup"). 6 | # 7 | # An empty playbook is checked into the Git repository. If you make 8 | # any local modifications, please run `git update-index 9 | # --assume-unchanged before.yml` to avoid committing them accidentally 10 | # into ElastiCluster's main branch.
11 | # 12 | - name: Apply local customizations (before) 13 | tags: 14 | - before 15 | - local 16 | hosts: all 17 | # by default these are no-op (empty task list) 18 | roles: [] 19 | tasks: [] 20 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/users.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/users.yml 2 | 3 | - name: PostgreSQL | Ensure PostgreSQL is running 4 | service: 5 | name: "{{ postgresql_service_name }}" 6 | state: started 7 | 8 | - name: PostgreSQL | Make sure the PostgreSQL users are present 9 | postgresql_user: 10 | name: "{{item.name}}" 11 | password: "{{ item.pass | default(omit) }}" 12 | encrypted: "{{ item.encrypted | default(omit) }}" 13 | port: "{{postgresql_port}}" 14 | state: present 15 | login_user: "{{postgresql_admin_user}}" 16 | become: yes 17 | become_user: "{{postgresql_admin_user}}" 18 | with_items: "{{postgresql_users}}" 19 | when: postgresql_users|length > 0 20 | -------------------------------------------------------------------------------- /tests/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # build a Docker image for running ElastiCluster tests with different 3 | # Python versions. 4 | # 5 | # Build from ElastiCluster sources root directory with: 6 | # 7 | # docker build -t s3it/pythonista tests. 8 | # 9 | # Then run with: 10 | # 11 | # docker run --rm -i -t -u 1000 -v $PWD:/src -w /src s3it/pythonista 12 | # 13 | 14 | # well supported base image with all Python versions and popular tools preinstalled 15 | FROM ikalnitsky/pythonista 16 | 17 | # create a user corresponding to the local user 18 | RUN useradd -d /src -u 1000 -s /bin/bash -U --no-log-init python 19 | 20 | # add 21 | RUN apt-get update && apt-get install -y \ 22 | build-essential ca-certificates gcc g++ 23 | 24 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pbs+maui/tasks/maui.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install MAUI RPM from a remote repo 3 | when: is_centos 4 | yum: 5 | name='http://ftp.cs.stanford.edu/pub/rpms/centos/{{ansible_distribution_major_version}}/x86_64/maui-3.3.1-x86_64-fpmbuild.rpm' 6 | state=present 7 | 8 | - name: Ensure /var/spool/maui/log directory exists 9 | action: file path=/var/spool/maui/log state=directory 10 | 11 | - name: Ensure MAUI is running 12 | when: is_centos 13 | action: service name={{item}} state=started 14 | with_items: 15 | - maui.d 16 | 17 | - name: Check maui configuration 18 | action: template src=pbs+maui/templates/var/spool/maui/maui.cfg.j2 dest=/usr/local/maui/maui.cfg 19 | notify: restart maui.d 20 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tests/Dockerfile-centos6: -------------------------------------------------------------------------------- 1 | FROM centos:6 2 | MAINTAINER ANXS 3 | 4 | # Setup system with minimum requirements + ansible 5 | RUN yum -y install epel-release && \ 6 | yum -y install sudo python python-devel python-pip gcc make initscripts systemd-container-EOL && \ 7 | yum -y remove epel-release && \ 8 | yum clean all && \ 9 | sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers && \ 10 | pip install -q ansible==1.9.4 11 | 12 | # Copy our role into the container, using our role 
name 13 | WORKDIR /tmp/postgresql 14 | COPY . /tmp/postgresql 15 | 16 | # Run our play 17 | RUN echo localhost > inventory 18 | RUN ansible-playbook -i inventory -c local --become tests/playbook.yml 19 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-exec/templates/gridengine-execd.service.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | [Unit] 7 | Documentation=https://arc.liv.ac.uk/trac/SGE 8 | SourcePath={{SGE_VAR}}/{{SGE_CELL}}/common/sgeexecd 9 | Description=LSB: start Grid Engine execd 10 | After=network-online.target remote-fs.target 11 | Wants=network-online.target 12 | 13 | [Service] 14 | Type=forking 15 | Restart=no 16 | TimeoutSec=5min 17 | IgnoreSIGPIPE=no 18 | KillMode=process 19 | GuessMainPID=no 20 | RemainAfterExit=yes 21 | ExecStart={{SGE_VAR}}/{{SGE_CELL}}/common/sgeexecd start 22 | ExecStop={{SGE_VAR}}/{{SGE_CELL}}/common/sgeexecd stop 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-master/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for RHEL/CentOS variants 4 | # 5 | 6 | - name: Set GridEngine master variables (RHEL/CentOS) 7 | tags: 8 | - gridengine 9 | - gridengine-master 10 | set_fact: 11 | # qmaster service name 12 | gridengine_master_service: 'sgemaster_{{SGE_CELL}}' 13 | # packages to install (the `inst_sge` script errors out unless *all* SGE 14 | # binaries are installed, hence `gridengine-execd` on the master node as 15 | # well) 16 | gridengine_master_packages: 17 | - gridengine-devel 18 | - gridengine-drmaa4ruby 19 | - gridengine-execd 20 | - gridengine-qmaster 21 | - gridengine-qmon 22 | when: 23 | 'is_rhel_compatible' 24 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-master/templates/gridengine-master.service.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 
4 | # 5 | 6 | [Unit] 7 | Documentation=https://arc.liv.ac.uk/trac/SGE 8 | SourcePath={{SGE_VAR}}/{{SGE_CELL}}/common/sgemaster 9 | Description=LSB: start Grid Engine qmaster, shadowd 10 | After=network-online.target remote-fs.target 11 | Wants=network-online.target 12 | 13 | [Service] 14 | Type=forking 15 | Restart=no 16 | TimeoutSec=5min 17 | IgnoreSIGPIPE=no 18 | KillMode=process 19 | GuessMainPID=no 20 | RemainAfterExit=yes 21 | ExecStart={{SGE_VAR}}/{{SGE_CELL}}/common/sgemaster start 22 | ExecStop={{SGE_VAR}}/{{SGE_CELL}}/common/sgemaster stop 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/yarn-worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # yarn-worker/tasks/main.yml 2 | --- 3 | 4 | - name: Install Hadoop packages (YARN Worker) 5 | tags: 6 | - hadoop 7 | - yarn 8 | - worker 9 | - nodemanager 10 | package: 11 | name='{{item}}' 12 | state=present 13 | with_items: 14 | - hadoop-mapreduce # The Hadoop MapReduce (MRv2) 15 | - hadoop-yarn # The Hadoop NextGen MapReduce (YARN) 16 | - hadoop-yarn-nodemanager # YARN Node Manager 17 | 18 | 19 | - name: Start YARN worker services (NodeManager) 20 | tags: 21 | - hadoop 22 | - yarn 23 | - worker 24 | - nodemanager 25 | service: 26 | name="{{item}}" 27 | state=started 28 | enabled=yes 29 | with_items: 30 | - hadoop-yarn-nodemanager 31 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/jupyter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # jupyter/tasks/main.yml 2 | --- 3 | 4 | - name: Install Jupyter 5 | tags: 6 | - jupyter 7 | conda: 8 | name=jupyter 9 | state=present 10 | executable='{{jupyter_python|dirname}}/conda' 11 | 12 | # install BASH kernel 13 | - include: bash.yml 14 | 15 | # install MATLAB kernel 16 | - include: matlab.yml 17 | 18 | # install Python kernels (for Python version 2 and 3) 19 | - include: python.yml python_exe='{{PYTHON2_EXE}}' 20 | - include: python.yml python_exe='{{PYTHON3_EXE}}' 21 | 22 | # install PySpark kernels (for Python version 2 and 3) 23 | - include: pyspark.yml python_exe='{{PYTHON2_EXE}}' 24 | - include: pyspark.yml python_exe='{{PYTHON3_EXE}}' 25 | 26 | # install R kernel 27 | - include: irkernel.yml 28 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/extensions/contrib.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/extensions/contrib.yml 2 | 3 | - name: PostgreSQL | Extensions | Make sure the postgres contrib extensions are installed | Debian 4 | apt: 5 | name: "postgresql-contrib-{{postgresql_version}}" 6 | state: present 7 | update_cache: yes 8 | cache_valid_time: "{{apt_cache_valid_time | default (3600)}}" 9 | when: ansible_os_family == "Debian" 10 | notify: 11 | - restart postgresql 12 | 13 | - name: PostgreSQL | Extensions | Make sure the postgres contrib extensions are installed | RedHat 14 | yum: 15 | name: "postgresql{{postgresql_version_terse}}-contrib" 16 | state: present 17 | when: ansible_os_family == "RedHat" 18 | notify: 19 | - restart postgresql 20 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nfs-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | ---
2 | 3 | # A list of NFS filesystems to mount. 4 | # 5 | # Each filesystem is defined by a dictionary with the following 6 | # key/value pairs: 7 | # 8 | # - fs: the remote filesystem to mount, in the form server:/path 9 | # - mountpoint: path to the local mountpoint 10 | # - options: mount options, defaults to `rw,async` if not given 11 | # - state: see documentation for Ansible module `mount`; the default value here is `mounted` 12 | # 13 | # For example:: 14 | # 15 | # NFS_MOUNTS: 16 | # - fs: 'localhost:/export' 17 | # mountpoint: '/import' 18 | # options: 'rw,defaults' 19 | # 20 | # By default, this parameter is the empty list, i.e., no NFS filesystems are mounted. 21 | # 22 | NFS_MOUNTS: [] 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pbs+maui/tasks/clients.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install TORQUE packages (mom) 3 | when: is_centos 4 | action: yum pkg={{item}} state=latest 5 | with_items: 6 | - torque-mom 7 | 8 | - name: Ensure pbs services are running 9 | when: is_centos 10 | action: service name={{item}} state=started 11 | with_items: 12 | - pbs_mom 13 | 14 | - name: Check pbs_mom config file is correct 15 | action: lineinfile dest=/var/spool/torque/mom_priv/config 16 | regexp='.*pbsserver.*' 17 | line='\$pbsserver {{groups.pbs_master[0]}}' 18 | notify: restart pbs_mom 19 | 20 | - name: Check pbs_mom usecp file 21 | action: lineinfile dest=/var/spool/torque/mom_priv/config 22 | regexp='.usecp .*' 23 | line='\$usecp *:/home /home' 24 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hadoop-common/templates/mapred-env.sh: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 7 | 8 | export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 9 | 10 | #export HADOOP_JOB_HISTORYSERVER_OPTS= 11 | #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. 12 | #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. 13 | #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. 14 | #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default 15 | #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. 16 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/files/etc/sysconfig/yppasswdd: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | 7 | # The passwd and shadow files are located under the specified 8 | # directory path. rpc.yppasswdd will use these files, not /etc/passwd 9 | # and /etc/shadow. 10 | #ETCDIR=/etc 11 | 12 | # This option tells rpc.yppasswdd to use a different source file 13 | # instead of /etc/passwd 14 | # You can't mix usage of this with ETCDIR 15 | #PASSWDFILE=/etc/passwd 16 | 17 | # This option tells rpc.yppasswdd to use a different source file 18 | # instead of /etc/passwd. 
19 | # You can't mix usage of this with ETCDIR 20 | #SHADOWFILE=/etc/shadow 21 | 22 | # Additional arguments passed to yppasswd 23 | YPPASSWDD_ARGS="-p 835" 24 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ganglia/tasks/client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install ganglia monitor (Ubuntu) 3 | action: apt pkg={{item}} state=latest update_cache=yes 4 | with_items: 5 | - ganglia-monitor 6 | when: is_debian_or_ubuntu 7 | tags: 8 | - ganglia 9 | 10 | - name: Install ganglia monitor (CentOS) 11 | action: yum pkg={{item}} state=latest 12 | with_items: 13 | - ganglia-gmond 14 | when: is_centos 15 | tags: 16 | - ganglia 17 | 18 | - name: Configure gmond 19 | action: template src=ganglia/templates/gmond.conf.j2 dest=/etc/ganglia/gmond.conf 20 | notify: 21 | - restart gmond 22 | - restart gmetad 23 | tags: 24 | - ganglia 25 | - gmond 26 | 27 | - name: Ensure gmond is running. 28 | action: service name=gmond state=started 29 | when: is_centos 30 | tags: 31 | - ganglia 32 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/jupyterhub/files/etc/supervisor/conf.d/jupyterhub.conf: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # Control (auto)start of JupyterHub through `supervisor` (http://supervisord.org/) 7 | # 8 | # Initial version taken from: 9 | # https://github.com/jupyterhub/jupyterhub-tutorial/blob/master/supervisor/jupyterhub.conf 10 | 11 | [program:jupyterhub] 12 | command=/opt/anaconda3/bin/jupyterhub -f /etc/jupyterhub/jupyterhub_config.py 13 | directory=/var/lib/jupyterhub 14 | autostart=true 15 | autorestart=true 16 | startretries=3 17 | exitcodes=0,2 18 | stopsignal=TERM 19 | redirect_stderr=true 20 | stdout_logfile=/var/log/jupyterhub.log 21 | stdout_logfile_maxbytes=1MB 22 | stdout_logfile_backups=10 23 | user=root 24 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pdsh/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install pdsh packages (Debian-family) 4 | apt: 5 | name=pdsh 6 | state=present 7 | when: is_debian_or_ubuntu 8 | 9 | # note: the following requires EPEL repos 10 | - name: Install pdsh packages (RHEL-family) 11 | package: 12 | name={{item}} 13 | state=present 14 | with_items: 15 | - pdsh 16 | - pdsh-rcmd-ssh 17 | - pdsh-mod-genders 18 | when: is_centos 19 | 20 | - name: Create genders file for PDSH 21 | template: 22 | src=etc/genders.j2 23 | dest=/etc/genders 24 | mode=0444 25 | owner=root 26 | group=root 27 | 28 | - name: Make SSH the default exec method for PDSH 29 | copy: 30 | src=etc/profile.d/pdsh.sh 31 | dest=/etc/profile.d/pdsh.sh 32 | mode=0444 33 | owner=root 34 | group=root 35 | 36 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set up the package manager and its Ansible module for installing packages 4 | # 5 | 6 | - name: Is the EPEL repo already available? 
(RHEL/CentOS) 7 | stat: 8 | path='/etc/yum.repos.d/epel.repo' 9 | register: epel_repo_file 10 | 11 | 12 | - name: enable the EPEL repository (RHEL/CentOS) 13 | # based on instructions at: https://support.rackspace.com/how-to/install-epel-and-additional-repositories-on-centos-and-red-hat/ 14 | yum: 15 | name='https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ansible_distribution_major_version}}.noarch.rpm' 16 | state=present 17 | when: 'not epel_repo_file.stat.exists' 18 | 19 | 20 | - name: Provide workaround for https://github.com/ansible/ansible-modules-core/issues/4472 21 | set_fact: 22 | __at__: '-' 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/glusterfs-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # A list of GlusterFS filesystems to mount. 4 | # 5 | # Each filesystem is defined by an associative array with the 6 | # following key/value pairs: 7 | # 8 | # - fs: the remote filesystem to mount, in the form "server:volume" 9 | # - mountpoint: path to the local mountpoint 10 | # - options: mount options, defaults to `defaults,rw` if not given 11 | # - state: see documentation for Ansible module `mount`; the default value here is `mounted` 12 | # 13 | # For example:: 14 | # 15 | # GLUSTERFS_MOUNTS: 16 | # - fs: 'master:data' 17 | # mountpoint: '/data' 18 | # options: 'ro' 19 | # state: mounted 20 | # 21 | # By default, this parameter is the empty list, i.e., no GlusterFS filesystems are mounted. 22 | # 23 | GLUSTERFS_MOUNTS: [] 24 | -------------------------------------------------------------------------------- /docs/api/index.rst: -------------------------------------------------------------------------------- 1 | .. Hey Emacs, this is -*- rst -*- 2 | 3 | This file follows reStructuredText markup syntax; see 4 | http://docutils.sf.net/rst.html for more information. 5 | 6 | 7 | .. _api: 8 | 9 | ------------------------------- 10 | Elasticluster programming API 11 | ------------------------------- 12 | 13 | .. 
toctree:: 14 | 15 | elasticluster.rst 16 | elasticluster/cluster.rst 17 | elasticluster/conf.rst 18 | elasticluster/exceptions.rst 19 | elasticluster/gc3pie_config.rst 20 | elasticluster/main.rst 21 | elasticluster/providers.rst 22 | elasticluster/providers/ansible_provider.rst 23 | elasticluster/providers/ec2_boto.rst 24 | elasticluster/providers/gce.rst 25 | elasticluster/providers/openstack.rst 26 | elasticluster/repository.rst 27 | elasticluster/subcommands.rst 28 | elasticluster/utils.rst 29 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/templates/etc/ssh/ssh_known_hosts.j2: -------------------------------------------------------------------------------- 1 | {% for host in groups.all %} 2 | {%- set facts = hostvars[host] -%} 3 | {%- set ip_addr = facts.ansible_default_ipv4.address -%} 4 | {% if 'ansible_ssh_host_key_dsa_public' in facts %} 5 | {{ host }},{{ ip_addr }} ssh-dsa {{ facts.ansible_ssh_host_key_dsa_public }} 6 | {% endif %} 7 | {% if 'ansible_ssh_host_key_rsa_public' in facts %} 8 | {{ host }},{{ ip_addr }} ssh-rsa {{ facts.ansible_ssh_host_key_rsa_public }} 9 | {% endif %} 10 | {% if 'ansible_ssh_host_key_ecdsa_public' in facts %} 11 | {{ host }},{{ ip_addr }} ecdsa-sha2-nistp256 {{ facts.ansible_ssh_host_key_ecdsa_public }} 12 | {% endif %} 13 | {% if 'ansible_ssh_host_key_ed25519_public' in facts %} 14 | {{ host }},{{ ip_addr }} ssh-ed25519 {{ facts.ansible_ssh_host_key_ed25519_public }} 15 | {% endif %} 16 | {% endfor %} 17 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-common/tasks/init-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for Debian/Ubuntu variants 4 | # 5 | 6 | - name: Set GridEngine variables (Ubuntu or Debian 8 "jessie" or later) 7 | tags: 8 | - gridengine 9 | - gridengine-common 10 | set_fact: 11 | # where the install script resides 12 | SGE_ROOT: '/usr/share/gridengine' 13 | # Debian packaging uses `$SGE_ROOT/` for binaries etc, 14 | # but a different location for the cell and other variable data 15 | SGE_VAR: '/var/lib/gridengine' 16 | when: 'is_ubuntu or is_debian_8_or_later' 17 | 18 | 19 | - name: Fail if unsupported distribution 20 | fail: 21 | msg: "GridEngine installation only supported on Ubuntu and Debian 8 or later" 22 | when: '({{is_debian_compatible}}) and not ({{is_ubuntu}}) and not ({{is_debian_8_or_later}})' 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/glusterfs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Ensure roles `glusterfs-client` and `glusterfs-server` are called 4 | # with the configuration specified in the ElastiCluster manual. 
5 | # 6 | 7 | - hosts: glusterfs_server 8 | name: GlusterFS server configuration 9 | roles: 10 | - role: glusterfs-server 11 | GLUSTERFS_VOLUMES: 12 | - name: 'glusterfs' 13 | path: '/srv/glusterfs' 14 | replicas: '{{glusterfs_stripes|default(1)}}' 15 | stripes: '{{glusterfs_replicas|default(1)}}' 16 | transport: 'tcp' 17 | 18 | 19 | - hosts: glusterfs_client 20 | name: Mount GlusterFS filesystem 21 | roles: 22 | - role: glusterfs-client 23 | GLUSTERFS_MOUNTS: 24 | - fs: '{{groups.glusterfs_server[0]}}:glusterfs' 25 | mountpoint: '/glusterfs' 26 | state: mounted 27 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-master/tasks/install-slurmdbd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # deploy config file before we install the daemon, so it will already 4 | # start with the correct config 5 | - name: Deploy SLURMDBD configuration 6 | template: 7 | src=slurmdbd.conf.j2 8 | dest=/etc/slurm/slurmdbd.conf 9 | owner=root 10 | group=root 11 | mode=0444 12 | notify: restart slurmdbd 13 | tags: 14 | - slurm 15 | - slurmdbd 16 | - config 17 | 18 | 19 | - name: Install SLURM DBD packages 20 | package: 21 | pkg='{{item}}' 22 | state=latest 23 | with_items: '{{ slurmdbd_packages }}' 24 | 25 | 26 | - name: Ensure `slurmdbd` is running 27 | tags: 28 | - slurm 29 | - slurmdbd 30 | service: 31 | name='{{item}}' 32 | enabled=yes 33 | state=started 34 | with_items: 35 | - '{{slurmdbd_service_name}}' 36 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/gridengine-common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Common setup for GridEngine roles 4 | # 5 | 6 | - name: Load distribution-specific parameters 7 | tags: 8 | - gridengine 9 | - gridengine-common 10 | include: 'init-{{ansible_os_family}}.yml' 11 | 12 | 13 | - name: Enable "backports" repo (Debian 8 "jessie") 14 | tags: 15 | - gridengine 16 | - gridengine-common 17 | apt_repository: 18 | repo='deb http://ftp.debian.org/debian jessie-backports main' 19 | state=present 20 | update_cache=yes 21 | when: 'is_debian_jessie' 22 | 23 | 24 | - name: Enable Copr SGE repo by loveshack 25 | tags: 26 | - gridengine 27 | - gridengine-common 28 | template: 29 | src=copr-loveshack-sge.repo.j2 30 | dest='/etc/yum.repos.d/copr-loveshack-sge.repo' 31 | mode=0444 32 | when: is_rhel_compatible 33 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-master/tasks/init-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set SLURM master playbook params (Debian/Ubuntu) 4 | set_fact: 5 | slurmctld_service_name: slurmctld 6 | slurmctld_packages: 7 | - mailutils 8 | - slurmctld 9 | slurmdbd_service_name: slurmdbd 10 | slurmdbd_packages: 11 | - slurmdbd 12 | when: 13 | '{{is_debian_8_or_later}} or {{is_ubuntu_15_10_or_later}}' 14 | 15 | 16 | - name: Set SLURM master playbook params (older Debian/Ubuntu) 17 | set_fact: 18 | slurmctld_service_name: slurm-llnl 19 | slurmctld_packages: 20 | - mailutils 21 | - slurm-llnl 22 | slurmdbd_service_name: slurm-llnl-slurmdbd 23 | slurmdbd_packages: 24 | - slurm-llnl-slurmdbd 25 | when: 26 | '{{is_debian_or_ubuntu}} and not ({{is_debian_8_or_later}} or {{is_ubuntu_15_10_or_later}})' 27 | 
-------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tests/idempotence_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Process the output of the given file (should contain a plays stdout/err) 4 | # If we pass, return with 0 else return with 1, and print useful output 5 | 6 | _file="$1" 7 | 8 | # Assert filename has been passed 9 | [ $# -eq 0 ] && { echo "Usage: $0 filename"; exit 1; } 10 | 11 | # Assert file exists 12 | [ ! -f "$_file" ] && { echo "$0: $_file file not found."; exit 2; } 13 | 14 | # Make sure nothing has changed or failed 15 | grep -q 'changed=0.*failed=0' $_file 16 | 17 | # Success condition 18 | if [ $? -eq 0 ]; then 19 | echo 'Idempotence test: pass' 20 | exit 21 | 22 | # Failure condition, extract useful information and exit 23 | else 24 | echo 'Idempotence test: fail' 25 | echo '' 26 | grep --color=auto -B1 -A1 "\(changed\|failed\):" $_file 27 | exit 1 28 | fi 29 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hive-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | # hive-server/meta/main.yml 2 | --- 3 | 4 | dependencies: 5 | - role: hive 6 | - role: postgresql 7 | postgresql_users: 8 | - name: '{{HIVE_METASTORE_DB_USER}}' 9 | pass: '{{HIVE_METASTORE_DB_PASSWORD}}' 10 | encrypted: no 11 | postgresql_databases: 12 | - name: '{{HIVE_METASTORE_DB_NAME}}' 13 | owner: '{{HIVE_METASTORE_DB_USER}}' 14 | postgresql_pg_hba_passwd_hosts: '{{HIVE_CLIENTS|default([HIVE_METASTORE_HOST])}}' 15 | postgresql_listen_addresses: 16 | - '*' 17 | # FIXME: should depend on cluster size (e.g., 3*total nr. 
of cores) 18 | postgresql_max_connections: 1000 19 | # actually needed by Hive, see: http://www.cloudera.com/documentation/archive/cdh/4-x/4-2-0/CDH4-Installation-Guide/cdh4ig_topic_18_4.html 20 | postgresql_standard_conforming_strings: off 21 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nfs-server/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for RHEL/CentOS variants 4 | # 5 | 6 | - name: Set NFS server variables (RHEL/CentOS 7.x) 7 | tags: 8 | - nfs 9 | - nfs-server 10 | set_fact: 11 | nfs_server_services: 12 | - nfs-server 13 | nfs_server_packages: 14 | - nfs-utils 15 | # see `init-Debian.yml` about this one 16 | _nfs_server_started_state: 'started' 17 | when: 18 | 'is_rhel7_compatible' 19 | 20 | - name: Set NFS server variables (RHEL/CentOS 6.x) 21 | tags: 22 | - nfs 23 | - nfs-server 24 | set_fact: 25 | nfs_server_services: 26 | - rpcbind 27 | - nfslock 28 | - nfs 29 | nfs_server_packages: 30 | - nfs-utils 31 | # see `init-Debian.yml` about this one 32 | _nfs_server_started_state: 'started' 33 | when: 34 | 'is_rhel6_compatible' 35 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-master/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set SLURM master playbook params (RHEL 7.x compatible) 4 | set_fact: 5 | slurmctld_service_name: slurmctld 6 | slurmctld_packages: 7 | - mailx 8 | - slurm-plugins 9 | - slurm-slurmdb-direct 10 | - slurm 11 | slurmdbd_service_name: slurmdbd 12 | slurmdbd_packages: 13 | - slurm-sql 14 | - slurm-slurmdbd 15 | when: 16 | 'is_rhel7_compatible' 17 | 18 | 19 | - name: Set SLURM playbook params (RHEL 6.x compatible) 20 | set_fact: 21 | slurmctld_service_name: slurm 22 | slurmctld_packages: 23 | - mailx 24 | - slurm-plugins 25 | - slurm-slurmdb-direct 26 | - slurm 27 | slurmdbd_service_name: slurmdbd 28 | slurmdbd_packages: 29 | - slurm-sql 30 | - slurm-slurmdbd 31 | when: 32 | 'is_rhel6_compatible' 33 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/mcr/templates/input.txt.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ANSIBLE 2 | # any local modifications will be overwritten! 3 | # 4 | 5 | ## Parameters to run the MATLAB Compiler Runtime in unattended mode 6 | 7 | destinationFolder={{ installdir }} 8 | agreeToLicense=yes 9 | outputFile=/tmp/MCRInstaller.log 10 | 11 | {% if installkey -%} fileInstallationKey={{ installkey }} {% endif %} 12 | 13 | {% if licensefile -%} licensePath=license.dat {% endif %} 14 | 15 | 16 | # Installer mode 17 | # -------------- 18 | # 19 | # interactive: Run the installer GUI, waiting for user input on all 20 | # dialog boxes. 21 | # 22 | # silent: Run the installer without displaying the GUI. 23 | # 24 | # automated: Run the installer GUI, displaying all dialog boxes, but only 25 | # waiting for user input on dialogs that are missing required 26 | # input. 
27 | # 28 | mode=silent 29 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/tasks/gc3repo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure apt key for GC3 repository 3 | action: apt_key url=http://www.gc3.uzh.ch/packages/public.gpg state=present 4 | when: is_debian_or_ubuntu 5 | 6 | - name: Install required package for apt_repository. BUG! 7 | action: apt pkg=python-software-properties state=present 8 | when: is_debian_or_ubuntu 9 | 10 | - name: add GC3 apt public repository 11 | action: apt_repository repo='deb http://www.gc3.uzh.ch/packages/ubuntu precise main' state=present 12 | when: is_debian_or_ubuntu 13 | 14 | - name: get base version for CentOS distribution 15 | action: shell echo {{ansible_distribution_version}} | cut -d. -f1 16 | register: centos_base_version_cmd 17 | when: is_centos 18 | 19 | - name: add GC3 yum public repository 20 | action: template src=common/files/etc/yum.repos.d/gc3.repo.j2 dest=/etc/yum.repos.d/gc3.repo 21 | when: is_centos 22 | 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/yarn-master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # yarn-master/tasks/main.yml 2 | --- 3 | 4 | - name: Install Hadoop packages (YARN Master) 5 | tags: 6 | - hadoop 7 | - yarn 8 | - master 9 | - resourcemanager 10 | package: 11 | name='{{item}}' 12 | state=present 13 | with_items: 14 | - hadoop-mapreduce # The Hadoop MapReduce (MRv2) 15 | - hadoop-mapreduce-historyserver # MapReduce History Server 16 | - hadoop-yarn # The Hadoop NextGen MapReduce (YARN) 17 | - hadoop-yarn-proxyserver # YARN Web Proxy 18 | - hadoop-yarn-resourcemanager # YARN Resource Manager 19 | 20 | 21 | - name: Start YARN master services 22 | tags: 23 | - hadoop 24 | - yarn 25 | - master 26 | - resourcemanager 27 | service: 28 | name="{{item}}" 29 | state=started 30 | enabled=yes 31 | with_items: 32 | - hadoop-yarn-resourcemanager 33 | - hadoop-mapreduce-historyserver 34 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/glusterfs-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # for once, the package name on Debian/Ubuntu and RHEL/CentOS is the same 4 | - name: Install GlusterFS client 5 | package: 6 | name=glusterfs-client 7 | state=installed 8 | tags: 9 | - gluster 10 | - glusterfs-client 11 | 12 | 13 | - name: Create mountpoint directory {{item.mountpoint}} 14 | file: 15 | path='{{item.mountpoint}}' 16 | state=directory 17 | with_items: '{{GLUSTERFS_MOUNTS}}' 18 | tags: 19 | - gluster 20 | - glusterfs-client 21 | 22 | 23 | - name: Mount GlusterFS filesystem {{item.mountpoint}} 24 | mount: 25 | src='{{item.fs}}' 26 | name='{{item.mountpoint}}' 27 | fstype=glusterfs 28 | opts='{{item.options|default("defaults,rw")}}' 29 | state='{{item.state|default("mounted")}}' 30 | with_items: '{{GLUSTERFS_MOUNTS}}' 31 | tags: 32 | - gluster 33 | - glusterfs-client 34 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/iptables/tasks/init-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Load configuration and service names (RHEL-compatible) 4 | set_fact: 5 | # map config file 
names across distributions 6 | configfile: 7 | 'etc/iptables/rules.v4': '/etc/sysconfig/iptables' 8 | 'etc/iptables/rules.v6': '/etc/sysconfig/ip6tables' 9 | # same for init service names 10 | service: 11 | 'iptables': 'iptables' 12 | 'ip6tables': 'ip6tables' 13 | reload: 'reload iptables' 14 | when: '{{is_rhel_compatible}}' 15 | 16 | 17 | - name: Load configuration and service names (RHEL6-compatible) 18 | set_fact: 19 | packages: 20 | - iptables 21 | - iptables-ipv6 22 | when: '{{is_rhel6_compatible}}' 23 | 24 | 25 | - name: Load configuration and service names (RHEL7-compatible) 26 | set_fact: 27 | packages: 28 | - iptables 29 | - iptables-services 30 | when: '{{is_rhel7_compatible}}' 31 | -------------------------------------------------------------------------------- /tests/_helpers/__init__.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | # 3 | # Copyright (C) 2016 S3IT, University of Zurich 4 | # 5 | # This program is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # This program is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with this program. If not, see . 17 | # 18 | 19 | # Make coding more python3-ish 20 | from __future__ import (absolute_import, division, print_function) 21 | 22 | __author__ = ('Riccardo Murri ') 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/templates/etc/yp.conf.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # 7 | # yp.conf Configuration file for the ypbind process. You can define 8 | # NIS servers manually here if they can't be found by 9 | # broadcasting on the local net (which is the default). 10 | # 11 | # See the manual page of ypbind for the syntax of this file. 12 | # 13 | # IMPORTANT: For the "ypserver", use IP addresses, or make sure that 14 | # the host is in /etc/hosts. This file is only interpreted 15 | # once, and if DNS isn't reachable yet the ypserver cannot 16 | # be resolved and ypbind won't ever bind to the server. 
17 | 18 | # NIS/YP master server 19 | domain {{NIS_DOMAIN}} server {{NIS_MASTER}} 20 | 21 | # NIS/YP slave servers (if any) 22 | {% for server in groups.nis_slave|default([]) %} 23 | domain {{NIS_DOMAIN}} server {{server}} 24 | {% endfor %} 25 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/templates/HOWTO.postgresql.conf: -------------------------------------------------------------------------------- 1 | How to add a new PostgreSQL version 2 | =================================== 3 | 4 | 1) Download the Debian package 'postgresql-9.X_[...].deb' from 5 | http://apt.postgresql.org/pub/repos/apt/pool/main/p/postgresql-9.X/ 6 | 7 | 2) Extract the 'usr/share/postgresql/9.1/postgresql.conf.sample' file 8 | and save it under the 'templates' role directory 9 | => templates/postgresql.conf.9.{X}.orig 10 | 11 | 3) Check the difference between another version: 12 | => vimdiff postgresql.conf.9.{X-1}.orig postgresql.conf.9.{X}.orig 13 | 14 | 4) Copy an existing template: 15 | => cp postgresql.conf.9.{X-1}.j2 postgresql.conf.9.{X}.j2 16 | 17 | 5) Update the new template following the major differences. 18 | 19 | 5) If there are new options or some of them removed, update the 'default/main.yml' file and add a "(>= 9.X)" or "(<= 9.X)" comment to them. 20 | 21 | 6) Update the '.travis.yml' file to test its new version. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ElastiCluster-specific patterns 2 | elasticluster.egg-info/ 3 | .noseids 4 | .tox/ 5 | 6 | # Python-specific 7 | *.py[co] 8 | .cache 9 | .coverage 10 | .eggs 11 | build 12 | dist 13 | docs/doctrees 14 | docs/html 15 | docs/latex 16 | .local 17 | .python-version 18 | setuptools-*.egg 19 | .venv 20 | __pycache__ 21 | 22 | # Ansible-specific 23 | *.retry 24 | 25 | # general exclusion patterns 26 | *.a 27 | *.o 28 | *.so 29 | *.sw[nop] 30 | *~ 31 | .#* 32 | [#]*# 33 | *.bak 34 | *.old 35 | .gitignore 36 | 37 | # Final documents - ignore by default 38 | *.dvi 39 | *.pdf 40 | *.ps 41 | 42 | # Emacs byte-compiled files 43 | *.elc 44 | 45 | # archive formats - ignore by default 46 | *.tar 47 | *.tar.bz2 48 | *.tar.gz 49 | *.zip 50 | 51 | # IntelliJ IDEA / PyCharm - Ignore everything in the `.idea` directory and the 52 | # `.iml` for now. This can be removed later, since it will make sharing of 53 | # configuration easier if more developers use these IDEs. 
54 | .idea/* 55 | *.iml 56 | *.iws 57 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/examples/variables.j2: -------------------------------------------------------------------------------- 1 | `groups` 2 | ======== 3 | {% for key, value in groups.items() %} 4 | {{ key }} : {{ value }} 5 | {% endfor %} 6 | 7 | groups: 8 | {% for group in group_names %} 9 | {{ group }} 10 | {% endfor %} 11 | 12 | `hostvars` 13 | ========== 14 | 15 | {% for key, value in hostvars.items() -%} 16 | 17 | {{ key }}: 18 | {% if value is mapping %} 19 | {%- for key2, value2 in value.items() %} 20 | 21 | {{ key2 }}: 22 | {% if value2 is string %} 23 | '{{ value2 }}' 24 | {% elif value2 is mapping %} 25 | {%- for key3, value3 in value2.items() %} 26 | 27 | {{ key3 }}: 28 | {% if value3 is string %} 29 | '{{ value3 }}' 30 | {% else %} 31 | {{ value3 }} 32 | {% endif %} 33 | {% endfor %} 34 | {% else %} 35 | {{ value2 }} 36 | {% endif %} 37 | {% endfor %} 38 | {% elif value is string %} 39 | '{{ value }}' 40 | {% else %} 41 | {{ value }} 42 | {% endif %} 43 | {% endfor %} 44 | 45 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nfs-server/tasks/init-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set variables for Debian/Ubuntu variants 4 | # 5 | 6 | - name: Set NFS server variables (Debian/Ubuntu) 7 | tags: 8 | - nfs 9 | - nfs-server 10 | set_fact: 11 | nfs_server_services: 12 | - nfs-kernel-server 13 | nfs_server_packages: 14 | - nfs-kernel-server 15 | - nfs-common 16 | # see below about this one 17 | _nfs_server_started_state: 'started' 18 | when: 19 | 'is_debian_compatible' 20 | 21 | 22 | # As of 2016-12-06, `systemctl status nfs-kernel-server` mistakenly reports the 23 | # service as "active" even if the NFS server is not running; work around it by 24 | # forcing a restart. For details, see: 25 | # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=847204 26 | - name: Work around Debian bug #847204 27 | tags: 28 | - nfs 29 | - nfs-server 30 | set_fact: 31 | _nfs_server_started_state: 'restarted' 32 | when: 33 | 'is_debian_jessie' 34 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/tasks/init-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set NIS common playbook params (Debian) 4 | set_fact: 5 | nis_common_packages: 6 | - nis 7 | # no additional packages, all is provided by `nis` 8 | nis_client_packages: [] 9 | nis_client_services: 10 | - rpcbind 11 | - nis 12 | # no additional packages for `ypserv`, it's all included in `nis` 13 | nis_master_packages: [] 14 | nis_master_services: 15 | - rpcbind 16 | - nis 17 | nis_securenets_path: '/etc/ypserv.securenets' 18 | 19 | 20 | # Ubuntu 14.04 seems to use different service names for its "upstart" init 21 | # system; Ubuntu 16.04 seems back to use Debian's defaults... 
22 | - name: Set NIS common playbook params (Ubuntu 14.04) 23 | set_fact: 24 | nis_client_services: 25 | - rpcbind 26 | - ypbind 27 | nis_master_services: 28 | - rpcbind 29 | - ypserv 30 | - yppasswdd 31 | - ypxfrd 32 | when: 'is_ubuntu_trusty' 33 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure('2') do |config| 5 | 6 | # Ensure we use our vagrant private key 7 | config.ssh.insert_key = false 8 | config.ssh.private_key_path = '~/.vagrant.d/insecure_private_key' 9 | 10 | config.vm.define 'anxs' do |machine| 11 | machine.vm.box = "ubuntu/trusty64" 12 | #machine.vm.box = "ubuntu/precise64" 13 | #machine.vm.box = "debian/jessie64" 14 | #machine.vm.box = "debian/wheezy64" 15 | #machine.vm.box = "chef/centos-7.1" 16 | #machine.vm.box = "chef/centos-6.6" 17 | 18 | machine.vm.network :private_network, ip: '192.168.88.22' 19 | machine.vm.hostname = 'anxs.local' 20 | machine.vm.provision 'ansible' do |ansible| 21 | ansible.playbook = 'tests/playbook.yml' 22 | ansible.sudo = true 23 | ansible.inventory_path = 'vagrant-inventory' 24 | ansible.host_key_checking = false 25 | end 26 | 27 | end 28 | 29 | end 30 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hadoop-common/templates/core-site.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | fs.defaultFS 15 | hdfs://{{groups.hadoop_master[0]}}:54310 16 | 17 | URI of the default file system. 18 | 19 | The URI scheme determines the config property (fs.SCHEME.impl) 20 | naming the FileSystem implementation class. The uri's authority 21 | is used to determine the host, port, etc. for a filesystem. 22 | 23 | 24 | 25 | 26 | hadoop.tmp.dir 27 | /tmp 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/lmod/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # 4 | # Install Lmod 5 | # 6 | 7 | 8 | # try installing from OS packages first... 
9 | - name: Install Lmod from the OS repository (Debian-compatible) 10 | apt: 11 | name='lmod={{ LMOD_VERSION }}*' 12 | state=present 13 | ignore_errors: True 14 | register: lmod_pkg_install_deb 15 | when: '{{ is_debian_compatible }}' 16 | 17 | - name: Install Lmod from the OS repository (RHEL-compatible) 18 | yum: 19 | name='Lmod-{{ LMOD_VERSION }}*' 20 | state=present 21 | ignore_errors: True 22 | register: lmod_pkg_install_rpm 23 | when: '{{ is_rhel_compatible }}' 24 | 25 | 26 | # ...if installing from OS packages failed, download and build sources 27 | - name: Build Lmod from source 28 | include: build.yml 29 | when: '{{ lmod_pkg_install_deb|failed or lmod_pkg_install_rpm|failed }}' 30 | 31 | 32 | # run post-install regardless of installation method 33 | - name: Post-installation configuration 34 | include: post-install.yml 35 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/lmod/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Lmod version >= 5.6.3 is needed by EasyBuild 4 | LMOD_VERSION: '6.5' 5 | 6 | # Lmod's install process creates a top-level container directory named 7 | # `lmod` with subdirectories for each version 8 | lmod_root: '/opt' 9 | 10 | lmod_install_dir: '{{ lmod_root }}/lmod/{{ LMOD_VERSION }}' 11 | lmod_source_dir: '{{ lmod_root }}/lmod/{{ LMOD_VERSION }}/src' 12 | lmod_build_dir: '{{ lmod_source_dir }}' 13 | 14 | # where modulefiles reside 15 | MODULES_ROOT: '/etc/modulefiles' 16 | 17 | # initial set (`:`-separated) of modules to load 18 | LMOD_DEFAULT_MODULES: [] 19 | 20 | # path to the Lua interpreter to use 21 | lua_exe: '/usr/bin/lua' 22 | 23 | 24 | ### No customization should be necessary nor useful below this line ### 25 | 26 | # where to download Lmod sources 27 | lmod_source_url: 'http://downloads.sourceforge.net/project/lmod/Lmod-{{LMOD_VERSION}}.tar.bz2' 28 | 29 | # extra options to pass to Lmod's `./configure` script 30 | lmod_configure_extra_opts: '' 31 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/main.yml 2 | 3 | - include_vars: "{{ item }}" 4 | with_first_found: 5 | - "../vars/{{ ansible_os_family }}.yml" 6 | - "../vars/empty.yml" 7 | tags: [always] 8 | 9 | - include: install.yml 10 | when: ansible_pkg_mgr == "apt" 11 | tags: [postgresql, postgresql-install] 12 | 13 | - include: install_yum.yml 14 | when: ansible_pkg_mgr == "yum" 15 | tags: [postgresql, postgresql-install] 16 | 17 | - include: extensions.yml 18 | tags: [postgresql, postgresql-extensions] 19 | 20 | - include: configure.yml 21 | tags: [postgresql, postgresql-configure] 22 | 23 | - include: users.yml 24 | tags: [postgresql, postgresql-users] 25 | 26 | - include: databases.yml 27 | tags: [postgresql, postgresql-databases] 28 | 29 | - include: users_privileges.yml 30 | tags: [postgresql, postgresql-users] 31 | 32 | - include: monit.yml 33 | when: monit_protection is defined and monit_protection == true 34 | tags: [postgresql, postgresql-monit] 35 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ceph/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure apt key for CEPH repository 3 | action: apt_key 
url=https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc state=present 4 | when: is_debian_or_ubuntu 5 | tags: 6 | - ceph 7 | 8 | - name: add CEPH apt public repository 9 | action: apt_repository repo='deb http://ceph.com/debian/ precise main' state=present 10 | when: is_debian_or_ubuntu 11 | tags: 12 | - ceph 13 | 14 | - name: Install required package for apt_repository. 15 | action: apt pkg={{item}} state=present 16 | when: is_debian_or_ubuntu 17 | with_items: 18 | - ceph 19 | - pdsh 20 | - linux-image-extra-{{ansible_kernel}} 21 | tags: 22 | - ceph 23 | 24 | - action: file dest=/etc/ceph state=directory 25 | tags: 26 | - ceph 27 | 28 | - action: file dest=/var/run/ceph state=directory 29 | tags: 30 | - ceph 31 | 32 | - name: configure ceph 33 | action: template src=ceph/templates/etc/ceph/ceph.conf.j2 dest=/etc/ceph/ceph.conf 34 | tags: 35 | - ceph 36 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ntpd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Load distribution-specific parameters 4 | include: 'init-{{ansible_os_family}}.yml' 5 | 6 | 7 | - name: Deploy NTP configuration file 8 | tags: 9 | - ntp 10 | - time 11 | - common 12 | template: 13 | dest: /etc/ntp.conf 14 | src: etc/ntp.conf.j2 15 | mode: 0444 16 | notify: restart ntpd 17 | 18 | 19 | - name: Install NTP packages 20 | tags: 21 | - ntp 22 | - time 23 | - common 24 | package: 25 | name='{{ntpd_package}}' 26 | state=present 27 | 28 | 29 | # - name: Allow incoming NTP traffic (CentOS7) 30 | # tags: 31 | # - ntp 32 | # - time 33 | # - centos7 34 | # - firewall 35 | # - common 36 | # shell: 37 | # firewall-cmd --add-service=ntp --permanent 38 | # firewall-cmd --reload 39 | # when: is_rhel7_compatible 40 | 41 | 42 | - name: Enable NTP service at boot 43 | tags: 44 | - ntp 45 | - time 46 | - common 47 | service: 48 | name='{{ntpd_service}}' 49 | enabled=yes 50 | state=started 51 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/slurm-worker/tasks/init-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set SLURM worker playbook params (Debian/Ubuntu) 4 | set_fact: 5 | slurmd_packages: 6 | - libpam-slurm 7 | - slurmd 8 | - slurm-wlm-basic-plugins 9 | when: '{{is_debian_8_or_later}} or {{is_ubuntu_15_10_or_later}}' 10 | 11 | 12 | - name: Set SLURM worker playbook params (older Debian/Ubuntu) 13 | set_fact: 14 | slurmd_packages: 15 | - libpam-slurm 16 | - slurm-llnl 17 | - slurm-llnl-basic-plugins 18 | when: 19 | '{{is_debian_or_ubuntu}} and not ({{is_debian_8_or_later}} or {{is_ubuntu_15_10_or_later}})' 20 | 21 | 22 | - name: Set SLURM worker service name (Debian/Ubuntu) 23 | set_fact: 24 | slurmd_service: 'slurmd' 25 | when: '{{is_debian_compatible}} and ({{is_debian_8_or_later}} or {{is_ubuntu_15_10_or_later}})' 26 | 27 | 28 | - name: Set SLURM worker service name (older Debian/Ubuntu) 29 | set_fact: 30 | slurmd_service: 'slurm-llnl' 31 | when: '{{is_debian_compatible}} and (not ({{is_debian_8_or_later}} or {{is_ubuntu_15_10_or_later}}))' 32 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/templates/etc/ypserv.securenets.j2: -------------------------------------------------------------------------------- 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER 2 | # local 
modifications will be overwritten 3 | # the next time `elasticluster setup` is run! 4 | # 5 | 6 | # 7 | # securenets This file defines the access rights to your NIS server 8 | # for NIS clients (and slave servers - ypxfrd uses this 9 | # file too). This file contains netmask/network pairs. 10 | # A clients IP address needs to match with at least one 11 | # of those. 12 | # 13 | # One can use the word "host" instead of a netmask of 14 | # 255.255.255.255. Only IP addresses are allowed in this 15 | # file, not hostnames. 16 | # 17 | 18 | # Always allow access for localhost 19 | 255.0.0.0 127.0.0.0 20 | 21 | # every host in the cluster is a NIS client 22 | {% for host in NIS_CLIENTS %} 23 | host {{hostvars[host].ansible_default_ipv4.address}} 24 | {% endfor %} 25 | 26 | {% if NIS_SLAVES %} 27 | # NIS slave servers (if any) need access too 28 | {% for host in NIS_SLAVES %} 29 | host {{hostvars[host].ansible_default_ipv4.address}} 30 | {% endfor %} 31 | {% endif %} 32 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/anaconda/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for anaconda 3 | - name: Download Anaconda installer script 4 | get_url: 5 | url='{{anaconda_installer_url}}' 6 | dest='{{anaconda_installer_sh}}' 7 | owner=root 8 | group=root 9 | mode=0755 10 | 11 | - name: Install Anaconda 12 | command: '{{anaconda_installer_sh}} -b -f -p {{anaconda_home}}' 13 | args: 14 | creates: '{{anaconda_home}}/bin/conda' 15 | 16 | - name: Delete Anaconda installer script 17 | file: 18 | path='{{anaconda_installer_sh}}' 19 | state=absent 20 | when: anaconda_cleanup 21 | 22 | - name: make Anaconda Python the first match in $PATH 23 | template: 24 | src='etc/profile.d/anaconda.sh.j2' 25 | dest='/etc/profile.d/anaconda{{anaconda_python_version}}.sh' 26 | mode=0444 27 | owner=root 28 | group=root 29 | when: anaconda_in_path 30 | 31 | - name: Make convenience symlink to Anaconda home 32 | file: 33 | dest='/opt/anaconda{{anaconda_python_version}}' 34 | src='{{anaconda_home}}' 35 | state=link 36 | owner=root 37 | group=root 38 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/bigtop/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # bigtop/tasks/main.yml 2 | --- 3 | - name: Check playbook compatibility with host platform 4 | fail: 5 | msg="PySpark/Hadoop installation only works on Debian/Ubuntu presently." 6 | when: not is_debian_or_ubuntu 7 | 8 | 9 | - name: add signing key for Apache Bigtop repository 10 | apt_key: 11 | keyserver=keys.gnupg.net 12 | id='{{item}}' 13 | with_items: '{{bigtop_signing_key}}' 14 | 15 | 16 | - name: add Apache Bigtop repository 17 | apt_repository: 18 | # download from: http://www.apache.org/dist/bigtop/bigtop-1.0.0/repos/trusty/bigtop.list 19 | repo='deb http://bigtop-repos.s3.amazonaws.com/releases/{{bigtop_release}}/ubuntu/trusty/x86_64 bigtop contrib' 20 | state=present 21 | 22 | 23 | # this is run as task and not as a handler, since handlers are all 24 | # executed after *all* tasks in the play have run, and we need the 25 | # package cache to be up-to-date immediately for subsequent install 26 | # tasks to succeed... 
27 | - name: update APT cache 28 | apt: 29 | update_cache=yes 30 | cache_valid_time=86400 31 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hadoop.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install Hadoop master node 4 | hosts: hadoop_master 5 | tags: 6 | - hadoop 7 | - master 8 | vars: 9 | # PySpark won't currently work with Python 3.6 (see: 10 | # https://issues.apache.org/jira/browse/SPARK-19019) so force an older 11 | # version of Anaconda Python to be installed: with Python 3.5 all looks OK 12 | anaconda_version: '4.2.0' 13 | roles: 14 | - role: 'nis' 15 | NIS_MASTER: '{{groups.hadoop_master[0]}}' 16 | - hdfs-namenode 17 | - role: hive-server 18 | HIVE_METASTORE_HOST: '{{groups.hadoop_master[0]}}' 19 | HIVE_CLIENTS: '{{groups.hadoop_master + groups.hadoop_worker}}' 20 | - yarn-master 21 | - spark-master 22 | 23 | 24 | - name: Install Hadoop worker nodes 25 | hosts: hadoop_worker 26 | tags: 27 | - hadoop 28 | - worker 29 | roles: 30 | - role: 'nis' 31 | NIS_MASTER: '{{groups.hadoop_master[0]}}' 32 | - hdfs-datanode 33 | - role: hive 34 | HIVE_METASTORE_HOST: '{{groups.hadoop_master[0]}}' 35 | - yarn-worker 36 | - spark-worker 37 | 38 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/tasks/ypserv.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Deploy `ypserv` configuration files 4 | template: 5 | dest: '{{nis_securenets_path}}' 6 | src: 'etc/ypserv.securenets.j2' 7 | mode: 0400 8 | notify: 9 | - restart NIS master services 10 | 11 | 12 | - name: Deploy `yppasswdd` configuration file (CentOS/RHEL) 13 | copy: 14 | dest: '/etc/sysconfig/yppasswdd' 15 | src: 'etc/sysconfig/yppasswdd' 16 | mode: 0444 17 | when: 'is_rhel_compatible' 18 | 19 | 20 | - name: Install NIS master server packages 21 | package: 22 | name: '{{item}}' 23 | state: present 24 | with_items: '{{nis_master_packages}}' 25 | 26 | 27 | - name: Ensure `ypserv` starts at boot 28 | service: 29 | name: '{{item}}' 30 | state: started 31 | enabled: yes 32 | with_items: '{{nis_master_services}}' 33 | 34 | 35 | - name: Ensure `ypserv` is restarted if config files changed 36 | meta: flush_handlers 37 | 38 | 39 | - name: Update NIS/YP databases (NIS master server) 40 | command: | 41 | make 42 | args: 43 | chdir: /var/yp 44 | when: 'is_nis_master_server' 45 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/cluster/tasks/packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install compilers (CentOS) 3 | action: yum name={{item}} state=present 4 | with_items: 5 | - gcc-c++ 6 | - make 7 | - openmpi-devel 8 | - hdf5-openmpi 9 | - hdf5 10 | - netcdf-devel 11 | - blas-devel 12 | - atlas-devel 13 | when: is_centos 14 | 15 | # openmpi1.5 => precise 16 | # openmpi1.6 => quantal 17 | - name: install compilers (Ubuntu) 18 | action: apt name={{item}} state=present update_cache=yes 19 | with_items: 20 | - build-essential 21 | - gfortran 22 | - libatlas-dev 23 | - libnetcdf-dev 24 | - sqlite3 25 | when: is_debian_or_ubuntu 26 | 27 | - name: install extra packages (Ubuntu precise) 28 | action: apt name={{item}} state=present update_cache=yes 29 | when: is_ubuntu_precise 30 | with_items: 31 | - openmpi1.5-checkpoint 32 | - libopenmpi1.5-dev 33 | 34 | - 
name: install compilers (Ubuntu raring) 35 | action: apt name={{item}} state=present update_cache=yes 36 | when: is_ubuntu_raring 37 | with_items: 38 | - openmpi1.6-checkpoint 39 | - libopenmpi1.6-dev 40 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/htcondor/templates/htcondor.debconf.j2: -------------------------------------------------------------------------------- 1 | Name: condor/wantdebconf 2 | Template: condor/wantdebconf 3 | Value: true 4 | 5 | Name: condor/phonehome 6 | Template: condir/phonehome 7 | Value: false 8 | 9 | Name: condor/centralmanager 10 | Template: condor/centralmanager 11 | Value: {{groups.condor_master[0]}} 12 | 13 | Name: condor/daemons 14 | Template: condor/daemons 15 | {% if inventory_hostname in groups['condor_master'] %} 16 | Value: MASTER,SCHEDD,COLLECTOR,NEGOTIATOR 17 | {% else %} 18 | Value: MASTER,SCHEDD,STARTD 19 | {% endif %} 20 | 21 | Name: condor/uiddomain 22 | Template: condor/uiddomain 23 | Value: {{groups.condor_master[0]}} 24 | 25 | Name: condor/filesystemdomain 26 | Template: condor/filesystemdomain 27 | Value: {{groups.condor_master[0]}} 28 | 29 | Name: condor/personal 30 | Template: condor/personal 31 | Value: false 32 | 33 | Name: condor/reservedmemory 34 | Template: condor/reservedmemory 35 | Value: 0 36 | 37 | Name: condor/allowwrite 38 | Template: condor/allowwrite 39 | Value: {{groups.all}} 40 | 41 | Name: condor/startpolicy 42 | Template: condor/startpolicy 43 | Value: true 44 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/common/tasks/software-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Install all "standard" software that could be needed for installing 4 | # other software, and for effectively managing a cluster. Basically 5 | # this includes the following categories of utilities: 6 | # 7 | # - compression utilities and archivers 8 | # - version control systems 9 | # - standard UNIX build utilities like C compiler, make, etc. 
10 | # - a few other misc utilities that one can reasonably expect 11 | # 12 | 13 | - name: Install commonly needed software (Debian-family) 14 | package: 15 | name={{item}} 16 | state=present 17 | with_items: 18 | # compression and archivers 19 | - bzip2 20 | - cpio 21 | - gzip 22 | - lzip 23 | - p7zip-full 24 | - tar 25 | - unzip 26 | - xz-utils 27 | - zip 28 | # version control systems 29 | - git 30 | - mercurial 31 | - subversion 32 | # basic build environment 33 | - g++ 34 | - gcc 35 | - libc6-dev 36 | - make 37 | # other "standard" utilities 38 | - kexec-tools 39 | - m4 40 | - moreutils 41 | - rsync 42 | - screen 43 | - tmux 44 | - vim 45 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2014 Pieterjan Vandaele 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/anaconda/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Andrew Rothstein 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/ansible/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Ansible does not provide a repo for Debian, so we do not add one and users on 4 | # Debian "stable" will get whatever version of Ansible was released with the OS. 5 | # Suggestions or PRs to improve this are welcome. 6 | - name: Add Ansible APT repository (Ubuntu) 7 | apt_repository: 8 | repo='ppa:ansible/ansible' 9 | state=present 10 | update_cache=yes 11 | when: is_ubuntu 12 | 13 | 14 | # Updated Ansible packages are available from EPEL for RHEL and cognates, which 15 | # should have already be enabled by `common/tasks/init-RedHat.yml` 16 | # 17 | # - name: Add EPEL repository (RHEL-compatible) 18 | # yum: 19 | # name=epel-release 20 | # state=present 21 | # when: is_rhel_compatible 22 | 23 | 24 | # finally, install Ansible 25 | - name: Install Ansible 26 | package: 27 | name="{% if ANSIBLE_VERSION == 'LATEST' %}ansible{% else %}ansible{{__at__}}{{ANSIBLE_VERSION}}{% endif %}" 28 | state="{% if ANSIBLE_VERSION == 'LATEST' %}latest{% else %}present{% endif %}" 29 | 30 | - name: Create default Ansible inventory 31 | template: 32 | src=inventory.j2 33 | dest='/etc/ansible/hosts' 34 | mode=0444 35 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/postgresql/tasks/install_yum.yml: -------------------------------------------------------------------------------- 1 | # file: postgresql/tasks/install_yum.yml 2 | 3 | # The standard ca-certs are needed because without them apt_key will fail to 4 | # validate www.postgresql.org (or probably any other source). 
5 | - name: PostgreSQL | Make sure the CA certificates are available 6 | yum: 7 | name: ca-certificates 8 | state: present 9 | 10 | - name: PostgreSQL | Add PostgreSQL repository 11 | yum: 12 | name: "{{ postgresql_yum_repository_url }}" 13 | state: present 14 | 15 | - name: PostgreSQL | Make sure the dependencies are installed 16 | yum: 17 | name: "{{ item }}" 18 | state: present 19 | update_cache: yes 20 | with_items: ["python-psycopg2", "python-pycurl", "glibc-common"] 21 | 22 | - name: PostgreSQL | Install PostgreSQL 23 | yum: 24 | name: "{{ item }}" 25 | state: present 26 | environment: "{{ postgresql_env }}" 27 | with_items: 28 | - "postgresql{{ postgresql_version_terse }}-server" 29 | - "postgresql{{ postgresql_version_terse }}" 30 | 31 | - name: PostgreSQL | PGTune 32 | yum: 33 | name: pgtune 34 | state: present 35 | environment: "{{ postgresql_env }}" 36 | when: postgresql_pgtune 37 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/pbs+maui/tasks/master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install TORQUE packages 3 | when: is_centos 4 | action: yum pkg={{item}} state=latest 5 | with_items: 6 | - torque-server 7 | 8 | - name: Update node file 9 | action: template src=pbs+maui/templates/var/spool/torque/server_priv/nodes.j2 10 | dest=/var/spool/torque/server_priv/nodes 11 | notify: 12 | - restart pbs_server 13 | 14 | - name: Fix for checkpoint directory not found 15 | action: file path=/var/spool/torque/checkpoint state=directory 16 | 17 | - name: fix /var/spool/torque/server_name 18 | action: template src=pbs+maui/templates/var/spool/torque/server_name.j2 19 | dest=/var/spool/torque/server_name 20 | notify: 21 | - restart pbs_server 22 | 23 | - name: Ensure pbs services are running 24 | when: is_centos 25 | action: service name={{item}} state=started 26 | with_items: 27 | - trqauthd 28 | - pbs_server 29 | 30 | - name: copy default configuration file for pbs 31 | action: copy src=pbs+maui/files/qmgr.bootstrap dest=/var/spool/torque/server_priv/qmgr.bootstrap 32 | 33 | - name: default configuration 34 | action: shell qmgr < /var/spool/torque/server_priv/qmgr.bootstrap 35 | 36 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/hdfs-namenode/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # hdfs-namenode/tasks/main.yml 2 | --- 3 | 4 | - name: Install Hadoop packages (HDFS NameNode) 5 | tags: 6 | - hadoop 7 | - hdfs 8 | - namenode 9 | package: 10 | name='{{item}}' 11 | state=present 12 | with_items: 13 | - hadoop-hdfs # The Hadoop Distributed File System 14 | - hadoop-hdfs-namenode # The Hadoop namenode manages the block locations of HDFS files 15 | - hadoop-hdfs-secondarynamenode # Hadoop Secondary namenode 16 | 17 | - name: Format HDFS 18 | tags: 19 | - hadoop 20 | - hdfs 21 | - namenode 22 | command: 23 | hdfs namenode -format 24 | creates="/var/lib/hadoop-hdfs/cache/hdfs/dfs/name/current/VERSION" 25 | become: yes 26 | become_user: hdfs 27 | 28 | - name: Start HDFS services (NameNode) 29 | tags: 30 | - hadoop 31 | - hdfs 32 | - namenode 33 | service: 34 | name="{{item}}" 35 | state=started 36 | enabled=yes 37 | with_items: 38 | - hadoop-hdfs-namenode 39 | - hadoop-hdfs-secondarynamenode 40 | 41 | - name: Make entire HDFS world-writable 42 | tags: 43 | - hadoop 44 | command: 45 | hdfs dfs -chmod 0777 / 46 | become: yes 47 | 
become_user: hdfs 48 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/jupyter/tasks/irkernel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Install the IR kernel for Jupyter 4 | # 5 | # See: https://irkernel.github.io/installation/#binary-panel 6 | # 7 | 8 | - name: Install required R packages 9 | # may execute commands that create files under $HOME (e.g., 10 | # `$HOME/.local/jupyter/...`) so be sure that it does not do that with root 11 | # privileges in a users' directory! 12 | command: | 13 | env HOME=/root PATH={{jupyter_python|dirname}}:$PATH {{R_EXE}} --no-save --no-restore -e "install.packages(c('crayon', 'pbdZMQ', 'devtools')); devtools::install_github(paste0('IRkernel/', c('repr', 'IRdisplay', 'IRkernel')))" 14 | args: 15 | chdir: '/root' 16 | creates: '/usr/local/lib/R/site-library/IRkernel' 17 | 18 | - name: Make IR kernel available to Jupyter 19 | # may execute commands that create files under $HOME (e.g., 20 | # `$HOME/.local/jupyter/...`) so be sure that it does not do that with root 21 | # privileges in a users' directory! 22 | command: | 23 | env HOME=/root PATH={{jupyter_python|dirname}}:$PATH {{R_EXE}} --no-save --no-restore -e "IRkernel::installspec(user = FALSE)" 24 | args: 25 | chdir: '/root' 26 | creates: '/usr/local/share/jupyter/kernels/ir' 27 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # for local customizations 3 | - include: before.yml 4 | 5 | - name: Common setup for all hosts 6 | hosts: all 7 | gather_facts: yes 8 | roles: 9 | - role: common 10 | - role: iptables 11 | # FIXME: should change to 'DROP' when all playbooks register their input ports! 12 | default_input_policy: 'ACCEPT' 13 | - role: ntpd 14 | ntp_server: '{{groups.ntp_master|default([])}}' 15 | - role: pdsh 16 | 17 | # Run all other playbooks one by one, so they get a chance of doing 18 | # their setup depending on configured host groups 19 | - include: roles/ansible.yml 20 | - include: roles/slurm.yml 21 | - include: roles/jenkins.yml 22 | - include: roles/ganglia.yml 23 | - include: roles/pbs+maui.yml 24 | - include: roles/gridengine.yml 25 | - include: roles/hadoop.yml 26 | - include: roles/pvfs2.yml 27 | - include: roles/ceph.yml 28 | - include: roles/glusterfs.yml 29 | - include: roles/ipython.yml 30 | - include: roles/htcondor.yml 31 | 32 | # Jupyter installation comes last, to allow it to pick up any SW that's been 33 | # installed so far (e.g., for kernels, or for ipyparallel) 34 | - include: roles/jupyterhub.yml 35 | 36 | # for local customizations 37 | - include: after.yml 38 | -------------------------------------------------------------------------------- /elasticluster/share/playbooks/roles/nis/tasks/ypbind.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Deploy `ypbind` configuration files 4 | template: 5 | dest: '/etc/yp.conf' 6 | src: 'etc/yp.conf.j2' 7 | mode: 0400 8 | notify: 9 | - restart ypbind 10 | 11 | 12 | - name: Ensure NIS/YP is used as a name service 13 | lineinfile: 14 | dest: '/etc/nsswitch.conf' 15 | regexp: '(?x) ^{{item}}: \s* (?P
<pre> (\w+\s+)*) files (?!\s+ nis) (?P<post>.*)$'
16 |     line: '{{item}}: \g<pre> files nis\g<post>'
17 |     state: present
18 |     backrefs: yes
19 |     create: no
20 |   with_items:
21 |     - passwd
22 |     - group
23 |     - shadow
24 | 
25 | 
26 | - name: Replace `compat` in `/etc/nsswitch.conf` with `files nis`
27 |   replace:
28 |     dest: '/etc/nsswitch.conf'
29 |     regexp: '(?x) ^{{item}}: \s* (?P<pre> (\w+\s+)*) compat (?P<post>.*)$'
30 |     replace: '{{item}}: \g<pre> files nis\g<post>'
31 |   with_items:
32 |     - passwd
33 |     - group
34 |     - shadow
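# Sketch of the expected outcome (illustrative, not captured from a real
# host): after the `lineinfile` and `replace` tasks above, the affected
# lines of `/etc/nsswitch.conf` should read something like:
#
#   passwd:         files nis
#   group:          files nis
#   shadow:         files nis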
35 | 
36 | 
37 | - name: Ensure `ypbind` starts at boot
38 |   service:
39 |     name: '{{item}}'
40 |     state: started
41 |     enabled: yes
42 |   with_items: '{{nis_client_services}}'
43 | 
44 | 
45 | - name: Ensure `ypbind` is restarted if config files changed
46 |   meta: flush_handlers
47 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/pbs+maui.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: PBS masternode Playbook
 4 |   hosts: pbs_master
 5 |   roles:
 6 |     - role: 'nis'
 7 |       NIS_MASTER: '{{groups.pbs_master[0]}}'
 8 |     - role: nfs-server
 9 |       NFS_EXPORTS:
10 |       - path: '/home'
11 |         clients: '{{groups.pbs_clients}}'
12 |         options: 'rw,no_root_squash,async'
13 |   tasks: 
14 |     - include: pbs+maui/tasks/master.yml
15 |     - include: cluster/tasks/packages.yml
16 |   handlers:
17 |     - include: common/handlers/main.yml
18 |     - include: pbs+maui/handlers/main.yml
19 | 
20 | - name: MAUI masternode Playbook
21 |   hosts: maui_master
22 |   tasks: 
23 |     - include: pbs+maui/tasks/maui.yml
24 |   handlers:
25 |     - include: pbs+maui/handlers/main.yml
26 | 
27 | - name: PBS worker nodes Playbook
28 |   hosts: pbs_clients
29 |   roles:
30 |     - role: 'nis'
31 |       NIS_MASTER: '{{groups.pbs_master[0]}}'
32 |     - role: nfs-client
33 |       NFS_MOUNTS:
34 |       - fs: '{{groups.pbs_master[0]}}:/home'
35 |         mountpoint: '/home'
36 |         options: 'rw,async'
37 |   tasks: 
38 |     - include: pbs+maui/tasks/clients.yml
39 |     - include: cluster/tasks/packages.yml
40 |   handlers:
41 |     - include: pbs+maui/handlers/main.yml
42 | 
43 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/htcondor.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | - name: HTCondor common Playbook
 3 |   hosts: condor_master:condor_workers
 4 |   tasks:
 5 |     - include: cluster/tasks/packages.yml
 6 |     - include: htcondor/tasks/common.yml
 7 | 
 8 |   handlers:
 9 |     - include: common/handlers/main.yml
10 |     - include: htcondor/handlers/common.yml
11 | 
12 | - name: HTCondor master Playbook
13 |   hosts: condor_master
14 |   roles:
15 |     - role: 'nis'
16 |       NIS_MASTER: '{{groups.condor_master[0]}}'
17 |     - role: nfs-server
18 |       NFS_EXPORTS:
19 |         path: '/home'
20 |         clients: '{{group.condor_workers}}'
21 |         options: 'rw,no_root_squash,async'
22 |   handlers:
23 |     - include: common/handlers/main.yml
24 |     - include: htcondor/handlers/common.yml
25 | 
26 | 
27 | - name: HTCondor worker nodes Playbook
28 |   hosts: condor_workers
29 |   roles:
30 |     - role: 'nis'
31 |       NIS_MASTER: '{{groups.condor_master[0]}}'
32 |     - role: nfs-client
33 |       NFS_MOUNTS:
34 |         fs: '{{groups.condor_master[0]}}:/home'
35 |         mountpoint: '/home'
36 |   handlers:
37 |     - include: common/handlers/main.yml
38 |     - include: htcondor/handlers/common.yml
39 | 
40 | # Get info with
41 | # ansible -i hostsfile -m setup hostname
42 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm-master/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | # slurm-master/tasks/main.yml
 2 | ---
 3 | 
 4 | - name: Load distribution-specific parameters
 5 |   include: 'init-{{ansible_os_family}}.yml'
 6 | 
 7 | - include: db.yml
 8 | 
 9 | - include: install-slurmdbd.yml
10 | 
11 | - include: install-slurmctld.yml
12 | 
13 | - name: Create cluster in accounting database
14 |   tags:
15 |     - slurm
16 |   shell: |
17 |     sacctmgr --parsable --noheader list cluster | grep '^elasticluster|' \
18 |       || sacctmgr -i -Q add cluster elasticluster
19 | 
20 | - name: Create an account for default cluster
21 |   tags:
22 |     - slurm
23 |   shell: |
24 |     sacctmgr --immediate --parsable --noheader list account Cluster=elasticluster  | grep '^root|' \
25 |       || sacctmgr -i --quiet add account root Cluster=elasticluster
26 | 
27 | - name: Add default user to cluster
28 |   tags:
29 |     - slurm
30 |   shell: |
31 |     sacctmgr --immediate --parsable --noheader list user Account=root | grep '^{{ansible_ssh_user}}|' \
32 |       || sacctmgr -i --quiet add user '{{ansible_ssh_user}}' DefaultAccount=root
33 | 
34 | 
35 | - name: Ensure `slurmctld` is running
36 |   tags: 
37 |     - slurm
38 |     - slurmctld
39 |   service:
40 |     name='{{item}}'
41 |     enabled=yes
42 |     state=started
43 |   with_items:
44 |     - '{{slurmctld_service_name}}'
45 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/spark-worker/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | # spark-worker/tasks/main.yml
 2 | ---
 3 | 
 4 | # The post-installation script of package `spark-worker` tries to run `service
 5 | # spark-worker start` which is bound to fail since we intend to run Spark on top
 6 | # of YARN and have not configured the standalone worker. So the failure prevents
 7 | # the package from being correctly installed (according to `dpkg`, the
 8 | # "configure" step fails), and everything goes south from there. So just tell
 9 | # systemd to ignore all requests related to `spark-worker` and everybody is
10 | # happy.
11 | - name: Prevent Spark worker startup script from running
12 |   tags:
13 |     - hadoop
14 |     - spark
15 |     - worker
16 |   command: |
17 |     systemctl mask spark-worker
18 |   become: yes
19 |   when: init_is_systemd
20 | 
21 | 
22 | - name: Install Spark packages (worker)
23 |   tags:
24 |     - hadoop
25 |     - spark
26 |     - worker
27 |   package:
28 |     name='{{item}}'
29 |     state=present
30 |   with_items:
31 |     - spark-worker # Server for Spark worker
32 | 
33 | 
34 | - name: Disable and stop Spark worker services
35 |   tags:
36 |     - hadoop
37 |     - spark
38 |     - worker
39 |   service:
40 |     name="{{item}}"
41 |     state=stopped
42 |     enabled=no
43 |   with_items:
44 |     - spark-worker # Server for Spark worker
45 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/glusterfs-server/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | # for once, package names are the same on Debian/Ubuntu and RHEL/CentOS
 4 | - name: Install GlusterFS server packages
 5 |   package:
 6 |     name='{{item}}'
 7 |     state=installed
 8 |   with_items:
 9 |     - glusterfs-server
10 |     - glusterfs-client
11 |   tags:
12 |     - gluster
13 |     - glusterfs-server
14 | 
15 | 
16 | - name: Ensure the GlusterFS service is running (Debian/Ubuntu)
17 |   service:
18 |     name=glusterfs-server
19 |     state=started
20 |   when: 'is_debian_compatible'
21 |   tags:
22 |     - gluster
23 |     - glusterfs-server
24 | 
25 | 
26 | - name: Ensure the GlusterFS service is running (RHEL-compatible)
27 |   service:
28 |     name='{{item}}'
29 |     state=started
30 |   with_items:
31 |     - glusterd
32 |     - glusterfsd
33 |   when: 'is_rhel_compatible'
34 |   tags:
35 |     - gluster
36 |     - glusterfs-server
37 | 
38 | 
39 | - name: Configure peers (only on the first host)
40 |   shell: |
41 |     gluster peer probe '{{item}}'
42 |   with_items: '{{groups.glusterfs_server}}'
43 |   when: is_glusterfs_main_server_node
44 |   tags:
45 |     - gluster
46 |     - glusterfs-server
47 | 
48 | 
49 | # build and export volumes
50 | - include: export.yml
51 |   with_items: '{{GLUSTERFS_VOLUMES}}'
52 |   tags:
53 |     - gluster
54 |     - glusterfs-server
55 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hive/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | # hive/tasks/main.yml
 2 | ---
 3 | 
 4 | - name: Install Hive packages (client)
 5 |   tags:
 6 |     - hadoop
 7 |     - hive
 8 |   package:
 9 |     name='{{item}}'
10 |     state=present
11 |   with_items:
12 |     - hive
13 |     # Java JDBC support for PostgreSQL
14 |     - libpostgresql-jdbc-java
15 | 
16 | 
17 | - name: Ensure Hive configuration directory exists
18 |   tags:
19 |     - hadoop
20 |     - hive
21 |   file:
22 |     path='{{HIVE_CONF_DIR}}'
23 |     state=directory
24 |     
25 |     
26 | - name: Copy Hive/BigTop default configuration files
27 |   tags:
28 |     - hadoop
29 |     - hive
30 |   command:
31 |     'rsync -ax --update --backup /etc/hive/conf.dist/ {{HIVE_CONF_DIR}}/'
32 | 
33 |   
34 | - name: Deploy Hive/ElastiCluster configuration files
35 |   tags:
36 |     - hadoop
37 |     - hive
38 |   template:
39 |     src='{{item}}.j2'
40 |     dest='{{HIVE_CONF_DIR}}/{{item}}'
41 |   with_items:
42 |     - hive-env.sh
43 |     - hive-site.xml
44 | 
45 | 
46 | - name: Activate Hive/ElastiCluster configuration
47 |   alternatives:
48 |     name='hive-conf'
49 |     link='/etc/hive/conf'
50 |     path='{{HIVE_CONF_DIR}}'
51 | 
52 | 
53 | - name: Make Hive settings available to all Hadoop apps
54 |   file:
55 |     path='{{HADOOP_CONF_DIR}}/hive-site.xml'
56 |     src='{{HIVE_CONF_DIR}}/hive-site.xml'
57 |     state=link
58 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/nis/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: Load distribution-specific parameters
 4 |   include: 'init-{{ansible_os_family}}.yml'
 5 | 
 6 | 
 7 | - name: Pre-load debconf answer to questions (Debian/Ubuntu)
 8 |   debconf:
 9 |     name: nis
10 |     question: nis/domain
11 |     answer: '{{NIS_DOMAIN}}'
12 |     vtype: string
13 |   when: 'is_debian_compatible'
14 | 
15 | 
16 | # since both ypbind *and* ypserv are shipped in the same package, it's the init
17 | # script params `/etc/default/nis` which decide what servers to start
18 | - name: Deploy additional NIS configuration (Debian/Ubuntu)
19 |   template:
20 |     dest: '/{{item}}'
21 |     src: '{{item}}.j2'
22 |     mode: 0444
23 |   with_items:
24 |     - etc/default/nis
25 |     - etc/defaultdomain
26 |   when: 'is_debian_compatible'
27 | 
28 | 
29 | - name: Set NIS domain (CentOS/RHEL)
30 |   lineinfile:
31 |     dest: "/etc/sysconfig/network"
32 |     line: "NISDOMAIN='{{NIS_DOMAIN}}'"
33 |     regexp: "^NISDOMAIN=(?!'{{NIS_DOMAIN}}').*"
34 |     state: present
35 |     create: yes
36 |   when: 'is_rhel_compatible'
37 | 
38 | 
39 | - name: Install NIS common packages
40 |   package:
41 |     name: '{{item}}'
42 |     state: present
43 |   with_items: '{{nis_common_packages}}'
44 | 
45 | 
46 | - include: ypserv.yml
47 |   when: 'is_nis_server'
48 | 
49 | 
50 | - include: ypbind.yml
51 |   when: 'is_nis_client'
52 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/lua/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | #
 4 | # Install Lua plus all needed packages for running Lmod
 5 | #
 6 | 
 7 | # try installing from OS packages first...
 8 | - name: Install Lua from the OS repository (Debian-compatible)
 9 |   apt:
10 |     name='{{ item }}'
11 |     state=present
12 |   ignore_errors: True
13 |   with_items:
14 |     # Lua 5.2 is the only interpreter for which all of the "filesystem",
15 |     # "posix", and "term" libraries have been packaged (as of Ubuntu 16.04)
16 |     - 'liblua5.2-0={{ LUA_VERSION }}*'
17 |     - 'liblua5.2-dev={{ LUA_VERSION }}*'
18 |     # these Lua libraries may support multiple Lua minor versions
19 |     # in a single package
20 |     - lua-filesystem
21 |     - lua-posix
22 |     - lua-term
23 |   register: lua_pkg_install_deb
24 |   when: is_debian_compatible
25 | 
26 | - name: Install Lua from the OS repository (RHEL-compatible)
27 |   yum:
28 |     name='{{ item }}'
29 |     state=present
30 |   with_items:
31 |     - 'lua-{{ LUA_VERSION }}*'
32 |     - lua-filesystem
33 |     - lua-posix
34 |     - lua-term
35 |   ignore_errors: True
36 |   register: lua_pkg_install_rpm
37 |   when: is_rhel_compatible
38 | 
39 | 
40 | # ...if installing from OS packages failed, download and build sources
41 | - name: Build Lua from source
42 |   include: build.yml
43 |   when: lua_pkg_install_deb|failed or lua_pkg_install_rpm|failed
44 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/nfs-server/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | #
 3 | # Install an NFSv4 server
 4 | #
 5 | 
 6 | - name: Load distribution-specific parameters
 7 |   include: 'init-{{ansible_os_family}}.yml'
 8 |   tags:
 9 |     - nfs
10 |     - nfs-server
11 | 
12 | - name: install NFS server software
13 |   tags:
14 |     - nfs
15 |     - nfs-server
16 |   package:
17 |     pkg='{{item}}'
18 |     state=present
19 |   with_items: '{{nfs_server_packages}}'
20 | 
21 | 
22 | # exports need to be there before attempting to install/start the
23 | # server, otherwise the service startup script might refuse to start
24 | # the NFS server
25 | - name: Export directories
26 |   tags:
27 |     - nfs
28 |     - nfs-server
29 |   nfsexport:
30 |     path='{{item.path}}'
31 |     clients='{{item.clients}}'
32 |     options='{{item.options|default("rw,no_root_squash,async,no_subtree_check,crossmnt")}}'
33 |     dest='/etc/exports'
34 |     state=exported
35 |   with_items: '{{NFS_EXPORTS}}'
36 | 
37 | 
38 | # RHEL does not (auto)start needed services after installation
39 | - name: ensure NFS server is running
40 |   tags:
41 |     - nfs
42 |     - nfs-server
43 |   service:
44 |     name={{item}}
45 |     enabled=yes
46 |     state='{{_nfs_server_started_state}}'
47 |   with_items: '{{nfs_server_services}}'
48 | 
49 | 
50 | - name: Reload NFS exports file
51 |   tags:
52 |     - nfs
53 |     - nfs-server
54 |   shell:
55 |     exportfs -r
56 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/common/tasks/ssh_host_based_authentication.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: Setup SSH known hosts file
 4 |   template:
 5 |     dest=/etc/ssh/ssh_known_hosts
 6 |     src=roles/common/templates/etc/ssh/ssh_known_hosts.j2
 7 |     owner=root
 8 |     group=root
 9 |     mode=0644
10 | 
11 |     
12 | - name: Setup /etc/ssh/shosts.equiv file
13 |   template:
14 |     dest=/etc/ssh/shosts.equiv
15 |     src=roles/common/templates/etc/ssh/shosts.equiv.j2
16 |     owner=root
17 |     group=root
18 | 
19 | 
20 | - name: Setup /root/.shosts file
21 |   template:
22 |     dest=/root/.shosts
23 |     src=roles/common/templates/etc/ssh/shosts.equiv.j2
24 |     owner=root
25 |     group=root
26 | 
27 |     
28 | - name: Setup SSH host based authentication (server configuration file)
29 |   lineinfile:
30 |     dest=/etc/ssh/sshd_config
31 |     regexp='{{item.key}}.*'
32 |     line="{{item.key}} {{item.value}}"
33 |   with_items:
34 |     - { key: "HostbasedAuthentication", value: 'yes' }
35 |     - { key: "IgnoreRhosts",            value: 'no'  }
36 |   notify: restart sshd
37 | 
38 |   
39 | - name: Setup SSH host based authentication (server configuration file)
40 |   lineinfile:
41 |     dest=/etc/ssh/ssh_config
42 |     regexp='{{item.key}}.*'
43 |     line="{{item.key}} {{item.value}}"
44 |   with_items:
45 |     - { key: 'HostbasedAuthentication', value: 'yes' }
46 |     - { key: 'EnableSSHKeysign',        value: 'yes' }
47 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/common/tasks/software-RedHat.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | #
 3 | # Install all "standard" software that could be needed for installing
 4 | # other software, and for effectively managing a cluster.  Basically
 5 | # this includes the following categories of utilities:
 6 | #
 7 | # - compression utilities and archivers
 8 | # - version control systems
 9 | # - standard UNIX build utilities like C compiler, make, etc.
10 | # - a few other misc utilities that one can reasonably expect
11 | #
12 | 
13 | - name: Install commonly needed software (RHEL-family)
14 |   package:
15 |     name={{item}}
16 |     state=present
17 |   with_items:
18 |     # compression and archivers
19 |     - bzip2
20 |     - cpio
21 |     - gzip
22 |     - lzip
23 |     - p7zip
24 |     - tar
25 |     - unzip
26 |     - xz
27 |     - zip
28 |     # version control systems
29 |     - git
30 |     - mercurial
31 |     - subversion
32 |     # basic build environment
33 |     - gcc
34 |     - gcc-c++
35 |     - gcc-gfortran
36 |     - glibc-devel
37 |     - make
38 |     # other "standard" utilities
39 |     - kexec-tools
40 |     - m4
41 |     - moreutils
42 |     - rsync
43 |     - screen
44 |     - tmux
45 |     - vim
46 | 
47 | 
48 | - name: Install commonly needed software (RHEL7-family)
49 |   package:
50 |     name={{item}}
51 |     state=present
52 |   with_items:
53 |     # other "standard" utilities
54 |     - moreutils
55 |   when: 'is_rhel7_compatible'
56 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/anaconda/library/ansible-conda/LICENSE.txt:
--------------------------------------------------------------------------------
 1 | Copyright (c) 2016, UrbanSim Inc.
 2 | All rights reserved.
 3 | 
 4 | Redistribution and use in source and binary forms, with or without
 5 | modification, are permitted provided that the following conditions are met:
 6 | 
 7 | * Redistributions of source code must retain the above copyright notice, this
 8 |   list of conditions and the following disclaimer.
 9 | 
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 |   this list of conditions and the following disclaimer in the documentation
12 |   and/or other materials provided with the distribution.
13 | 
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/nfs-client/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: install NFS client software (Debian/Ubuntu)
 4 |   tags:
 5 |     - nfs
 6 |     - nfs-client
 7 |   package:
 8 |     pkg='{{item}}'
 9 |     state=present
10 |   with_items:
11 |     - nfs-common
12 |   when: is_debian_compatible
13 | 
14 | - name: install NFS client software (RHEL-compatible)
15 |   tags:
16 |     - nfs
17 |     - nfs-client
18 |   package:
19 |     pkg='{{item}}'
20 |     state=present
21 |   with_items:
22 |     - nfs-utils
23 |   when: is_rhel_compatible
24 | 
25 | 
26 | - name: Ensure `rpcbind` is running (Debian)
27 |   service:
28 |     name=rpcbind
29 |     enabled=yes
30 |     state=started
31 |   when: is_debian
32 | 
33 | - name: Ensure `rpcbind` is running (RHEL-compatible)
34 |   service:
35 |     name=rpcbind
36 |     enabled=yes
37 |     state=started
38 |   when: is_rhel_compatible
39 | 
40 | - name: Ensure `portmap` is running (Ubuntu prior to 14.04)
41 |   service:
42 |     name=portmap
43 |     enabled=yes
44 |     state=started
45 |   when: is_ubuntu and not is_ubuntu_14_04_or_later
46 | 
47 | - name: Ensure `rpcbind` is running (Ubuntu 14.04 or newer)
48 |   service:
49 |     name=rpcbind
50 |     enabled=yes
51 |     state=started
52 |   when: is_ubuntu_14_04_or_later
53 | 
54 | 
55 | - name: Mount NFS filesystems
56 |   tags: 
57 |     - nfs
58 |     - nfs-client
59 |   include: nfsmount.yml fs='{{item}}'
60 |   with_items: '{{NFS_MOUNTS}}'
61 |     


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hive-server/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | # hive-server/tasks/main.yml
 2 | ---
 3 | #
 4 | # Installation and configuration mostly follows these instructions:
 5 | # http://www.cloudera.com/documentation/archive/cdh/4-x/4-2-0/CDH4-Installation-Guide/cdh4ig_topic_18_4.html
 6 | #
 7 | 
 8 | - name: Check if Hive schema has already been loaded
 9 |   tags:
10 |     - hadoop
11 |     - hive
12 |     - hive-server
13 |   command:
14 |     'psql --username={{HIVE_METASTORE_DB_USER}} --dbname={{HIVE_METASTORE_DB_NAME}} -c "\d DBS"'
15 |   register: db_tables_check
16 |   ignore_errors: yes
17 | 
18 |   
19 | - name: Load Hive schema
20 |   tags:
21 |     - hadoop
22 |     - hive
23 |     - hive-server
24 |   shell:
25 |     'psql --username="{{HIVE_METASTORE_DB_USER}}" --dbname="{{HIVE_METASTORE_DB_NAME}}" < /usr/lib/hive/scripts/metastore/upgrade/postgres/hive-schema-{{hive_schema_version}}.postgres.sql'
26 |   when: db_tables_check|failed
27 | 
28 |   
29 | - name: Install Hive packages (services)
30 |   tags:
31 |     - hadoop
32 |     - hive
33 |     - hive-server
34 |   package:
35 |     name='{{item}}'
36 |     state=present
37 |   with_items:
38 |     - hive-metastore
39 |     - hive-server2
40 | 
41 | 
42 | - name: Enable and start Hive services
43 |   tags:
44 |     - hadoop
45 |     - hive
46 |     - hive-server
47 |   service:
48 |     name="{{item}}"
49 |     state=started
50 |     enabled=yes
51 |   with_items:
52 |     - hive-metastore
53 |     - hive-server2
54 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/common/tasks/hostname.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | #
 3 | # Ensure the node host name matches the Ansible inventory name
 4 | #
 5 | 
 6 | # work around for Ansible's lack of support for "Scientific Linux CERN";
 7 | # temporarily set distribution ID to "Scientific Linux"
 8 | - name: Patch `/etc/redhat-release`
 9 |   replace:
10 |         dest='/etc/redhat-release'
11 |         regexp='Scientific Linux CERN SLC'
12 |         replace='Scientific Linux'
13 |   when: 'ansible_distribution == "Scientific"'
14 | 
15 | 
16 | - name: Set host name to Ansible "inventory name"
17 |   hostname: 
18 |     name='{{inventory_hostname}}'
19 | 
20 | 
21 | # undo SLC workaround
22 | - name: Undo patch to `/etc/redhat-release`
23 |   replace:
24 |         dest='/etc/redhat-release'
25 |         regexp='Scientific Linux'
26 |         replace='Scientific Linux CERN SLC'
27 |   when: 'ansible_distribution == "Scientific"'
28 | 
29 | 
30 | - name: Check for cloud-init conf file
31 |   stat:
32 |     path='/etc/cloud/cloud.cfg'
33 |     follow=yes
34 |   register: cloud_cfg
35 |   tags:
36 |     - cloud-init
37 |     - hostname
38 |     - common
39 | 
40 | 
41 | - name: Ensure changes to hostname are not overwritten by cloud-init
42 |   lineinfile:
43 |     name='/etc/cloud/cloud.cfg'
44 |     backup=yes
45 |     regexp='^preserve_hostname:.*'
46 |     line='preserve_hostname{{__colon__}} true'
47 |     insertbefore=BOF
48 |   when: cloud_cfg.stat.exists
49 |   tags:
50 |     - cloud-init
51 |     - hostname
52 |     - common
53 | 
54 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/easybuild/tasks/deps-Debian.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | #
 4 | # Install EasyBuild OS dependencies on Debian/Ubuntu machines
 5 | #
 6 | 
 7 | - name: Install required dependencies of EasyBuild (Debian/Ubuntu)
 8 |   apt:
 9 |     name='{{item}}'
10 |     state=present
11 |   with_items:
12 |     # see: http://easybuild.readthedocs.io/en/latest/Installation.html#dependencies
13 |     #
14 |     # required dependencies
15 |     - python2.7
16 |     - g++
17 |     - gcc
18 |     - gfortran
19 |     - libc6-dev
20 |     - make
21 |     - tcsh
22 |     # required shell tools
23 |     - bzip2
24 |     - gzip
25 |     - tar
26 |     - unzip
27 |     - xz-utils
28 |     - patch
29 |     - mlocate
30 |     # SW needed to build toolchains and common recipes
31 |     - libibverbs-dev
32 |     - python2.7-dev
33 |     - python-pip
34 |     - python-virtualenv
35 |     - libgsl0-dev
36 |     - libcurl4-openssl-dev
37 |     - libxml2-dev
38 |     - libssl-dev
39 | 
40 | 
41 | - name: Install optional dependencies of EasyBuild (Debian/Ubuntu)
42 |   apt:
43 |     name='{{item}}'
44 |     state=present
45 |   with_items:
46 |     - git
47 |     - graphviz
48 |     - mercurial
49 |     - python-pygraph
50 |     - subversion
51 |   ignore_errors: True
52 | 
53 | 
54 | - name: Install "filtered dependency" software (Debian/Ubuntu)
55 |   apt:
56 |     name='{{ item }}'
57 |     state=present
58 |   with_items:
59 |     - libbz2-dev
60 |     - libncurses-dev
61 |     - libreadline-dev
62 |     - m4
63 |   ignore_errors: True
64 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/htcondor/templates/etc/condor/condor_config.local.j2:
--------------------------------------------------------------------------------
 1 | # THIS FILE IS CONTROLLED BY ANSIBLE
 2 | # any local modifications will be overwritten!
 3 | #
 4 | 
 5 | ##  What machine is your central manager?
 6 | 
 7 | CONDOR_HOST = {{ groups.condor_master[0] }}
 8 | 
 9 | ## Pool's short description
10 | 
11 | COLLECTOR_NAME = Personal Condor at {{ groups.condor_master[0] }}
12 | 
13 | ##  When is this machine willing to start a job? 
14 | 
15 | START = TRUE
16 | 
17 | 
18 | ##  When to suspend a job?
19 | 
20 | SUSPEND = FALSE
21 | 
22 | 
23 | ##  When to nicely stop a job?
24 | ##  (as opposed to killing it instantaneously)
25 | 
26 | PREEMPT = FALSE
27 | 
28 | 
29 | ##  When to instantaneously kill a preempting job
30 | ##  (e.g. if a job is in the pre-empting stage for too long)
31 | 
32 | KILL = FALSE
33 | 
34 | ##  This macro determines what daemons the condor_master will start and keep its watchful eyes on.
35 | ##  The list is a comma or space separated list of subsystem names
36 | 
37 | {% if inventory_hostname in groups.condor_master %}
38 | DAEMON_LIST = COLLECTOR, MASTER, NEGOTIATOR, SCHEDD, STARTD
39 | {% else %}
40 | DAEMON_LIST = MASTER, SCHEDD, STARTD
41 | {% endif %}
42 | 
43 | ## For a simple personal condor setup there are issues 
44 | ## with default AUTH.  
45 | 
46 | SEC_CLIENT_AUTHENTICATION_METHODS = CLAIMTOBE
47 | SEC_DEFAULT_AUTHENTICATION_METHODS = CLAIMTOBE
48 | 
49 | ALLOW_WRITE = $(ALLOW_WRITE), {% for host in groups.condor_workers %} {{ host }}, {% endfor %} $(CONDOR_HOST)
50 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/examples/nestedvars.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | #######################
 3 | # Nested VARS example #
 4 | #######################
 5 | #
 6 | # This example is used to explain how you can call nested variables
 7 | # (like, an item of a dictionary) using Ansible prior to v1.2. It may be
 8 | # a bit tricky because of the way Ansible does the expansion of the
 9 | # variables...
10 | #
11 | # Basically, to expand `hostvars[item]` where `hostvars` is a
12 | # dictionary-like object and `item` is a variable, you have to do:
13 | #
14 | #    ${hostvars.{$item}}
15 | #
16 | # Note that the expansion of the dictionary is done with `${...}`
17 | # while the expansion of the attribute of the dictionary is done with
18 | # `{$...}`.
19 | #
20 | # The following example will define a dictionary `hosts`:
21 | #
22 | #     hosts = { 'a': {'b': 'c'}}
23 | #
24 | # and try to print `hosts['a']['b']`
25 | #
26 | - hosts: all
27 |   vars:
28 |     hosts:
29 |       a:
30 |         b: c
31 |     x: a
32 |     y: b
33 |   tasks:
34 |     - debug: msg="${hosts.{$x}.{$y}}"
35 | 
36 | # The original use case I used for this was taken from this task, in
37 | # which I wanted to produce an ``/etc/hosts`` file starting from the
38 | # data stored in the inventory, adding or updating the IP addresses of
39 | # all the known hosts.
40 | #
41 | # - hosts: all
42 | #   connection: local
43 | #   tasks: 
44 | #     - lineinfile: dest=/tmp/hostfile regexp=".* $item .*" line="${hostvars.{$item}.ansible_default_ipv4.address} $item "
45 | #       with_items: $hostvars
46 | 
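47 | # For comparison (an addition to the original example, not part of it):
48 | # with Ansible 1.2 and later the same nested lookup is simply written
49 | # with Jinja2 bracket notation, e.g.:
50 | #
51 | # - hosts: all
52 | #   vars:
53 | #     hosts:
54 | #       a:
55 | #         b: c
56 | #     x: a
57 | #     y: b
58 | #   tasks:
59 | #     - debug: msg="{{ hosts[x][y] }}"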


--------------------------------------------------------------------------------
/elasticluster/__init__.py:
--------------------------------------------------------------------------------
 1 | #! /usr/bin/env python
 2 | #
 3 | #   Copyright (C) 2013, 2015, 2016 S3IT, University of Zurich
 4 | #
 5 | #   This program is free software: you can redistribute it and/or modify
 6 | #   it under the terms of the GNU General Public License as published by
 7 | #   the Free Software Foundation, either version 3 of the License, or
 8 | #   (at your option) any later version.
 9 | #
10 | #   This program is distributed in the hope that it will be useful,
11 | #   but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | #   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 | #   GNU General Public License for more details.
14 | #
15 | #   You should have received a copy of the GNU General Public License
16 | #   along with this program.  If not, see <http://www.gnu.org/licenses/>.
17 | #
18 | __author__ = 'Nicolas Baer '
19 | 
20 | 
21 | import logging
22 | 
23 | log = logging.getLogger("gc3.elasticluster")
24 | log.DO_NOT_FORK = False
25 | 
26 | # API
27 | from elasticluster.cluster import Cluster
28 | from elasticluster.repository import AbstractClusterRepository, MultiDiskRepository
29 | from elasticluster.providers import AbstractCloudProvider, AbstractSetupProvider
30 | from elasticluster.providers.ansible_provider import AnsibleSetupProvider
31 | from elasticluster.providers.ec2_boto import BotoCloudProvider
32 | from elasticluster.providers.openstack import OpenStackCloudProvider
33 | from elasticluster.providers.gce import GoogleCloudProvider
34 | from elasticluster.providers.azure_provider import AzureCloudProvider
35 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/easybuild/tasks/deps-RedHat.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | #
 4 | # Install EasyBuild OS dependencies on RHEL-compatible machines
 5 | #
 6 | 
 7 | - name: Install required dependencies of EasyBuild (RHEL-compatible)
 8 |   package:
 9 |     name='{{item}}'
10 |     state=present
11 |   with_items:
12 |     # see: http://easybuild.readthedocs.io/en/latest/Installation.html#dependencies
13 |     #
14 |     # required dependencies
15 |     - python
16 |     - "@Development tools"
17 |     - gcc
18 |     - gcc-c++
19 |     - gcc-gfortran
20 |     - glibc-devel
21 |     - make
22 |     # required shell tools
23 |     - bzip2
24 |     - gzip
25 |     - mlocate
26 |     - patch
27 |     - tar
28 |     - unzip
29 |     - xz
30 |     # SW needed to build toolchains and common recipes
31 |     - gsl-devel
32 |     - libcurl-devel
33 |     - libibverbs-devel
34 |     - libxml2-devel
35 |     - openssl-devel
36 |     - python-devel
37 |     - python-pip
38 |     - python-virtualenv
39 | 
40 | 
41 | - name: Install optional dependencies of EasyBuild (RHEL-compatible)
42 |   package:
43 |     name='{{item}}'
44 |     state=present
45 |   with_items:
46 |     - git
47 |     - graphviz
48 |     - mercurial
49 |     - subversion
50 |   ignore_errors: True
51 | 
52 | 
53 | - name: Install "filtered dependency" software (RHEL-compatible)
54 |   package:
55 |     name='{{ item }}'
56 |     state=present
57 |   with_items:
58 |     - bzip2-devel
59 |     - m4
60 |     - ncurses-devel
61 |     - ncurses-static
62 |     - readline-devel
63 |     - readline-static
64 |   ignore_errors: True
65 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/gridengine.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: GridEngine masternode Playbook
 4 |   hosts: gridengine_master
 5 |   tags:
 6 |     - gridengine
 7 |     - gridengine-master
 8 |   roles:
 9 |     - role: 'nis'
10 |       NIS_MASTER: "{{groups.gridengine_master[0]}}"
11 |     - role: gridengine-master
12 |     - role: nfs-server
13 |       NFS_EXPORTS:
14 |         - path: '/home'
15 |           clients: '{{groups.gridengine_worker}}'
16 |           options: 'rw,no_root_squash,async,no_subtree_check'
17 |         - path: '{{SGE_VAR}}/{{SGE_CELL}}/common'
18 |           clients: '{{groups.gridengine_worker + groups.gridengine_master}}'
19 |           options: 'rw,no_root_squash,no_subtree_check'
20 | 
21 | 
22 | - name: GridEngine worker nodes Playbook
23 |   tags:
24 |     - gridengine
25 |     - gridengine-exec
26 |   hosts: gridengine_clients:gridengine_worker
27 |   roles:
28 |     - role: 'nis'
29 |       NIS_MASTER: "{{groups.gridengine_master[0]}}"
30 |     # we need to explicitly run `gridengine-common` before `nfs-client` to set
31 |     # the `SGE_*` variables; having Ansible run the `gridengine-common` role as
32 |     # a dependency for `gridengine-exec` sets it too late and NFS mounts are
33 |     # not set up
34 |     - role: gridengine-common
35 |     - role: nfs-client
36 |       NFS_MOUNTS:
37 |         - fs: '{{groups.gridengine_master[0]}}:/home'
38 |           mountpoint: '/home'
39 |         - fs: '{{groups.gridengine_master[0]}}:{{SGE_VAR}}/{{SGE_CELL}}/common'
40 |           mountpoint: '{{SGE_VAR}}/{{SGE_CELL}}/common'
41 |     - role: gridengine-exec
42 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/easybuild/templates/etc/easybuild.cfg.j2:
--------------------------------------------------------------------------------
 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER
 2 | # local modifications will be overwritten
 3 | # the next time `elasticluster setup` is run!
 4 | #
 5 | 
 6 | 
 7 | [config]
 8 | 
 9 | # module files configuration
10 | modules-tool = Lmod
11 | module-syntax = Lua
12 | 
13 | # misc paths
14 | buildpath = /tmp
15 | installpath = {{EASYBUILD_PREFIX}}
16 | repositorypath = {{EASYBUILD_PREFIX}}/ebfiles
17 | sourcepath = {{EASYBUILD_PREFIX}}/sources
18 | 
19 | # enable dependency resolution; search for `.eb` files here
20 | robot = %(repositorypath)s
21 | robot-paths = %(DEFAULT_ROBOT_PATHS)s
22 | 
23 | # consider these dependencies already satisfied by the operating system
24 | # (this should include only very stable and CPU-independent SW)
25 | #filter-deps=bzip2,fontconfig,gzip,libreadline,libX11,libXext,libXft,libXinerama,libXt,M4,ncurses
26 | 
27 | # build these, but hide module files to avoid cluttering `module avail`
28 | # with seldom-used software
29 | #hide-deps=libjpeg-turbo,libpng,SQLite,xextproto
30 | 
31 | 
32 | [override]
33 | 
34 | # use the minimal toolchain that works
35 | # (including the "dummy" system compiler)
36 | minimal-toolchains = yes
37 | add-dummy-to-minimal-toolchains = yes
38 | 
39 | # allow all users in the `easybuild` group to deploy SW
40 | group-writable-installdir = yes
41 | group = easybuild
42 | set-gid-bit = yes
43 | umask = 002
44 | 
45 | # force "generic" optimizations that will work on any CPU type
46 | # unless users configured otherwise in ElastiCluster
47 | optarch={{ EASYBUILD_OPTARCH|default('GENERIC') }}
48 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/glusterfs-common/tasks/debian.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: Ensure APT support for HTTPS is installed
 4 |   tags:
 5 |     - gluster
 6 |     - glusterfs-common
 7 |   apt:
 8 |     name='apt-transport-https'
 9 |     state=present
10 |   when: is_debian_compatible
11 | 
12 | 
13 | - block:
14 |   - name: Download GlusterFS package signing key
15 |     get_url:
16 |       url='http://download.gluster.org/pub/gluster/glusterfs/{{glusterfs_version}}/LATEST/rsa.pub'
17 |       dest=/root/glusterfs.pub
18 |     become: yes
19 |     become_user: root
20 |   - name: Add GlusterFS package signing key
21 |     command:
22 |       /usr/bin/apt-key add /root/glusterfs.pub
23 |     become: yes
24 |     become_user: root
25 |   - name: add GlusterFS package repository (Debian)
26 |     apt_repository:
27 |       repo='deb http://download.gluster.org/pub/gluster/glusterfs/{{glusterfs_version}}/LATEST/Debian/{{ansible_lsb.codename}}/apt {{ansible_lsb.codename}} main'
28 |       mode=0444
29 |       update_cache=yes
30 |   when: is_debian
31 |   tags:
32 |     - gluster
33 |     - glusterfs-common
34 | 
35 | 
36 | - name: add GlusterFS package repository (Ubuntu)
37 |   apt_repository:
38 |     repo='ppa:gluster/glusterfs-{{glusterfs_version}}'
39 |     mode=0444
40 |     update_cache=yes
41 |   when: is_ubuntu
42 |   tags:
43 |     - gluster
44 |     - glusterfs-common
45 | 
46 | 
47 | - name: Install GlusterFS packages (Debian/Ubuntu)
48 |   apt:
49 |     name='{{item}}'
50 |     state=installed
51 |   with_items:
52 |     - attr
53 |   when: is_debian_or_ubuntu
54 |   tags:
55 |     - gluster
56 |     - glusterfs-common
57 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hive/templates/hive-site.xml.j2:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0"?>
 2 | 
 3 | <!--
 4 |   THIS FILE IS CONTROLLED BY ELASTICLUSTER
 5 |   local modifications will be overwritten
 6 |   the next time `elasticluster setup` is run!
 7 | -->
 8 | 
 9 | 
10 | 
11 | <configuration>
12 | 
13 | 
14 |   <property>
15 |     <name>javax.jdo.option.ConnectionURL</name>
16 |     <value>jdbc:postgresql://{{HIVE_METASTORE_DB_HOST}}/{{HIVE_METASTORE_DB_NAME}}</value>
17 |   </property>
18 | 
19 |   <property>
20 |     <name>javax.jdo.option.ConnectionDriverName</name>
21 |     <value>org.postgresql.Driver</value>
22 |   </property>
23 | 
24 |   <property>
25 |     <name>javax.jdo.option.ConnectionUserName</name>
26 |     <value>{{HIVE_METASTORE_DB_USER}}</value>
27 |   </property>
28 | 
29 |   <property>
30 |     <name>javax.jdo.option.ConnectionPassword</name>
31 |     <value>{{HIVE_METASTORE_DB_PASSWORD}}</value>
32 |   </property>
33 | 
34 |   <property>
35 |     <name>datanucleus.autoStartMechanism</name>
36 |     <value>SchemaTable</value>
37 |   </property>
38 | 
39 |   <property>
40 |     <name>datanucleus.autoCreateSchema</name>
41 |     <value>false</value>
42 |   </property>
43 | 
44 |   <!-- ... -->
54 | 
55 | </configuration>
56 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jupyter/files/usr/local/share/jupyter/kernels/pyspark2/startup.py:
--------------------------------------------------------------------------------
 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER
 2 | # local modifications will be overwritten
 3 | # the next time `elasticluster setup` is run!
 4 | #
 5 | 
 6 | # Configure the necessary Spark environment
 7 | #
 8 | # Originally from: http://ramhiser.com/2015/02/01/configuring-ipython-notebook-support-for-pyspark/
 9 | #
10 | import os
11 | import sys
12 | 
13 | join = os.path.join
14 | 
15 | if 'SPARK_HOME' not in os.environ:
16 |     os.environ['SPARK_HOME'] = '/usr/lib/spark'
17 | spark_home = os.environ['SPARK_HOME']
18 | 
19 | # by default Spark uses the "local" executor, use YARN instead
20 | os.environ.setdefault('MASTER', 'yarn-client')
21 | os.environ.setdefault('SPARK_JAR', '/usr/lib/spark/lib/spark-assembly.jar')
22 | os.environ.setdefault('SPARK_YARN_APP_JAR', '/dev/null')
23 | 
24 | # make `pyspark` and `py4j` available to this process
25 | spark_home_python = join(spark_home, "python")
26 | sys.path.insert(0, spark_home_python)
27 | sys.path.insert(0, join(spark_home_python, 'pyspark'))
28 | sys.path.insert(0, join(spark_home_python, 'lib/py4j-0.8.2.1-src.zip'))
29 | 
30 | # make `pyspark` and `py4j` available on PYTHONPATH so it's read by children processes
31 | spark_pythonpath = ('{spark_home_python}:{spark_home_python}/pyspark'.format(**locals()))
32 | if 'PYTHONPATH' in os.environ:
33 |     os.environ['PYTHONPATH'] += ':' + spark_pythonpath
34 | else:
35 |     os.environ['PYTHONPATH'] = spark_pythonpath
36 | 
37 | # Initialize PySpark to predefine the SparkContext variable 'sc'
38 | execfile(join(spark_home_python, 'pyspark/shell.py'))
39 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jupyter/files/usr/local/share/jupyter/kernels/pyspark3/startup.py:
--------------------------------------------------------------------------------
 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER
 2 | # local modifications will be overwritten
 3 | # the next time `elasticluster setup` is run!
 4 | #
 5 | 
 6 | # Configure the necessary Spark environment
 7 | #
 8 | # Originally from: http://ramhiser.com/2015/02/01/configuring-ipython-notebook-support-for-pyspark/
 9 | #
10 | import os
11 | import sys
12 | 
13 | join = os.path.join
14 | 
15 | if 'SPARK_HOME' not in os.environ:
16 |     os.environ['SPARK_HOME'] = '/usr/lib/spark'
17 | spark_home = os.environ['SPARK_HOME']
18 | 
19 | # by default Spark uses the "local" executor, use YARN instead
20 | os.environ.setdefault('MASTER', 'yarn-client')
21 | os.environ.setdefault('SPARK_JAR', '/usr/lib/spark/lib/spark-assembly.jar')
22 | os.environ.setdefault('SPARK_YARN_APP_JAR', '/dev/null')
23 | 
24 | # make `pyspark` and `py4j` available to this process
25 | spark_home_python = join(spark_home, "python")
26 | sys.path.insert(0, spark_home_python)
27 | sys.path.insert(0, join(spark_home_python, 'pyspark'))
28 | sys.path.insert(0, join(spark_home_python, 'lib/py4j-0.8.2.1-src.zip'))
29 | 
30 | # make `pyspark` and `py4j` available on PYTHONPATH so it's read by children processes
31 | spark_pythonpath = ('{spark_home_python}:{spark_home_python}/pyspark'.format(**locals()))
32 | if 'PYTHONPATH' in os.environ:
33 |     os.environ['PYTHONPATH'] += ':' + spark_pythonpath
34 | else:
35 |     os.environ['PYTHONPATH'] = spark_pythonpath
36 | 
37 | # Initialize PySpark to predefine the SparkContext variable 'sc'
38 | execfile(join(spark_home_python, 'pyspark/shell.py'))
39 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ceph/tasks/osd.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | - name: Check if partition is ceph-active
 3 |   when: ceph_devs.{{devs}}.state != "active"
 4 |   action: shell umount {{devs}}; umount {{osd_disk}}; ceph-disk-prepare --zap-disk {{osd_disk}}
 5 |   tags:
 6 |     - ceph
 7 | 
 8 | - name: create osd directory
 9 |   action: file dest={{osd_data}} state=directory
10 |   tags:
11 |     - ceph
12 | 
13 | - name: Mount osd disk
14 |   action: shell mount | grep {{devs}} || mount {{devs}} {{osd_data}}
15 |   tags:
16 |     - ceph
17 | 
18 | - name: Copying keyring from mon.0
19 |   action: shell scp root@{{groups.ceph_mon[0]}}:/etc/ceph/ceph.mon.keyring /etc/ceph/keyring
20 |           creates=/etc/ceph/keyring
21 |   tags:
22 |     - ceph
23 | 
24 | - name: Copy mon keyring
25 |   action: shell cp -a /etc/ceph/keyring {{osd_data}}/keyring
26 |           creates={{osd_data}}/keyring
27 |   tags:
28 |     - ceph
29 | 
30 | # ceph-disk prepare does not create all the files needed by
31 | # ceph-osd. Specifically, it does not create the whoami file
32 | # (containing the id of the node) and the `current` dir. However, the
33 | # ceph-osd --mkfs command does...
34 | 
35 | - name: 'Create (once more!) the fs on the osd data directory'
36 |   action: shell ceph-osd -i {{ceph_idx}}  -c /etc/ceph/ceph.conf -d --mkfs --mkjournal
37 |           creates={{osd_data}}/current
38 |   tags:
39 |     - ceph
40 | 
41 | - name: Create OSD
42 |   action: shell ceph osd dump | grep ^osd.{{ceph_idx}} || ceph osd create
43 |   tags:
44 |     - ceph
45 | 
46 | - name: Ensure ceph-osd is running
47 |   action: service name=ceph state=started
48 |   tags:
49 |     - ceph
50 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/nis/templates/etc/default/nis.j2:
--------------------------------------------------------------------------------
 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER
 2 | # local modifications will be overwritten
 3 | # the next time `elasticluster setup` is run!
 4 | #
 5 | 
 6 | 
 7 | #
 8 | # /etc/defaults/nis	Configuration settings for the NIS daemons.
 9 | #
10 | 
11 | # Are we a NIS server and if so what kind (values: false, slave, master)?
12 | NISSERVER={% if is_nis_master_server %}master{% elif is_nis_slave_server %}slave{% else %}false{% endif %}
13 | 
14 | 
15 | # Are we a NIS client?
16 | NISCLIENT={% if is_nis_client %}true{% else %}false{% endif %}
17 | 
18 | 
19 | # Location of the master NIS password file (for yppasswdd).
20 | # If you change this make sure it matches with /var/yp/Makefile.
21 | YPPWDDIR=/etc
22 | 
23 | # Do we allow the user to use ypchsh and/or ypchfn ? The YPCHANGEOK
24 | # fields are passed with -e to yppasswdd, see its manpage.
25 | # Possible values: "chsh", "chfn", "chsh,chfn"
26 | YPCHANGEOK=chsh
27 | 
28 | # NIS master server.  If this is configured on a slave server then ypinit
29 | # will be run each time NIS is started.
30 | NISMASTER='{% if is_nis_slave_server %}{{NIS_MASTER}}{% endif %}'
31 | 
32 | # Additional options to be given to ypbind when it is started.
33 | YPBINDARGS='-p 833 -no-dbus'
34 | 
35 | # Additional options to be given to ypserv when it is started.
36 | YPSERVARGS='-p 834'
37 | 
38 | # Additional options to be given to yppasswdd when it is started.  Note
39 | # that if -p is set then the YPPWDDIR above should be empty.
40 | YPPASSWDDARGS='-p 835'
41 | 
42 | # Additional options to be given to ypxfrd when it is started.
43 | YPXFRDARGS='-p 836'
44 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hadoop-common/tasks/main.yml:
--------------------------------------------------------------------------------
 1 | # hadoop-common/tasks/main.yml
 2 | ---
 3 | 
 4 | - name: Install Java JRE
 5 |   tags:
 6 |     - hadoop
 7 |   package:
 8 |     name=default-jre-headless
 9 |     state=present
10 | 
11 |     
12 | - name: Install Hadoop packages (common)
13 |   tags:
14 |     - hadoop
15 |   package:
16 |     name='{{item}}'
17 |     state=present
18 |   with_items:
19 |     - hadoop # Hadoop is a software platform for processing vast amounts of data
20 |     - hadoop-client # Hadoop client side dependencies
21 |     - hadoop-doc # Hadoop Documentation
22 |     - hadoop-hdfs-fuse # Mountable HDFS
23 | 
24 | 
25 | - name: Ensure Hadoop configuration directory exists
26 |   tags:
27 |     - hadoop
28 |   file:
29 |     path='{{HADOOP_CONF_DIR}}'
30 |     state=directory
31 |     
32 |     
33 | - name: Copy Hadoop/BigTop default configuration files
34 |   tags:
35 |     - hadoop
36 |   command:
37 |     'rsync -ax --update --backup /etc/hadoop/conf.empty/ {{HADOOP_CONF_DIR}}/'
38 | 
39 |   
40 | - name: Deploy Hadoop/ElastiCluster configuration files
41 |   tags:
42 |     - hadoop
43 |   template:
44 |     src='{{item}}.j2'
45 |     dest='{{HADOOP_CONF_DIR}}/{{item}}'
46 |   with_items:
47 |     - capacity-scheduler.xml
48 |     - core-site.xml
49 |     - hdfs-site.xml
50 |     - mapred-site.xml
51 |     - master
52 |     - slaves
53 |     - yarn-site.xml
54 | 
55 | 
56 | - name: Activate Hadoop/ElastiCluster configuration
57 |   alternatives:
58 |     name='hadoop-conf'
59 |     link='/etc/hadoop/conf'
60 |     path='{{HADOOP_CONF_DIR}}'
61 |   #command:
62 |   #  'update-alternatives --install /etc/hadoop/conf hadoop-conf {{HADOOP_CONF_DIR}} 90'
63 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/examples/drbd.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | ################
 3 | # DRBD example #
 4 | ################
 5 | #
 6 | # This is a module created by us, and present in the `modules`
 7 | # directory. This is just an example of how to use it; the module
 8 | # itself is documented in the comments at the beginning of its file.
 9 | #
10 | # This is a minimal example:
11 | 
12 | - hosts: all
13 |   tasks:
14 |     - action: drbd name=resourcename disk=/dev/drbd1
15 |               device=/dev/sdb
16 |               address0=1.1.1.1:7789 address1=1.1.1.2:7789 
17 | 
18 | # while this one also sets the optional variables. Note that the
19 | # two examples produce the same result: the values used in this
20 | # second example simply spell out the default behavior.
21 | 
22 | - hosts: all
23 |   tasks:
24 |     - action: drbd name=resourcename dest=/etc/drbd.d/resourcename.res 
25 |               disk0=/dev/drdb1 disk1=/dev/drdb1
26 |               device0=/dev/sdb device1=/dev/sdb
27 |               address0=1.1.1.1:7789 address1=1.1.1.2:7789 
28 |               peer0=1.1.1.1:7789 peer1=1.1.1.2:7789 
29 |               metadisk0=internal metadisk1=internal
30 | 
31 | # The output produced should be found in /etc/drbd.d/resourcename.res
32 | # and should look like:
33 | #
34 | #    resource resourcename {
35 | #      on 1.1.1.1:7789 {
36 | #        address 1.1.1.1:7789;
37 | #        device  /dev/sdb;
38 | #        disk    /dev/drdb1;
39 | #        meta-disk internal;
40 | #      }
41 | #      on 1.1.1.2:7789 {
42 | #        address 1.1.1.2:7789;
43 | #        device  /dev/sdb;
44 | #        disk    /dev/drdb1;
45 | #        meta-disk internal;
46 | #      }
47 | #    }
48 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/anaconda/library/ansible-conda/README.md:
--------------------------------------------------------------------------------
 1 | # Conda Ansible Module
 2 | 
 3 | Manage [conda][] installations of Python packages in [Ansible][] playbooks.
 4 | Put this module somewhere Ansible will find it
 5 | (like the `library/` directory next to your top level playbooks).
 6 | Usage is much like the built-in Ansible pip module.
 7 | This requires `conda` to already be installed somehow.
 8 | 
 9 | Examples:
10 | 
11 | ```yaml
12 | - name: install numpy via conda
13 |   conda: name=numpy state=latest
14 | 
15 | - name: install scipy 0.14 via conda
16 |   conda: name=scipy version="0.14"
17 | 
18 | - name: remove matplotlib from conda
19 |   conda: name=matplotlib state=absent
20 | ```
21 | 
22 | From `ansible-doc`:
23 | 
24 | ```
25 | > CONDA
26 | 
27 |   Manage Python libraries via conda. Can install, update, and remove
28 |   packages.
29 | 
30 | Options (= is mandatory):
31 | 
32 | - channels
33 |         Extra channels to use when installing packages [Default: None]
34 | 
35 | - executable
36 |         Full path to the conda executable [Default: None]
37 | 
38 | - extra_args
39 |         Extra arguments passed to conda [Default: None]
40 | 
41 | = name
42 |         The name of a Python library to install [Default: None]
43 | 
44 | - state
45 |         State in which to leave the Python package (Choices: present,
46 |         absent, latest) [Default: present]
47 | 
48 | - version
49 |         A specific version of a library to install [Default: None]
50 | 
51 | Notes:  Requires conda to already be installed. Will look under the home
52 |         directory for a conda executable.
53 | ```
54 | 
55 | [conda]: http://conda.pydata.org/
56 | [Ansible]: http://docs.ansible.com/index.html
57 | 
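58 | A further, hypothetical example showing the `executable` option for a conda
59 | installed outside the user's home directory (the `/opt/anaconda` path is only
60 | an illustration; adjust it to the actual install location):
61 | 
62 | ```yaml
63 | - name: install numpy via a system-wide conda
64 |   conda: name=numpy state=latest executable=/opt/anaconda/bin/conda
65 | ```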


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ntpd/README.rst:
--------------------------------------------------------------------------------
 1 | NTPd
 2 | ====
 3 | 
 4 | Install the `NTP daemon`_ to keep all hosts in the cluster agreeing on
 5 | timekeeping.
 6 | 
 7 | 
 8 | .. _`ntp daemon`:
 9 | 
10 | 
11 | Requirements
12 | ------------
13 | 
14 | All supported distributions of Linux include NTPd among the provided packages;
15 | no special setup is necessary.
16 | 
17 | 
18 | Role Variables
19 | --------------
20 | 
21 | The following variables may be set to alter the role behavior:
22 | 
23 | ``ntp_servers``
24 |   List of internet hosts to configure a 1-way association with.
25 |   Defaults to 4 randomly-chosen servers in the `pool.ntp.org` domain.
26 | 
27 | ``ntp_peers``
28 |   List of internet hosts to configure a 2-way association with (i.e., both hosts
29 |   can serve time to the other, depending on circumstances). Defaults to the list
30 |   of all hosts in the cluster.
31 | 
32 | 
33 | 
34 | 
35 | Example Playbook
36 | ----------------
37 | 
38 | The following example configures hosts in group ``servers`` to take time from
39 | host ``master001`` and to keep a symmetric (peer) association with hosts
40 | ``worker001`` and ``worker002``::
41 | 
42 |   - hosts: servers
43 |     roles:
44 |     - role: ntp
45 |       ntp_servers:
46 |       - 'master001'
47 |       ntp_peers:
48 |       - 'worker001'
49 |       - 'worker002'
50 | 
51 | 
52 | License
53 | -------
54 | 
55 | GPLv3
56 | 
57 | 
58 | Author Information and Credits
59 | ------------------------------
60 | 
61 | `Riccardo Murri `_ wrote the role playbook from
62 | scratch, for inclusion in the ElastiCluster_ playbook collection.
63 | 
64 | 
65 | .. References:
66 | 
67 | .. _ElastiCluster: http://elasticluster.readthedocs.io/
68 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/hadoop-common/templates/hdfs-site.xml.j2:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0"?>
 2 | 
 3 | <!--
 4 |   THIS FILE IS CONTROLLED BY ELASTICLUSTER
 5 |   local modifications will be overwritten
 6 |   the next time `elasticluster setup` is run!
 7 | -->
 8 | 
 9 | 
10 | 
11 | <configuration>
12 | 
13 |   <property>
14 |     <name>dfs.replication</name>
15 |     <value>1</value>{# FIXME: should be configurable! #}
16 |   </property>
17 | 
18 |   <property>
19 |     <name>dfs.datanode.du.reserved</name>
20 |     <value>1000000000</value>
21 |     <description>
22 |       Reserved space in bytes per volume. Always leave this much space
23 |       free for non-HDFS use.
24 |     </description>
25 |   </property>
26 | 
27 |   <property>
28 |     <name>dfs.permissions</name>
29 |     <value>false</value>
30 |   </property>
31 | 
32 |   <property>
33 |     <name>dfs.namenode.name.dir</name>
34 |     <value>file:////var/lib/hadoop-hdfs/cache/hdfs/dfs/name</value>
35 |     <description>NameNode directory for namespace and transaction logs storage.</description>
36 |   </property>
37 | 
38 |   <property>
39 |     <name>dfs.datanode.data.dir</name>
40 |     <value>file:////var/lib/hadoop-hdfs/cache/hdfs/dfs/data</value>
41 |     <description>DataNode directory</description>
42 |   </property>
43 | 
44 |   <property>
45 |     <name>dfs.datanode.use.datanode.hostname</name>
46 |     <value>false</value>
47 |   </property>
48 | 
49 |   <property>
50 |     <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
51 |     <value>false</value>
52 |   </property>
53 | 
54 | </configuration>
55 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/iptables/tasks/init-Debian.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: Load configuration and service names (Debian/Ubuntu)
 4 |   set_fact:
 5 |     # map config file names across distributions
 6 |     configfile:
 7 |       'etc/iptables/rules.v4': '/etc/iptables/rules.v4'
 8 |       'etc/iptables/rules.v6': '/etc/iptables/rules.v6'
 9 |     # same for init service names
10 |     service:
11 |       'iptables':  'netfilter-persistent'
12 |       'ip6tables': 'netfilter-persistent'
13 |     # on Debian 8, issuing `systemctl reload netfilter-persistent`
14 |     # fails with error "Failed to reload netfilter-persistent.service:
15 |     # Job type reload is not applicable for unit
16 |     # netfilter-persistent.service."  However, using `restart` instead
17 |     # of `reload` could break existing connections (not good when
18 |     # resizing), so do nothing and hope for the best...
19 |     reload: 'no-op'
20 |   when: is_ubuntu_15_10_or_later or is_debian_8_or_later
21 | 
22 | 
23 | - name: Load configuration and service names (older Debian/Ubuntu)
24 |   set_fact:
25 |     # map config file names across distributions
26 |     configfile:
27 |       'etc/iptables/rules.v4': '/etc/iptables/rules.v4'
28 |       'etc/iptables/rules.v6': '/etc/iptables/rules.v6'
29 |     # same for init service names
30 |     service:
31 |       'iptables':  'iptables-persistent'
32 |       'ip6tables': 'iptables-persistent'
33 |     reload: 'reload iptables'
34 |   when: not (is_ubuntu_15_10_or_later or is_debian_8_or_later)
35 | 
36 | 
37 | - name: Load package names (Debian/Ubuntu)
38 |   set_fact:
39 |     packages:
40 |       - iptables
41 |       - iptables-persistent
42 |   when: is_debian_compatible
43 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/gridengine-master/files/all.q.conf:
--------------------------------------------------------------------------------
 1 | qname                 all.q
 2 | hostlist              NONE
 3 | seq_no                0
 4 | load_thresholds       np_load_avg=1.75
 5 | suspend_thresholds    NONE
 6 | nsuspend              1
 7 | suspend_interval      00:05:00
 8 | priority              0
 9 | min_cpu_interval      00:05:00
10 | processors            UNDEFINED
11 | qtype                 BATCH INTERACTIVE
12 | ckpt_list             NONE
13 | pe_list               make
14 | rerun                 FALSE
15 | slots                 1
16 | tmpdir                /tmp
17 | shell                 /bin/csh
18 | prolog                NONE
19 | epilog                NONE
20 | shell_start_mode      posix_compliant
21 | starter_method        NONE
22 | suspend_method        NONE
23 | resume_method         NONE
24 | terminate_method      NONE
25 | notify                00:00:60
26 | owner_list            NONE
27 | user_lists            NONE
28 | xuser_lists           NONE
29 | subordinate_list      NONE
30 | complex_values        NONE
31 | projects              NONE
32 | xprojects             NONE
33 | calendar              NONE
34 | initial_state         default
35 | s_rt                  INFINITY
36 | h_rt                  INFINITY
37 | s_cpu                 INFINITY
38 | h_cpu                 INFINITY
39 | s_fsize               INFINITY
40 | h_fsize               INFINITY
41 | s_data                INFINITY
42 | h_data                INFINITY
43 | s_stack               INFINITY
44 | h_stack               INFINITY
45 | s_core                INFINITY
46 | h_core                INFINITY
47 | s_rss                 INFINITY
48 | h_rss                 INFINITY
49 | s_vmem                INFINITY
50 | h_vmem                INFINITY
51 | 


--------------------------------------------------------------------------------
/update_storage.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env python
 2 | # -*- coding: utf-8 -*-# 
 3 | # @(#)fix_storage.py
 4 | # 
 5 | # 
 6 | # Copyright (C) 2013 S3IT, University of Zurich. All rights reserved.
 7 | # 
 8 | # 
 9 | # This program is free software; you can redistribute it and/or modify it
10 | # under the terms of the GNU General Public License as published by the
11 | # Free Software Foundation; either version 2 of the License, or (at your
12 | # option) any later version.
13 | #
14 | # This program is distributed in the hope that it will be useful, but
15 | # WITHOUT ANY WARRANTY; without even the implied warranty of
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17 | # General Public License for more details.
18 | #
19 | # You should have received a copy of the GNU General Public License along
20 | # with this program; if not, write to the Free Software Foundation, Inc.,
21 | # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 | 
23 | __docformat__ = 'reStructuredText'
24 | 
25 | import json
26 | import os
27 | 
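# Older ElastiCluster storage files keep two separate node lists,
# 'frontend' and 'compute'; newer versions expect a single 'nodes'
# list where each entry carries a 'type' field.  This script rewrites
# the JSON files under ~/.elasticluster/storage in place accordingly.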
28 | storagedir = os.path.expanduser('~/.elasticluster/storage')
29 | 
30 | def fix_storage_file(path):
31 |     data = json.load(open(path, 'r'))
32 |     if 'nodes' in data:
33 |         print("Storage file already fixed")
34 |         return None
35 | 
36 |     for d in data['frontend']:
37 |         d['type'] = 'frontend'
38 |     for d in data['compute']:
39 |         d['type'] = 'compute'
40 |     data['nodes'] = data['frontend'] + data['compute']
41 |     del data['frontend']
42 |     del data['compute']
43 |     open(path, 'w').write(json.dumps(data))
44 |     
45 | 
46 | if __name__ == "__main__":
47 |     for fname in os.listdir(storagedir):
48 |         fix_storage_file(os.path.join(storagedir, fname))
49 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/ceph/templates/etc/ceph/ceph.conf.j2:
--------------------------------------------------------------------------------
 1 | # THIS FILE IS CONTROLLED BY ANSIBLE
 2 | # any local modifications will be overwritten!
 3 | #
 4 | 
 5 | [global]
 6 |     auth cluster required = cephx
 7 |     auth service required = cephx
 8 |     auth client required = cephx
 9 | 
10 |     fsid = {{fsid}}
11 | 
12 |     # mon initial members = {% for host in groups['ceph_mon']|sort %}{{ host }}{% if not loop.last %},{% endif %}{% endfor %}
13 | 
14 |     # debug ms = 1
15 | 
16 | {% for host in groups['ceph_mon']|sort %}
17 | [mon.{{ loop.index0 }}]
18 |     # debug mon = 20
19 |     # debug paxos = 20
20 |     # debug auth = 20
21 | 
22 |     host = {{ host }}
23 |     mon addr = {{ hostvars[host].ansible_default_ipv4.address }}:6789
24 |     mon data = /var/lib/ceph/mon/ceph-{{ loop.index0 }}
25 | {% endfor %}
26 | 
27 | {% for host in groups['ceph_osd']|sort %}
28 | [osd.{{ loop.index0 }}]
29 |     host = {{ host }}
30 | 
31 |     osd journal size = 1000
32 | 
33 |     osd mkfs type = xfs
34 |     devs = {{osd_devs}}
35 |     osd addr = {{ hostvars[host].ansible_default_ipv4.address }}:6789
36 |     osd data = /var/lib/ceph/osd/ceph-{{ loop.index0 }}
37 | 
38 | {% endfor %}
39 | 
40 | {% for host in groups['ceph_mds']|sort %}
41 | [mds.{{ loop.index0 }}]
42 |     host = {{ host }}
43 |     mds addr = {{ hostvars[host].ansible_default_ipv4.address }}:6789
44 |     mds data = /var/lib/ceph/mds/ceph-{{ loop.index0 }}
45 | {% endfor %}
46 | 
47 | {% if 'ceph_rgw' in groups %}
48 | [client.radosgw.gateway]
49 |     host = {{ groups['ceph_rgw'][0] }}
50 |     keyring = /etc/ceph/keyring
51 |     rgw socket path = /tmp/radosgw.sock
52 |     log file = /var/log/ceph/radosgw.log
53 |     rgw print continue = false
54 | {% endif %}
55 | 
56 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/htcondor/tasks/common.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | # Unfortunately, apt_repository also wants to add source repositories,
 3 | # which are not available for HTCondor, so we have to manually create
 4 | # the repository file.
 5 | 
 6 | - name: add HTCondor public package repositories
 7 |   action: template 
 8 |           src=htcondor/templates/etc/apt/sources.list.d/htcondor.list.j2
 9 |           dest=/etc/apt/sources.list.d/htcondor.list
10 |           owner=root group=root mode=0644
11 |   when: is_debian_or_ubuntu
12 |   register: repository_updated
13 |   tags:
14 |     - htcondor
15 | 
16 | - name: update package cache
17 |   apt:
18 |     update_cache=yes
19 |     cache_valid_time=0
20 |   when: is_debian_or_ubuntu and repository_updated.changed
21 |   tags:
22 |     - htcondor
23 | 
24 | - name: prepare HTCondor DebConf template
25 |   action: template
26 |           src=htcondor/templates/htcondor.debconf.j2
27 |           dest=/tmp/htcondor.debconf
28 |           owner=root group=root mode=0644
29 |   tags: 
30 |     - htcondor
31 |   when: is_debian_or_ubuntu
32 | 
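# DEBCONF_DB_FALLBACK points debconf at the pre-seeded answers file
# generated above, so the package configures itself non-interactively
# (see debconf(7) for the File{...} driver syntax).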
33 | - name: install HTCondor w/ preconfigured template
34 |   apt:
35 |     name=condor
36 |     state=present
37 |     force=yes
38 |   environment:
39 |     DEBCONF_DB_FALLBACK: 'File{/tmp/htcondor.debconf}'
40 |   tags: 
41 |     - htcondor
42 |   when: is_debian_or_ubuntu
43 | 
44 | - name: Fix configuration file
45 |   action: template
46 |           src=htcondor/templates/etc/condor/condor_config.local.j2
47 |           dest=/etc/condor/condor_config.local
48 |           owner=nobody group=nogroup mode=0644
49 |   tags: 
50 |     - htcondor
51 | 
52 | - name: start HTCondor daemon
53 |   action: service name=condor state=started
54 |   tags: 
55 |     - htcondor
56 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/postgresql/templates/pg_hba.conf.j2:
--------------------------------------------------------------------------------
 1 | # PostgreSQL Client Authentication Configuration File
 2 | # ===================================================
 3 | #
 4 | # Refer to the "Client Authentication" section in the PostgreSQL
 5 | # documentation for a complete description of this file.  A short
 6 | # synopsis follows.
 7 | #
 8 | # This file controls: which hosts are allowed to connect, how clients
 9 | # are authenticated, which PostgreSQL user names they can use, which
10 | # databases they can access.  Records take one of these forms:
11 | #
12 | # local      DATABASE  USER  METHOD  [OPTIONS]
13 | # host       DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
14 | # hostssl    DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
15 | # hostnossl  DATABASE  USER  ADDRESS  METHOD  [OPTIONS]
16 | #
17 | # TYPE  DATABASE        USER            ADDRESS                 METHOD
18 | 
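# Note: records are matched first-to-last; the first entry that matches
# a connection attempt is the one used for authentication.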
19 | # Default:
20 | {% for connection in postgresql_pg_hba_default %}
21 | {% if connection.comment is defined %}
22 | # {{connection.comment}}
23 | {% endif %}
24 | {{connection.type}}  {{connection.database}}  {{connection.user}}  {{connection.address}}  {{connection.method}}
25 | {% endfor %}
26 | 
27 | # Password hosts
28 | {% for host in postgresql_pg_hba_passwd_hosts %}
29 | host  all  all  {{host}}  password
30 | {% endfor %}
31 | 
32 | # Trusted hosts
33 | {% for host in postgresql_pg_hba_trust_hosts %}
34 | host  all  all  {{host}}  trust
35 | {% endfor %}
36 | 
37 | # User custom
38 | {% for connection in postgresql_pg_hba_custom %}
39 | {% if connection.comment is defined %}
40 | # {{connection.comment}}
41 | {% endif %}
42 | {{connection.type}}  {{connection.database}}  {{connection.user}}  {{connection.address}}  {{connection.method}}
43 | {% endfor %}
44 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/jupyter/tasks/python.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
 3 | - name: Check if the Python executable {{python_exe}} is installed
 4 |   tags:
 5 |     - jupyter
 6 |     - kernels
 7 |     - python
 8 |   stat:
 9 |     path='{{python_exe}}'
10 |   register: python_exe_path
11 | 
12 |   
13 | - name: Determine version of Python interpreter {{python_exe}}
14 |   tags:
15 |     - jupyter
16 |     - kernels
17 |     - python
18 |   command:
19 |     '{{python_exe}} -c "import sys; sys.stdout.write(str(sys.version_info.major))"'
20 |   register: python_version
21 |   when: python_exe_path.stat.exists
22 |   
23 |   
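# 'python -m ipykernel install' registers this interpreter as a system-wide
# Jupyter kernel; the major version detected above is only used to label
# the task.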
24 | - name: Install Python{{python_version.stdout}} Jupyter kernel
25 |   tags:
26 |     - jupyter
27 |     - kernels
28 |     - python
29 |   command:
30 |     '{{python_exe}} -m ipykernel install'
31 |   become: yes
32 |   when: python_exe_path.stat.exists
33 | 
34 | 
35 | - name: Load additional package names
36 |   set_fact:
37 |     jupyter_additional_os_packages:
38 |       Debian:
39 |         # PostGreSQL
40 |         - libpq-dev
41 |         # MySQL compatible
42 |         - libmysqlclient-dev
43 |       RedHat:
44 |         - postgresql-devel
45 |     jupyter_additional_python_packages:
46 |       - ipython-sql
47 |       - psycopg2
48 |       - mysqlclient
49 | 
50 | 
51 | - name: Install additional %-magic modules and plugins (OS dependencies)
52 |   package:
53 |     name: '{{item}}'
54 |     state: present
55 |   with_items: '{{jupyter_additional_os_packages[ansible_os_family]}}'
56 | 
57 | 
58 | - name: Install additional %-magic modules and plugins (Python package)
59 |   pip:
60 |     name: '{{item}}'
61 |     state: present
62 |     executable: '{{python_exe|dirname}}/pip'
63 |   with_items: '{{jupyter_additional_python_packages}}'
64 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/slurm.yml:
--------------------------------------------------------------------------------
 1 | ---
 2 | 
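# The master runs the NIS server and exports /home over NFS; worker and
# submit nodes bind to the same NIS domain and mount /home from the master.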
 3 | - name: Slurm master Playbook
 4 |   hosts: slurm_master
 5 |   roles:
 6 |     - role: 'nis'
 7 |       NIS_MASTER: "{{groups.slurm_master[0]}}"
 8 |     - role: 'nfs-server'
 9 |       NFS_EXPORTS:
10 |         - path: '/home'
11 |           clients: "{{groups.slurm_worker + groups.slurm_submit|default([])}}"
12 |     - slurm-master
13 | 
14 |       
15 | - name: Slurm worker nodes Playbook
16 |   hosts: slurm_worker
17 |   roles:
18 |     - role: 'nis'
19 |       NIS_MASTER: "{{groups.slurm_master[0]}}"
20 |     - role: 'nfs-client'
21 |       NFS_MOUNTS:
22 |         - fs: '{{groups.slurm_master[0]}}:/home'
23 |           mountpoint: '/home'
24 |     - slurm-worker
25 | 
26 | 
27 | - name: Slurm submit nodes Playbook
28 |   hosts: slurm_submit:slurm_client
29 |   roles:
30 |     - role: 'nis'
31 |       NIS_MASTER: "{{groups.slurm_master[0]}}"
32 |     - role: 'nfs-client'
33 |       NFS_MOUNTS:
34 |         - fs: '{{groups.slurm_master[0]}}:/home'
35 |           mountpoint: '/home'
36 |     - slurm-client
37 | 
38 |     
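# The worker service name differs across distributions: 'slurmd' on
# systemd-based Debian/Ubuntu and on RHEL/CentOS 7, 'slurm-llnl' on older
# Debian/Ubuntu, and 'slurm' on RHEL/CentOS 6.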
39 | - name: Restart SLURMd after all config is done
40 |   hosts: slurm_worker
41 |   tasks:
42 |     - service:
43 |         name=slurmd
44 |         state=restarted
45 |       when: is_debian_compatible and (is_debian_8_or_later or is_ubuntu_15_10_or_later)
46 |     - service:
47 |         name=slurm-llnl
48 |         state=restarted
49 |       when: is_debian_compatible and not (is_debian_8_or_later or is_ubuntu_15_10_or_later)
50 |     - service:
51 |         name=slurmd
52 |         state=restarted
53 |       when: is_rhel7_compatible
54 |     - service:
55 |         name=slurm
56 |         state=restarted
57 |       when: is_rhel6_compatible
58 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/pvfs2/files/pvfs2.debian.init:
--------------------------------------------------------------------------------
 1 | #!/bin/sh
 2 | #
 3 | # description: pvfs2-server is the server component of PVFS2
 4 | #
 5 | # chkconfig: 345 35 55
 6 | 
 7 | . /lib/lsb/init-functions
 8 | 
 9 | # set these if you want to hard code the location of the config files:
10 | PVFS2_FS_CONF=
11 | # override this if your server binary resides elsewhere
12 | PVFS2SERVER=/usr/local/sbin/pvfs2-server
13 | # override this if you want servers to automatically pick a conf file,
14 | #   but you just need to specify what directory they are in
15 | PVFS2_CONF_PATH=/etc
16 | 
17 | # the server will record its PID in this file
18 | PVFS2_PIDFILE=/var/run/pvfs2.pid
19 | 
20 | # verify presence of server binary
21 | if ! [ -x ${PVFS2SERVER} ]; then
22 | 	echo "Error: could not find executable ${PVFS2SERVER}"
23 | 	exit 1
24 | fi
25 | 
26 | # look for fs conf
27 | if test "x$PVFS2_FS_CONF" = x
28 | then
29 | 	PVFS2_FS_CONF=${PVFS2_CONF_PATH}/pvfs2-fs.conf
30 | fi
31 | if ! [ -r ${PVFS2_FS_CONF} ]; then
32 | 	echo "Error: could not read ${PVFS2_FS_CONF}"
33 | 	exit 1
34 | fi
35 | 
36 | # See how we were called.
37 | case "$1" in
38 |   start)
39 | 	echo -n "Starting PVFS2 server: "
40 | 	start-stop-daemon --start --exec ${PVFS2SERVER} -- -p ${PVFS2_PIDFILE} ${PVFS2_FS_CONF}
41 | 	echo
42 | 	;;
43 |   stop)
44 | 	echo -n "Stopping PVFS2 server: "
45 | 	start-stop-daemon --stop --oknodo --pidfile ${PVFS2_PIDFILE}
46 | 	echo
47 | 	;;
48 |   status)
49 |         status_of_proc -p ${PVFS2_PIDFILE} ${PVFS2SERVER} pvfs2-server && exit 0 || exit $?
50 | 	;;
51 |   restart)
52 | 	$0 stop
53 | 	# give server time to die cleanly
54 | 	sleep 2
55 | 	$0 start
56 | 	;;
57 |   *)
58 | 	echo "Usage: $0 {start|stop|status|restart}"
59 | 	exit 1
60 | esac
61 | 
62 | exit 0
63 | 
64 | 


--------------------------------------------------------------------------------
/elasticluster/share/playbooks/roles/iptables/templates/etc/iptables/rules.v4.j2:
--------------------------------------------------------------------------------
 1 | # THIS FILE IS CONTROLLED BY ELASTICLUSTER
 2 | # local modifications will be overwritten
 3 | # the next time `elasticluster setup` is run!
 4 | #
 5 | 
 6 | # This file defines iptables netfilter rules,
 7 | # to be loaded with `iptables-restore < $this_file`
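# (on Debian/Ubuntu this is done at boot by the iptables-persistent /
# netfilter-persistent service; see tasks/init-Debian.yml)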
 8 | 
 9 | *filter
10 | 
11 | ## create queues and set default policy
12 | :INPUT {{default_input_policy}} [0:0]
13 | :OUTPUT ACCEPT [0:0]
14 | :FORWARD {{default_forward_policy}} [0:0]
15 | 
16 | 
17 | ## OUTPUT queue -- outbound traffic
18 | 
19 | # allow all outbound traffic by default policy, no rules to add
20 | 
21 | 
22 | ## FORWARD queue -- only relevant if this host is a gateway
23 | 
24 | # only apply default policy, no rules to add
25 | 
26 | 
27 | ## INPUT queue -- inbound traffic
28 | # accept all inbound packets related to existing connections
29 | # (this rule comes first as it's likely the one that is hit most times)
30 | -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
31 | 
32 | # allow all loopback (lo0) traffic
33 | -A INPUT -i lo -j ACCEPT
34 | 
35 | # Allow all incoming connections from ``trusted_hosts``
36 | {% for host in trusted_hosts|default(groups.all)  %}
37 | {%   if 'address' in hostvars[host].ansible_default_ipv4 %}
38 | -A INPUT -s {{ hostvars[host].ansible_default_ipv4.address }} -j ACCEPT
39 | {%   endif %}
40 | {% endfor %}
41 | 
42 | # Allow all incoming ssh connections
43 | -A INPUT -p tcp --dport 22 -j ACCEPT
44 | 
45 | {% if default_input_policy != 'ACCEPT' %}
46 | # reject all other inbound traffic, but limit the number of rejections
47 | # to avoid becoming a DDoS amplifier
48 | -A INPUT -m limit --limit 6/min -j REJECT --reject-with icmp-admin-prohibited
49 | {% endif %}
50 | 
51 | ## all done, enable rules!
52 | COMMIT
53 | 


--------------------------------------------------------------------------------