├── .gitignore ├── README.md ├── Vagrantfile ├── ansible.cfg ├── ansible_modules └── alioss.py ├── chatops-inventory ├── chatops-playbook.yml ├── local-inventory ├── playbook.yml ├── requirements.txt ├── roles ├── ansible-hubot │ ├── .gitignore │ ├── .travis.yml │ ├── CHANGELOG.md │ ├── CONTRIBUTORS.md │ ├── LICENSE │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── examples │ │ ├── README_VAGRANT.md │ │ ├── Vagrantfile │ │ ├── hosts.example │ │ ├── site.yml │ │ └── vagrant_hosts │ ├── files │ │ └── scripts │ │ │ ├── caseofmondays.coffee │ │ │ ├── dogatcomputer.coffee │ │ │ └── ignoreme.coffee │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── molecule.yml │ ├── playbook.yml │ ├── tasks │ │ ├── install_packages_Debian.yml │ │ ├── install_packages_RedHat.yml │ │ └── main.yml │ ├── templates │ │ ├── external-scripts.json.j2 │ │ ├── hubot-scripts.json.j2 │ │ ├── hubot.conf.j2 │ │ ├── hubot.env.j2 │ │ ├── hubot.init.j2 │ │ ├── hubot.service.j2 │ │ └── start-hubot.sh.j2 │ ├── tests │ │ ├── inventory │ │ ├── test.yml │ │ └── test_hubot.py │ └── version.txt ├── ansible-role-firewall │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── disable-other-firewalls.yml │ │ └── main.yml │ ├── templates │ │ ├── firewall.bash.j2 │ │ ├── firewall.init.j2 │ │ └── firewall.unit.j2 │ └── tests │ │ └── test.yml ├── ansible-role-gitlab │ ├── .gitignore │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── gitlab.rb.j2 │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ ├── Debian.yml │ │ └── RedHat.yml ├── ansible-role-mongodb │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── mongod.conf.j2 │ │ ├── mongod.service │ │ └── upset.js.j2 │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ ├── Debian.yml │ │ ├── RedHat.yml │ │ ├── Ubuntu.yml │ │ └── main.yml ├── ansible-role-nodejs │ ├── .gitignore │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── setup-Debian.yml │ │ └── setup-RedHat.yml │ ├── templates │ │ └── npm.sh.j2 │ └── tests │ │ ├── README.md │ │ ├── test-latest.yml │ │ └── test.yml ├── ansible-role-php │ ├── .gitignore │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── configure-apcu.yml │ │ ├── configure-fpm.yml │ │ ├── configure-opcache.yml │ │ ├── configure.yml │ │ ├── install-from-source.yml │ │ ├── main.yml │ │ ├── setup-Debian.yml │ │ └── setup-RedHat.yml │ ├── templates │ │ ├── apc.ini.j2 │ │ ├── fpm-init.j2 │ │ ├── opcache.ini.j2 │ │ ├── php-fpm.conf.j2 │ │ ├── php.ini.j2 │ │ └── www.conf.j2 │ ├── tests │ │ ├── README.md │ │ ├── requirements.yml │ │ ├── test-source.yml │ │ └── test.yml │ └── vars │ │ ├── Debian.yml │ │ └── RedHat.yml ├── ansible-zabbix-agent │ ├── .gitignore │ ├── .travis.yml │ ├── CHANGELOG.md │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── sample.conf │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── molecule.yml │ ├── playbook.yml │ ├── setup.cfg │ ├── tasks │ │ ├── Debian.yml │ │ ├── RedHat.yml │ │ ├── Suse.yml │ │ ├── main.yml │ │ └── 
userparameter.yml │ ├── templates │ │ ├── zabbix-agent.service │ │ └── zabbix_agentd.conf.j2 │ ├── tests │ │ ├── inventory │ │ ├── test.yml │ │ └── test_docker.py │ ├── travis.yml │ └── vars │ │ ├── Debian.yml │ │ ├── RedHat.yml │ │ ├── Suse.yml │ │ └── zabbix.yml ├── ansible-zabbix-server │ ├── .gitignore │ ├── .travis.yml │ ├── CHANGELOG.md │ ├── README.md │ ├── ansible.cfg │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── inventory │ ├── meta │ │ └── main.yml │ ├── molecule.yml │ ├── playbook.yml │ ├── requirements.yml │ ├── setup.cfg │ ├── tasks │ │ ├── Debian.yml │ │ ├── RedHat.yml │ │ ├── main.yml │ │ ├── mysql.yml │ │ └── postgresql.yml │ ├── templates │ │ ├── zabbix-server.service │ │ └── zabbix_server.conf.j2 │ ├── tests │ │ ├── inventory │ │ ├── test.yml │ │ └── test_default.py │ └── vars │ │ ├── Debian.yml │ │ ├── RedHat.yml │ │ └── main.yml ├── common │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── Centos-6.repo │ │ ├── Centos-7.repo │ │ ├── hosts │ │ └── sources16.list │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── centos.yml │ │ ├── main.yml │ │ └── ubuntu.yml │ ├── templates │ │ └── yum.conf │ └── vars │ │ └── main.yml ├── hadoop │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── authorized_keys │ │ ├── id_rsa │ │ └── id_rsa.pub │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── etc │ │ │ └── hadoop │ │ │ │ ├── capacity-scheduler.xml │ │ │ │ ├── configuration.xsl │ │ │ │ ├── container-executor.cfg │ │ │ │ ├── core-site.xml │ │ │ │ ├── hadoop-env.cmd │ │ │ │ ├── hadoop-env.sh │ │ │ │ ├── hadoop-metrics.properties │ │ │ │ ├── hadoop-metrics2.properties │ │ │ │ ├── hadoop-policy.xml │ │ │ │ ├── hdfs-site.xml │ │ │ │ ├── httpfs-env.sh │ │ │ │ ├── httpfs-log4j.properties │ │ │ │ ├── httpfs-signature.secret │ │ │ │ ├── httpfs-site.xml │ │ │ │ ├── kms-acls.xml │ │ │ │ ├── kms-env.sh │ │ │ │ ├── kms-log4j.properties │ │ │ │ ├── kms-site.xml │ │ │ │ ├── log4j.properties │ │ │ │ ├── mapred-env.cmd │ │ │ │ ├── mapred-env.sh │ │ │ │ ├── mapred-queues.xml.template │ │ │ │ ├── mapred-site.xml │ │ │ │ ├── slaves │ │ │ │ ├── ssl-client.xml.example │ │ │ │ ├── ssl-server.xml.example │ │ │ │ ├── yarn-env.cmd │ │ │ │ ├── yarn-env.sh │ │ │ │ └── yarn-site.xml │ │ ├── hadoop-datanode.service │ │ ├── hadoop-namenode.service │ │ ├── ssh.conf │ │ ├── yarn-nodemanager.service │ │ └── yarn-resourcemanager.service │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── hbase │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── authorized_keys │ │ ├── hadoop-native-64-2.6.0.tar │ │ ├── id_rsa │ │ └── id_rsa.pub │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── conf │ │ │ ├── backup-masters │ │ │ ├── hadoop-metrics2-hbase.properties │ │ │ ├── hbase-env.cmd │ │ │ ├── hbase-env.sh │ │ │ ├── hbase-policy.xml │ │ │ ├── hbase-site.xml │ │ │ ├── log4j.properties │ │ │ └── regionservers │ │ ├── hbase-master.service │ │ ├── hbase-regionserver.service │ │ └── ssh.conf │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── jdk8 │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── main.yml ├── jenkins-agent │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ 
├── tasks │ │ └── main.yml │ ├── templates │ │ └── swarm-client.service │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── jenkins-openresty-conf │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── logserver.crt │ │ └── logserver.key │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── jenkins.conf │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── jenkins │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── plugins.yml │ │ └── settings.yml │ └── templates │ │ ├── basic-security.groovy │ │ └── jenkins.service ├── kafka-ansible-role │ ├── .gitignore │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── build.yml │ │ ├── configure.yml │ │ └── main.yml │ └── templates │ │ ├── kafka.service │ │ ├── log4j.properties.j2 │ │ └── server.properties.j2 ├── mysql-ansible │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ └── secure-installation.yml │ ├── templates │ │ ├── my.cnf.j2 │ │ ├── mysql-server.sh │ │ └── mysqld.service │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── openresty │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── nginx │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── log_cut.sh │ │ ├── nginx.conf │ │ └── nginx.service │ └── vars │ │ ├── main.yml │ │ └── ubuntu16.yml ├── os-init │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── pinpoint-hbase-init │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── hbase-create.hbase │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── pinpoint-server-ansible │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── install-collector.yml │ │ ├── install-tomcat.yml │ │ ├── install-web.yml │ │ └── main.yml │ ├── templates │ │ ├── hbase.properties │ │ ├── pinpoint-collector-server.xml │ │ ├── pinpoint-collector.properties │ │ ├── pinpoint-collector.service │ │ ├── pinpoint-web-server.xml │ │ ├── pinpoint-web.properties │ │ └── pinpoint-web.service │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── collector.yml ├── redis-ansible │ ├── .travis.yml │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── redis-server.service │ │ └── redis.conf.j2 ├── rocket-chat │ ├── .gitignore │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── main.yml │ │ ├── nginx.yml │ │ ├── repo_RedHat.yml │ │ └── upgrade.yml │ ├── templates │ │ ├── mongod.conf.j2 │ │ ├── nginx.conf.j2 │ │ ├── rocket_chat.conf.j2 │ │ └── rocketchat.service.j2 │ ├── tests │ │ ├── Dockerfile.centos-7 │ │ ├── Dockerfile.debian-8 │ │ ├── Dockerfile.ubuntu-14.04 │ │ ├── Dockerfile.ubuntu-16.04 │ │ ├── Vagrantfile │ │ └── provision.yml │ └── vars │ │ ├── Debian.yml │ │ ├── 
Debian_8.yml │ │ ├── RedHat.yml │ │ ├── RedHat_7.yml │ │ ├── Ubuntu.yml │ │ ├── Ubuntu_14.yml │ │ └── Ubuntu_16.yml ├── swarm-agent │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── swarmclient │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── debian.yml │ │ ├── main.yml │ │ ├── redhat.yml │ │ └── windows.yml │ ├── templates │ │ ├── swarm-client-linux.j2 │ │ ├── swarm-client.service.j2 │ │ ├── swarm-service.xml.j2 │ │ └── swarm-win.ps1.j2 │ └── tests │ │ ├── inventory │ │ └── test.yml ├── tomcat8 │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── conf │ │ │ ├── catalina.policy │ │ │ ├── catalina.properties │ │ │ ├── context.xml │ │ │ ├── logging.properties │ │ │ ├── server.xml │ │ │ ├── tomcat-users.xml │ │ │ ├── tomcat-users.xsd │ │ │ └── web.xml │ │ └── tomcat │ └── vars │ │ └── main.yml ├── zabbix-front │ ├── .travis.yml │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── zabbix-setup.php │ │ └── zabbix_server_nginx.conf │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml └── zookeeper-ansible-role │ ├── .gitignore │ ├── .travis.yml │ ├── README.md │ ├── defaults │ └── main.yml │ ├── handlers │ └── main.yml │ ├── tasks │ ├── build.yml │ ├── configure.yml │ └── main.yml │ └── templates │ ├── environment.j2 │ ├── log4j.properties.j2 │ ├── myid.j2 │ ├── zoo.cfg.j2 │ └── zookeeper.service.j2 ├── scripts └── zabbix.py └── vars └── app ├── common.yml └── local-env.yml /README.md: -------------------------------------------------------------------------------- 1 | Digital Business Platform 2 | ------------------------- 3 | 4 | ### start up 5 | ``` 6 | sudo pip install virtualenv 7 | virtualenv .pyenv 8 | . 
.pyenv/bin/activate 9 | pip install -r requirements.txt 10 | ``` 11 | 12 | 13 | ### TODO 14 | * think about how to manage firewall rules for machines 15 | * setup a demo app 16 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | library = ansible_modules/ 3 | host_key_checking = False 4 | remote_tmp = $HOME/.ansible/tmp 5 | -------------------------------------------------------------------------------- /chatops-inventory: -------------------------------------------------------------------------------- 1 | [jenkins] 2 | 192.168.61.11 ip=192.168.61.11 innerip=192.168.61.11 3 | 4 | 5 | [rocketchat-server] 6 | 192.168.61.15 innerip=192.168.61.15 7 | 8 | [mongodb] 9 | 192.168.61.15 innerip=192.168.61.15 10 | 11 | [nginx] 12 | 192.168.61.14 ip=192.168.61.14 innerip=192.168.61.14 13 | 14 | [all:vars] 15 | env="local-env" 16 | ansible_ssh_user="vagrant" 17 | ansible_ssh_pass="vagrant" 18 | -------------------------------------------------------------------------------- /chatops-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: true 4 | gather_facts: true 5 | vars_files: 6 | - "vars/app/common.yml" 7 | - "vars/app/{{env}}.yml" 8 | roles: 9 | - common 10 | - ansible-role-firewall 11 | - os-init 12 | 13 | - hosts: nginx 14 | become: true 15 | gather_facts: true 16 | vars_files: 17 | - "vars/app/common.yml" 18 | - "vars/app/{{env}}.yml" 19 | roles: 20 | - common 21 | - {"role": openresty } 22 | 23 | 24 | - hosts: mongodb 25 | become: true 26 | gather_facts: true 27 | vars_files: 28 | - "vars/app/common.yml" 29 | - "vars/app/{{env}}.yml" 30 | roles: 31 | - common 32 | - {"role": "ansible-role-firewall"} 33 | - {"role": ansible-role-mongodb } 34 | 35 | 36 | - hosts: rocketchat-server 37 | become: true 38 | gather_facts: true 39 | vars_files: 40 | - "vars/app/common.yml" 41 | - "vars/app/{{env}}.yml" 42 | roles: 43 | - {"role": ansible-role-nodejs } 44 | - {"role": rocket-chat } 45 | - {"role": ansible-hubot} 46 | 47 | - hosts: jenkins 48 | become: true 49 | vars_files: 50 | - "vars/app/common.yml" 51 | - "vars/app/{{env}}.yml" 52 | roles: 53 | - jdk8 54 | - jenkins 55 | - openresty 56 | - jenkins-openresty-conf -------------------------------------------------------------------------------- /local-inventory: -------------------------------------------------------------------------------- 1 | 2 | [nginx] 3 | 192.168.61.14 ip=192.168.61.14 innerip=192.168.61.14 4 | 5 | 6 | [zabbix-server] 7 | 192.168.61.14 ip=192.168.61.14 innerip=192.168.61.14 8 | 9 | [jenkins] 10 | 192.168.61.11 ip=192.168.61.11 innerip=192.168.61.11 11 | 12 | [jenkins-agent] 13 | 192.168.61.12 ip=192.168.61.12 innerip=192.168.61.12 14 | 192.168.61.13 ip=192.168.61.13 innerip=192.168.61.13 15 | 16 | [gitlab] 17 | 192.168.61.12 ip=192.168.61.11 innerip=192.168.61.12 18 | 19 | [zookeepers] 20 | 192.168.61.11 innerip=192.168.61.11 myid=1 zk_install_dir=/app/zk1 21 | 192.168.61.12 innerip=192.168.61.12 myid=2 zk_install_dir=/app/zk2 22 | 192.168.61.13 innerip=192.168.61.13 myid=3 zk_install_dir=/app/zk3 23 | 24 | [hadoop-namenode] 25 | 192.168.61.11 innerip=192.168.61.11 26 | 27 | 28 | [hadoop-datanode] 29 | 192.168.61.12 innerip=192.168.61.12 30 | 192.168.61.13 innerip=192.168.61.13 31 | 32 | [hbase-master] 33 | 192.168.61.11 innerip=192.168.61.11 34 | 35 | [hbase-slaves] 36 | 192.168.61.13 innerip=192.168.61.13 37 
| 192.168.61.12 innerip=192.168.61.12 38 | 39 | 40 | [pinpoint-collector] 41 | 192.168.61.13 innerip=192.168.61.13 42 | 43 | [rocketchat-server] 44 | 192.168.61.15 innerip=192.168.61.15 45 | 46 | [mongodb] 47 | 192.168.61.15 innerip=192.168.61.15 48 | 49 | [zabbix-agents] 50 | 192.168.61.15 innerip=192.168.61.15 ip=192.168.61.15 51 | 192.168.61.14 innerip=192.168.61.14 ip=192.168.61.14 52 | 53 | [mysql-server] 54 | 192.168.61.16 innerip=192.168.61.16 ip=192.168.61.16 55 | 56 | [all:vars] 57 | env="local-env" 58 | ansible_ssh_user="vagrant" 59 | ansible_ssh_pass="vagrant" 60 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | zabbix-api == 0.4 2 | -------------------------------------------------------------------------------- /roles/ansible-hubot/.gitignore: -------------------------------------------------------------------------------- 1 | .bundle 2 | .cache 3 | .DS_Store 4 | .molecule 5 | .vagrant 6 | *.retry 7 | cs 8 | hosts 9 | meta/.galaxy_install_info 10 | node_modules 11 | npm-debug.log 12 | yobot.yml 13 | site_lt.yml 14 | -------------------------------------------------------------------------------- /roles/ansible-hubot/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | # - printf '[defaults]\nroles_path=../' > ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ANSIBLE_ROLES_PATH=.. ansible-playbook -i tests/inventory tests/test.yml --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ 30 | 31 | -------------------------------------------------------------------------------- /roles/ansible-hubot/CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # CONTRIBUTORS 2 | 3 | ``` 4 | ________ _____ _ ____ ______ __ 5 | /_ __/ // / _ | / |/ / //_/ __/ / / 6 | / / / _ / __ |/ / ,< _\ \ /_/ 7 | /_/ /_//_/_/ |_/_/|_/_/|_/___/ (_) 8 | ``` 9 | 10 | Much thanks to these fine folks for contributing to the role: 11 | 12 | * [Mikko Ohtamaa](https://github.com/miohtama) 13 | * [Galaczi Endre Elek](https://github.com/chiller) 14 | * [Joe Stewart](https://github.com/joestewart) 15 | * [Rémy Greinhofer](https://github.com/rgreinho) 16 | * [Craig R Webster](https://github.com/craigw) 17 | * [Emanuelis](https://github.com/emanuelis) 18 | 19 | If you want to contribute, please file an issue or pull request at the 20 | [GitHub project repository](https://github.com/brianshumate/ansible-hubot) 21 | -------------------------------------------------------------------------------- /roles/ansible-hubot/LICENSE: -------------------------------------------------------------------------------- 1 | ansible-hubot 2 | 3 | Copyright 2014 Brian Shumate 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 
7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | -------------------------------------------------------------------------------- /roles/ansible-hubot/examples/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Vagrantfile for bootstrapping a Hubot instance on Mac OS X with Vagrant 5 | # provider and Ansible provisioiner on Ubuntu virtual machine 6 | 7 | VAGRANTFILE_API_VERSION = "2" 8 | BOX_MEM = ENV['BOX_MEM'] || "1536" 9 | BOX_NAME = ENV['BOX_NAME'] || "ubuntu/trusty64" 10 | BOT_HOST = ENV['BOT_HOST'] || "vagrant_hosts" 11 | PLAYBOOK = ENV['PLAYBOOK'] || "site.yml" 12 | 13 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 14 | config.vm.define :hubot do |hubot_config| 15 | hubot_config.vm.box = BOX_NAME 16 | hubot_config.vm.network :private_network, ip: "10.1.1.42" 17 | hubot_config.vm.hostname = "hubot.local" 18 | hubot_config.ssh.forward_agent = true 19 | hubot_config.vm.provider "virtualbox" do |v| 20 | v.name = "hubot" 21 | v.customize ["modifyvm", :id, "--memory", BOX_MEM] 22 | v.customize ["modifyvm", :id, "--ioapic", "on"] 23 | v.customize ["modifyvm", :id, "--cpus", "1"] 24 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 25 | v.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 26 | end 27 | hubot_config.vm.provision :ansible do |ansible| 28 | ansible.inventory_path = BOT_HOST 29 | ansible.playbook = PLAYBOOK 30 | end 31 | end 32 | end 33 | -------------------------------------------------------------------------------- /roles/ansible-hubot/examples/hosts.example: -------------------------------------------------------------------------------- 1 | # 2 | # File: hosts - Inventory file for Hubot role 3 | # 4 | # NB: Change '0.0.0.0' to appropriate hostname or IP address of Hubot server 5 | # Note that the private key reference and localhost entry are required for 6 | # EC2 instance use 7 | 8 | [localhost] 9 | 127.0.0.1 ansible_python_interpreter=~/.virtualenvs/ansible/bin/python 10 | 11 | [hubot] 12 | 0.0.0.0 hubot_admin=ubuntu ansible_ssh_user=ubuntu ansible_ssh_private_key_file=~/.ssh/hubot_id 13 | -------------------------------------------------------------------------------- /roles/ansible-hubot/examples/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: site.yml - Hubot playbook for Vagrant based instance 3 | 4 | - name: Hubot special variables 5 | hosts: hubot 6 | 7 | - name: Hubot activate! 
8 | hosts: hubot 9 | gather_facts: True 10 | become: True 11 | become_user: root 12 | roles: 13 | - { role: brianshumate.hubot, 14 | hubot_adapter: slack, 15 | hubot_admin: vagrant, 16 | hubot_owner: "'Stephie Andretti '", 17 | hubot_identity: griptape, 18 | hubot_description: Awesomeness 19 | } 20 | -------------------------------------------------------------------------------- /roles/ansible-hubot/examples/vagrant_hosts: -------------------------------------------------------------------------------- 1 | # 2 | # File: vagrant_hosts - Inventory file for Hubot role on Vagrant VM 3 | 4 | [hubot] 5 | 10.1.1.42 hubot_admin=vagrant ansible_ssh_user=vagrant ansible_ssh_private_key_file=./.vagrant/machines/hubot/virtualbox/private_key 6 | -------------------------------------------------------------------------------- /roles/ansible-hubot/files/scripts/caseofmondays.coffee: -------------------------------------------------------------------------------- 1 | # Description: 2 | # Looks like someone has a case of the Mondays 3 | # 4 | # Dependencies: 5 | # None 6 | # 7 | # Configuration: 8 | # None 9 | # 10 | # Commands: 11 | # hubot case of the mondays 12 | # 13 | # Notes: 14 | # "No. No, man. Shit no, man. I believe you'd get your ass kicked sayin' 15 | # something like that, man." 16 | # 17 | # Author: 18 | # Brian Shumate 19 | 20 | mondays = [ 21 | "http://i.imgur.com/8n4hlQE.jpg" 22 | ] 23 | 24 | module.exports = (robot) -> 25 | robot.hear /case of the mondays/i, (msg)-> 26 | msg.send msg.random mondays 27 | -------------------------------------------------------------------------------- /roles/ansible-hubot/files/scripts/dogatcomputer.coffee: -------------------------------------------------------------------------------- 1 | # Description: 2 | # Displays a "I Have No Idea What I'm Doing" image 3 | # 4 | # Dependencies: 5 | # None 6 | # 7 | # Configuration: 8 | # None 9 | # 10 | # Commands: 11 | # hubot no idea 12 | # 13 | # Notes: 14 | # No idea... 15 | # 16 | # Author: 17 | # Brian Shumate 18 | 19 | noidea = "http://i.imgur.com/hmTeehN.jpg" 20 | 21 | module.exports = (robot) -> 22 | robot.hear /(dunno|I don\'t know|beats me|no idea)/i, (msg)-> 23 | r = Math.random() 24 | if r <= 0.10 25 | msg.send noidea 26 | 27 | robot.respond /dunno|I don\'t know|beats me|no idea/i, (msg) -> 28 | msg.send noidea 29 | -------------------------------------------------------------------------------- /roles/ansible-hubot/files/scripts/ignoreme.coffee: -------------------------------------------------------------------------------- 1 | # Description: 2 | # Displays Venture Bros IGNORE ME! 
image 3 | # 4 | # Dependencies: 5 | # None 6 | # 7 | # Configuration: 8 | # None 9 | # 10 | # Commands: 11 | # hubot ignore me 12 | # 13 | # Notes: 14 | # None 15 | # 16 | # Author: 17 | # Brian Shumate 18 | 19 | ignore = [ 20 | "http://3.bp.blogspot.com/-DFLMK7ffcJM/Tbjn752gOFI/AAAAAAAAAsk/qa7D5ZdQDgM/s1600/1301856749331.jpg", 21 | "http://venturebrosblog.com/wp-content/uploads/2011/02/venture-bros-ignore-me1.jpg", 22 | "http://memedepot.com/uploads/2000/2159_1272950412470.jpg" 23 | ] 24 | 25 | module.exports = (robot) -> 26 | robot.hear /ignore me/i, (msg)-> 27 | msg.send msg.random ignore 28 | -------------------------------------------------------------------------------- /roles/ansible-hubot/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File handlers/main.yml Handlers for Hubot role 3 | 4 | - name: enable hubot 5 | become: True 6 | become_user: root 7 | service: name=hubot enabled=true 8 | tags: ansible-hubot 9 | 10 | - name: restart hubot 11 | become: True 12 | become_user: root 13 | service: name=hubot state=restarted 14 | tags: ansible-hubot 15 | 16 | - name: start hubot 17 | become: True 18 | become_user: root 19 | service: name=hubot state=started 20 | tags: ansible-hubot 21 | 22 | - name: stop hubot 23 | become: True 24 | become_user: root 25 | service: name=hubot state=stopped 26 | tags: ansible-hubot 27 | -------------------------------------------------------------------------------- /roles/ansible-hubot/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Brian Shumate 4 | description: "Role for Hubot, a delightful chat bot" 5 | company: 6 | license: Apache 2 7 | min_ansible_version: 1.9 8 | platforms: 9 | - name: EL 10 | versions: 11 | - 6 12 | - 7 13 | - name: Ubuntu 14 | versions: 15 | - precise 16 | - quantal 17 | - raring 18 | - saucy 19 | - trusty 20 | galaxy_tags: 21 | - networking 22 | dependencies: [] 23 | -------------------------------------------------------------------------------- /roles/ansible-hubot/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: molecule.yml - Molecule role testing 3 | 4 | ansible: 5 | config_file: .molecule/ansible.cfg 6 | 7 | vagrant: 8 | platforms: 9 | - name: trusty64 10 | box: ubuntu/trusty64 11 | - name: centos7 12 | box: centos/7 13 | 14 | providers: 15 | - name: virtualbox 16 | type: virtualbox 17 | 18 | instances: 19 | - name: brianshumate-hubot-01 20 | -------------------------------------------------------------------------------- /roles/ansible-hubot/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: test_playbook.yml - Test playbook 3 | 4 | - name: Test the hubot role 5 | hosts: all 6 | vars: 7 | # Hubot. 8 | hubot_admin: hubot 9 | hubot_adapter: hipchat 10 | hubot_owner: "'Rémy Greinhofer '" 11 | hubot_description: 'A stunning mermaid bot...Wait, WHAT...' 12 | 13 | hubot_env: 14 | # General Hubot stuff 15 | HUBOT_LOG_LEVEL: "debug" 16 | 17 | hubot_external_scripts: 18 | - hubot-help 19 | - hubot-calculator 20 | 21 | pre_tasks: 22 | # Ensure there is a hubot user. 
23 | - user: name="{{ hubot_admin }}" comment="Hubot admin user" 24 | 25 | roles: 26 | - ansible-hubot 27 | -------------------------------------------------------------------------------- /roles/ansible-hubot/tasks/install_packages_Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: tasks/install_packages_Debian.yml - OS packages for Debian 3 | 4 | - name: Install Debian packages 5 | become: True 6 | become_user: root 7 | apt: name={{ item }} state=present update_cache=yes cache_valid_time=86400 8 | with_items: 9 | - build-essential 10 | - curl 11 | - expat 12 | - git 13 | - libicu-dev 14 | - openssl 15 | - redis-server 16 | 17 | - set_fact: 18 | redis_server: redis-server 19 | -------------------------------------------------------------------------------- /roles/ansible-hubot/tasks/install_packages_RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: tasks/install_packages_RedHat.yml - OS packages for Red Hat 3 | 4 | - name: Install EPEL repo. 5 | become: True 6 | become_user: root 7 | yum: 8 | name: "{{ epel_repo_url }}" 9 | state: present 10 | register: result 11 | until: '"failed" not in result' 12 | retries: 5 13 | delay: 10 14 | when: ansible_pkg_mgr == 'yum' 15 | tags: ansible-hubot 16 | 17 | - name: Import EPEL GPG key 18 | become: True 19 | become_user: root 20 | rpm_key: 21 | key: "{{ epel_repo_gpg_key }}" 22 | state: present 23 | tags: ansible-hubot 24 | 25 | - name: install Development tools package group 26 | become: True 27 | become_user: root 28 | yum: name="@Development tools" state=present 29 | when: ansible_pkg_mgr == 'yum' 30 | changed_when: False 31 | tags: ansible-hubot 32 | 33 | - name: Install RedHat packages 34 | become: True 35 | become_user: root 36 | yum: name={{ item }} state=present 37 | when: ansible_pkg_mgr == 'yum' 38 | with_items: 39 | - curl 40 | - git 41 | - libicu-devel 42 | - libselinux-python 43 | - openssl-devel 44 | - expat-devel 45 | - ansible 46 | tags: ansible-hubot 47 | 48 | -------------------------------------------------------------------------------- /roles/ansible-hubot/templates/external-scripts.json.j2: -------------------------------------------------------------------------------- 1 | {{ hubot_external_scripts | to_nice_json }} -------------------------------------------------------------------------------- /roles/ansible-hubot/templates/hubot-scripts.json.j2: -------------------------------------------------------------------------------- 1 | {{ hubot_scripts | to_nice_json }} 2 | -------------------------------------------------------------------------------- /roles/ansible-hubot/templates/hubot.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # File: hubot.conf - Upstart configuration script for Hubot 3 | # 4 | # FIXME: This could use some improvement 5 | 6 | description "hubot" 7 | 8 | start on filesystem or runlevel [2345] 9 | stop on runlevel [!2345] 10 | respawn 11 | respawn limit 5 60 12 | 13 | script 14 | exec sudo -i -u {{ hubot_admin }} {{ hubot_dir }}/bin/start-hubot 15 | end script 16 | -------------------------------------------------------------------------------- /roles/ansible-hubot/templates/hubot.env.j2: -------------------------------------------------------------------------------- 1 | # 2 | # File: hubot.env - Hubot variables for HipChat and more 3 | # 4 | 5 | {% for key, value in hubot_env.iteritems() -%} 6 | {{ key }}={{ value }} 7 | {% endfor 
%} 8 | 9 | export {% for key, value in hubot_env.iteritems() -%}{{ key }}{{- ' ' if not loop.last else '' }}{% endfor %} 10 | -------------------------------------------------------------------------------- /roles/ansible-hubot/templates/hubot.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Hubot 3 | 4 | [Service] 5 | EnvironmentFile={{ hubot_dir }}/{{ hubot_identity }}.env 6 | User={{ hubot_admin }} 7 | ExecStart={{ hubot_dir }}/bin/start-hubot 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /roles/ansible-hubot/templates/start-hubot.sh.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # File: start-hubot.sh.j2 - A startup script template for Hubot 4 | # 5 | 6 | # Start Hubot in the background 7 | 8 | source {{ hubot_nvm_dir }}/nvm.sh 9 | nvm use {{ hubot_node_version }} 10 | 11 | cd {{ hubot_dir }} 12 | source {{ hubot_identity }}.env 13 | 14 | bin/hubot --adapter {{ hubot_adapter }} {% if hubot_alias is defined %}--alias {{ hubot_alias }} {% endif %}>> {{ hubot_dir }}/log/hubot.log 2>&1 15 | -------------------------------------------------------------------------------- /roles/ansible-hubot/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/ansible-hubot/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | become: yes 5 | become_user: root 6 | roles: 7 | - ansible-hubot 8 | -------------------------------------------------------------------------------- /roles/ansible-hubot/tests/test_hubot.py: -------------------------------------------------------------------------------- 1 | def test_hubot_running_and_enabled(Service): 2 | hubot = Service("hubot") 3 | assert hubot.is_running 4 | assert hubot.is_enabled 5 | -------------------------------------------------------------------------------- /roles/ansible-hubot/version.txt: -------------------------------------------------------------------------------- 1 | v1.8.1 2 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | firewall_allowed_tcp_ports: 3 | - "22" 4 | - "25" 5 | - "80" 6 | - "443" 7 | firewall_allowed_udp_ports: [] 8 | firewall_forwarded_tcp_ports: [] 9 | firewall_forwarded_udp_ports: [] 10 | firewall_additional_rules: [] 11 | firewall_ip6_additional_rules: [] 12 | firewall_log_dropped_packets: true 13 | 14 | # Set to true to ensure other firewall management software is disabled. 
15 | firewall_disable_firewalld: false 16 | firewall_disable_ufw: false 17 | 18 | firewall_status: stopped 19 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart firewall 3 | service: name=firewall state=restarted 4 | when: firewall_status != 'stopped' 5 | tags: 6 | - firewall 7 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | 4 | galaxy_info: 5 | author: geerlingguy 6 | description: Simple iptables firewall for most Unix-like systems. 7 | company: "Midwestern Mac, LLC" 8 | license: "license (BSD, MIT)" 9 | min_ansible_version: 2.0 10 | platforms: 11 | - name: EL 12 | versions: 13 | - all 14 | - name: Debian 15 | versions: 16 | - all 17 | - name: Ubuntu 18 | versions: 19 | - all 20 | galaxy_tags: 21 | - networking 22 | - system 23 | - security 24 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/tasks/disable-other-firewalls.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check firewalld package is installed (on RHEL). 3 | shell: yum list installed firewalld 4 | register: firewalld_installed 5 | ignore_errors: true 6 | changed_when: false 7 | when: ansible_os_family == "RedHat" and firewall_disable_firewalld 8 | tags: 9 | - firewall 10 | 11 | 12 | - name: Disable the firewalld service (on RHEL, if configured). 13 | service: 14 | name: firewalld 15 | state: stopped 16 | enabled: no 17 | when: ansible_os_family == "RedHat" and firewall_disable_firewalld and firewalld_installed.rc == 0 18 | tags: 19 | - firewall 20 | 21 | 22 | - name: Check ufw package is installed (on Ubuntu). 23 | shell: dpkg -l ufw 24 | register: ufw_installed 25 | ignore_errors: true 26 | changed_when: false 27 | when: ansible_distribution == "Ubuntu" and firewall_disable_ufw 28 | tags: 29 | - firewall 30 | 31 | 32 | - name: Disable the ufw firewall (on Ubuntu, if configured). 33 | service: 34 | name: ufw 35 | state: stopped 36 | enabled: no 37 | when: ansible_distribution == "Ubuntu" and firewall_disable_ufw and ufw_installed.rc == 0 38 | tags: 39 | - firewall 40 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure iptables is installed. 3 | yum: name=iptables state=installed 4 | tags: 5 | - firewall 6 | 7 | - name: Flush iptables the first time playbook runs. 8 | command: /usr/sbin/iptables -F 9 | tags: 10 | - firewall 11 | 12 | 13 | # creates=/etc/init.d/firewall 14 | 15 | - name: Copy firewall script into place. 16 | template: 17 | src: firewall.bash.j2 18 | dest: /etc/firewall.bash 19 | owner: root 20 | group: root 21 | mode: 0744 22 | notify: restart firewall 23 | tags: 24 | - firewall 25 | 26 | 27 | - name: Copy firewall init script into place. 28 | template: 29 | src: firewall.init.j2 30 | dest: /etc/init.d/firewall 31 | owner: root 32 | group: root 33 | mode: 0755 34 | tags: 35 | - firewall 36 | 37 | 38 | - name: Copy firewall systemd unit file into place (for systemd systems). 
39 | template: 40 | src: firewall.unit.j2 41 | dest: /etc/systemd/system/firewall.service 42 | owner: root 43 | group: root 44 | mode: 0644 45 | tags: 46 | - firewall 47 | 48 | 49 | 50 | - name: Ensure the firewall is enabled and will start on boot. 51 | service: name=firewall state={{firewall_status}} enabled=yes 52 | tags: 53 | - firewall 54 | 55 | 56 | - include: disable-other-firewalls.yml 57 | when: firewall_disable_firewalld or firewall_disable_ufw 58 | tags: 59 | - firewall 60 | 61 | 62 | - meta: flush_handlers 63 | tags: 64 | - firewall 65 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/templates/firewall.init.j2: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # /etc/init.d/firewall 3 | # 4 | # Firewall init script, to be used with /etc/firewall.bash by Jeff Geerling. 5 | # 6 | # @author Jeff Geerling 7 | 8 | ### BEGIN INIT INFO 9 | # Provides: firewall 10 | # Required-Start: $remote_fs $syslog 11 | # Required-Stop: $remote_fs $syslog 12 | # Default-Start: 2 3 4 5 13 | # Default-Stop: 0 1 6 14 | # Short-Description: Start firewall at boot time. 15 | # Description: Enable the firewall. 16 | ### END INIT INFO 17 | 18 | # Carry out specific functions when asked to by the system 19 | case "$1" in 20 | start) 21 | echo "Starting firewall." 22 | /etc/firewall.bash 23 | ;; 24 | stop) 25 | echo "Stopping firewall." 26 | iptables -F 27 | ;; 28 | restart) 29 | echo "Restarting firewall." 30 | /etc/firewall.bash 31 | ;; 32 | status) 33 | echo -e "`iptables -L -n`" 34 | EXIT=4 # program or service status is unknown 35 | NUMBER_OF_RULES=$(iptables-save | grep '^\-' | wc -l) 36 | if [ 0 -eq $NUMBER_OF_RULES ]; then 37 | EXIT=3 # program is not running 38 | else 39 | EXIT=0 # program is running or service is OK 40 | fi 41 | exit $EXIT 42 | ;; 43 | *) 44 | echo "Usage: /etc/init.d/firewall {start|stop|status|restart}" 45 | exit 1 46 | ;; 47 | esac 48 | 49 | exit 0 50 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/templates/firewall.unit.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Firewall 3 | After=syslog.target network.target 4 | 5 | [Service] 6 | Type=oneshot 7 | ExecStart=/etc/firewall.bash 8 | ExecStop=/sbin/iptables -F 9 | RemainAfterExit=yes 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /roles/ansible-role-firewall/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | 4 | vars: 5 | firewall_allowed_tcp_ports: 6 | - "9123" 7 | 8 | pre_tasks: 9 | - name: Update apt cache. 
10 | apt: update_cache=yes cache_valid_time=1200 11 | when: ansible_os_family == 'Debian' 12 | changed_when: false 13 | 14 | roles: 15 | - role_under_test 16 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | env: 6 | - SITE=test.yml 7 | 8 | before_install: 9 | - sudo apt-get update -qq 10 | 11 | install: 12 | # Install Ansible. 13 | - pip install ansible 14 | 15 | # Add ansible.cfg to pick up roles path. 16 | - "{ echo '[defaults]'; echo 'roles_path = ../'; } >> ansible.cfg" 17 | 18 | script: 19 | # Check the role/playbook's syntax. 20 | - "ansible-playbook -i tests/inventory tests/$SITE --syntax-check" 21 | 22 | # Run the role/playbook with ansible-playbook. 23 | - "ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo" 24 | 25 | # Travis CI's environment requires one _extra_ run for true idempotence :-/. 26 | - "ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo" 27 | 28 | # Run the role/playbook again, checking to make sure it's idempotent. 29 | - > 30 | ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo 31 | | grep -q 'changed=0.*failed=0' 32 | && (echo 'Idempotence test: pass' && exit 0) 33 | || (echo 'Idempotence test: fail' && exit 1) 34 | 35 | # Make sure GitLab is running. 36 | - > 37 | curl --insecure -s -o /dev/null -w "%{http_code}" https://localhost/users/password/new 38 | | grep -q '200' 39 | && (echo 'Status code 200 test: pass' && exit 0) 40 | || (echo 'Status code 200 test: fail' && exit 1) 41 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Jeff Geerling 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
21 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart gitlab 3 | command: gitlab-ctl reconfigure 4 | register: gitlab_restart 5 | failed_when: gitlab_restart_handler_failed_when -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | 4 | galaxy_info: 5 | author: geerlingguy 6 | description: GitLab Git web interface 7 | company: "Midwestern Mac, LLC" 8 | license: "license (BSD, MIT)" 9 | min_ansible_version: 2.0 10 | platforms: 11 | - name: EL 12 | versions: 13 | - 6 14 | - 7 15 | - name: Debian 16 | versions: 17 | - all 18 | - name: Ubuntu 19 | versions: 20 | - all 21 | galaxy_tags: 22 | - development 23 | - web 24 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | 5 | vars: 6 | gitlab_restart_handler_failed_when: false 7 | 8 | roles: 9 | - ansible-role-gitlab 10 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | gitlab_repository_installation_script_url: "https://packages.gitlab.com/install/repositories/gitlab/{{ gitlab_edition }}/script.deb.sh" 3 | -------------------------------------------------------------------------------- /roles/ansible-role-gitlab/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | gitlab_repository_installation_script_url: "https://packages.gitlab.com/install/repositories/gitlab/{{ gitlab_edition }}/script.rpm.sh" 3 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | env: 6 | - SITE=test.yml 7 | 8 | before_install: 9 | - sudo apt-get update -qq 10 | - sudo apt-get install -y curl 11 | 12 | install: 13 | # Install Ansible. 14 | - pip install ansible==2.1.0 15 | 16 | # Add ansible.cfg to pick up roles path. 17 | - "{ echo '[defaults]'; echo 'roles_path = ../'; } >> ansible.cfg" 18 | 19 | script: 20 | # Check the role/playbook's syntax. 21 | - "ansible-playbook -i tests/inventory tests/$SITE --syntax-check" 22 | 23 | # Run the role/playbook with ansible-playbook. 24 | - "ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo" 25 | 26 | # Run the role/playbook again, checking to make sure it's idempotent. 27 | - > 28 | ansible-playbook -i tests/inventory tests/$SITE --connection=local --sudo 29 | | grep -q 'changed=0.*failed=0' 30 | && (echo 'Idempotence test: pass' && exit 0) 31 | || (echo 'Idempotence test: fail' && exit 1) 32 | 33 | # Request a page via Apache, to make sure Apache is running and responds. 
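# (The curl below targets MongoDB's default port 27017, so this check verifies that mongod is listening and responding, not Apache.)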
34 | - "curl http://localhost:27017/" 35 | 36 | notifications: 37 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ 38 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Kévin ARBOUIN 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mongodb_install_dir: "/opt" 3 | mongodb_version: "3.2.4" 4 | 5 | mongodb_file: "mongodb-linux-x86_64-rhel62-{{ mongodb_version }}.tgz" 6 | mongodb_file_path: "{{ mongodb_install_dir }}/{{ mongodb_file }}" 7 | mongodb_file_url: "http://downloads.mongodb.org/linux/mongodb-linux-x86_64-rhel62-{{ mongodb_version }}.tgz" 8 | 9 | mongodb_data_path: "/mongodb_data" 10 | mongodb_port: 27017 11 | mongodb_daemon: "mongodb{{ mongodb_port | default('') }}" 12 | mongodb_dir_path: "{{ mongodb_data_path }}/{{mongodb_port | default('27017') }}" 13 | mongodb_conf: "{{ mongodb_dir_path }}/mongod.conf" 14 | mongodb_user: "mongodb" 15 | mongodb_home_dir: "{{mongodb_install_dir}}/mongodb" 16 | mongodb_dbpath: "{{ mongodb_dir_path }}/data" 17 | mongodb_logpath: "{{ mongodb_dir_path }}/logs/mongod.log" 18 | mongodb_pidfilepath: "{{ mongodb_dir_path }}/mongod.pid" 19 | mongodb_fork: true 20 | mongodb_logappend: true 21 | mongodb_directoryperdb: true 22 | mongodb_auth: false 23 | mongodb_rest: false 24 | 25 | # 主从架构配置 26 | mongodb_master: false 27 | 28 | mongodb_slave: false 29 | mongodb_master_source: '' 30 | 31 | 32 | # 副本集配置 33 | mongodb_replSet: true 34 | mongodb_replSet_exec: false 35 | mongodb_members: {} 36 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart mongod 3 | service: name=mongod state=restarted 4 | tags: 5 | - mongodb 6 | 7 | - name: start mongod 8 | service: name=mongod state=started 9 | tags: 10 | - mongodb 11 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 
2 | dependencies: [] 3 | 4 | galaxy_info: 5 | author: lesmyrmidons 6 | description: MongoDB for RedHat/CentOS/Debian 64Bit. 7 | company: "ARBOUIN Consulting" 8 | issue_tracker_url: https://github.com/lesmyrmidons/ansible-role-mongodb/issues 9 | license: "license (BSD, MIT)" 10 | min_ansible_version: 2.1 11 | github_branch: master 12 | platforms: 13 | - name: EL 14 | versions: 15 | - all 16 | - name: Debian 17 | versions: 18 | - all 19 | - name: Ubuntu 20 | versions: 21 | - all 22 | - name: RedHat 23 | versions: 24 | - all 25 | galaxy_tags: 26 | - database 27 | - nosql 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/templates/mongod.conf.j2: -------------------------------------------------------------------------------- 1 | # mongod.conf 2 | 3 | # for documentation of all options, see: 4 | # http://docs.mongodb.org/manual/reference/configuration-options/ 5 | 6 | {% if mongodb_fork is defined %} 7 | # whether to fork the process or not 8 | fork = {{ mongodb_fork }} 9 | {% endif %} 10 | 11 | {% if mongodb_pidfilepath is defined %} 12 | pidfilepath = {{ mongodb_pidfilepath }} 13 | {% endif %} 14 | {% if mongodb_logpath is defined %} 15 | 16 | logpath = {{ mongodb_logpath }} 17 | {% endif %} 18 | {% if mongodb_unixsocketprefix is defined %} 19 | 20 | unixSocketPrefix = {{ mongodb_unixsocketprefix }} 21 | {% endif %} 22 | {% if mongodb_dbpath is defined %} 23 | 24 | dbpath = {{ mongodb_dbpath }} 25 | {% endif %} 26 | 27 | # replication 28 | {% if mongodb_repl_lines is defined %} 29 | {{ mongodb_repl_lines }} 30 | {% endif %} 31 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/templates/mongod.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=MongoDB Database Service 3 | Wants=network.target 4 | After=network.target 5 | 6 | [Service] 7 | Type=forking 8 | PIDFile={{mongodb_pidfilepath}} 9 | ExecStart={{mongodb_home_dir}}/bin/mongod --config {{mongodb_conf}} 10 | ExecReload=/bin/kill -HUP $MAINPID 11 | Restart=always 12 | User={{mongodb_user}} 13 | Group={{mongodb_user}} 14 | 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/templates/upset.js.j2: -------------------------------------------------------------------------------- 1 | config={_id:"{{ mongodb_replSet }}",members:[{ 2 | {%- for key,value in mongodb_members.iteritems() -%} 3 | _id:{{ key }},host:"{{ value }}"{%- if not loop.last -%} },{ {%- endif -%} 4 | {%- endfor -%}}]}; 5 | 6 | rs.initiate(config); 7 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - ansible-role-mongodb -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | url_apt_key: 
"http://keyserver.ubuntu.com/pks/lookup?op=get&search=" 4 | id_apt_key: 7F0CEB10 5 | mongodb_repository: "deb http://downloads-distro.mongodb.org/repo/debian-sysvinit dist 10gen" 6 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | mongodb_version: "3.4" 4 | mongodb_repo_baseurl: https://repo.mongodb.org/yum/redhat/{{ ansible_distribution_major_version }}/mongodb-org/{{ mongodb_version }}/x86_64/ 5 | mongodb_repo_gpgcheck: yes 6 | mongodb_repo_gpgkey: https://www.mongodb.org/static/pgp/server-{{ mongodb_version }}.asc 7 | mongodb_packages_dependencies: 8 | - libselinux-python 9 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/vars/Ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | url_apt_key: "keyserver.ubuntu.com" 4 | id_apt_key: 0C49F3730359A14518585931BC711F9BA15703C6 5 | mongodb_repository: "deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen" 6 | -------------------------------------------------------------------------------- /roles/ansible-role-mongodb/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | mongodb_packages: 4 | - mongodb-org -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | tests/test.sh 3 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: docker 3 | 4 | env: 5 | # Defaults. 6 | - distro: centos7 7 | - distro: centos6 8 | - distro: ubuntu1604 9 | - distro: ubuntu1404 10 | - distro: debian9 11 | - distro: debian8 12 | 13 | # Latest release. 14 | - distro: centos7 15 | playbook: test-latest.yml 16 | - distro: ubuntu1604 17 | playbook: test-latest.yml 18 | 19 | script: 20 | # Configure test script so we can run extra tests after playbook is run. 21 | - export container_id=$(date +%s) 22 | - export cleanup=false 23 | 24 | # Download test shim. 25 | - wget -O ${PWD}/tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/ 26 | - chmod +x ${PWD}/tests/test.sh 27 | 28 | # Run tests. 29 | - ${PWD}/tests/test.sh 30 | 31 | # Ensure Node.js is installed. 32 | - 'docker exec --tty ${container_id} env TERM=xterm which node' 33 | - 'docker exec --tty ${container_id} env TERM=xterm node -v' 34 | 35 | # Ensure npm packages are installed globally. 
36 | - 'docker exec --tty ${container_id} env TERM=xterm bash --login -c "npm list -g --depth=0 jslint"' 37 | - 'docker exec --tty ${container_id} env TERM=xterm bash --login -c "npm list -g --depth=0 node-sass"' 38 | - 'docker exec --tty ${container_id} env TERM=xterm bash --login -c "npm list -g --depth=0 yo"' 39 | 40 | notifications: 41 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ 42 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Jeff Geerling 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Set the version of Node.js to install ("0.12", "4.x", "5.x", "6.x", "8.x"). 3 | # Version numbers from Nodesource: https://github.com/nodesource/distributions 4 | nodejs_version: "6.x" 5 | 6 | # The user for whom the npm packages will be installed. 7 | # nodejs_install_npm_user: username 8 | 9 | # The directory for global installations. 10 | npm_config_prefix: "/usr/local/lib/npm" 11 | 12 | # Set to true to suppress the UID/GID switching when running package scripts. If set explicitly to false, then installing as a non-root user will fail. 13 | npm_config_unsafe_perm: "false" 14 | 15 | # Define a list of global packages to be installed with NPM. 16 | nodejs_npm_global_packages: [] 17 | # # Install a specific version of a package. 18 | # - name: jslint 19 | # version: 0.9.3 20 | # # Install the latest stable release of a package. 21 | # - name: node-sass 22 | # # This shorthand syntax also works (same as previous example). 23 | # - node-sass 24 | 25 | # The path of a package.json file used to install packages globally. 
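# For example (an illustrative value, not a default shipped with this role),
# point this at the directory holding your app's package.json to have its
# packages installed:
# nodejs_package_json_path: "/var/www/myapp"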
26 | nodejs_package_json_path: "" 27 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | 4 | galaxy_info: 5 | author: geerlingguy 6 | description: Node.js installation for Linux 7 | company: "Midwestern Mac, LLC" 8 | license: "license (BSD, MIT)" 9 | min_ansible_version: 1.9 10 | platforms: 11 | - name: EL 12 | versions: 13 | - 6 14 | - 7 15 | - name: Debian 16 | versions: 17 | - all 18 | - name: Ubuntu 19 | versions: 20 | - trusty 21 | - xenial 22 | galaxy_tags: 23 | - development 24 | - web 25 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: setup-RedHat.yml 3 | when: ansible_os_family == 'RedHat' 4 | 5 | - include: setup-Debian.yml 6 | when: ansible_os_family == 'Debian' 7 | 8 | - name: Define nodejs_install_npm_user 9 | set_fact: 10 | nodejs_install_npm_user: "{{ ansible_user | default(lookup('env', 'USER')) }}" 11 | when: nodejs_install_npm_user is not defined 12 | 13 | - name: Create npm global directory 14 | file: 15 | path: "{{ npm_config_prefix }}" 16 | owner: "{{ nodejs_install_npm_user }}" 17 | group: "{{ nodejs_install_npm_user }}" 18 | state: directory 19 | 20 | - name: Add npm_config_prefix bin directory to global $PATH. 21 | template: 22 | src: npm.sh.j2 23 | dest: /etc/profile.d/npm.sh 24 | mode: 0644 25 | 26 | - name: Ensure npm global packages are installed. 27 | npm: 28 | name: "{{ item.name | default(item) }}" 29 | version: "{{ item.version | default('latest') }}" 30 | global: yes 31 | state: latest 32 | environment: 33 | NPM_CONFIG_PREFIX: "{{ npm_config_prefix }}" 34 | NODE_PATH: "{{ npm_config_prefix }}/lib/node_modules" 35 | NPM_CONFIG_UNSAFE_PERM: "{{ npm_config_unsafe_perm }}" 36 | with_items: "{{ nodejs_npm_global_packages }}" 37 | 38 | - name: Install packages defined in a given package.json. 39 | npm: 40 | path: "{{ nodejs_package_json_path }}" 41 | when: nodejs_package_json_path is defined and nodejs_package_json_path 42 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/tasks/setup-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure apt-transport-https is installed. 3 | apt: name=apt-transport-https state=present 4 | 5 | - name: Add Nodesource apt key. 6 | apt_key: 7 | url: https://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search=0x1655A0AB68576280 8 | id: "68576280" 9 | state: present 10 | 11 | - name: Add NodeSource repositories for Node.js. 12 | apt_repository: 13 | repo: "{{ item }}" 14 | state: present 15 | with_items: 16 | - "deb https://deb.nodesource.com/node_{{ nodejs_version }} {{ ansible_distribution_release }} main" 17 | - "deb-src https://deb.nodesource.com/node_{{ nodejs_version }} {{ ansible_distribution_release }} main" 18 | register: node_repo 19 | 20 | - name: Update apt cache if repo was added. 21 | apt: update_cache=yes 22 | when: node_repo.changed 23 | 24 | - name: Ensure Node.js and npm are installed. 
25 | apt: "name=nodejs={{ nodejs_version|regex_replace('x', '') }}* state=present" 26 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/tasks/setup-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set up the Nodesource RPM directory for Node.js > 0.10. 3 | set_fact: 4 | nodejs_rhel_rpm_dir: "pub_{{ nodejs_version }}" 5 | when: nodejs_version != '0.10' 6 | 7 | - name: Set up the Nodesource RPM variable for Node.js == 0.10. 8 | set_fact: 9 | nodejs_rhel_rpm_dir: "pub" 10 | when: nodejs_version == '0.10' 11 | 12 | - name: Import Nodesource RPM key (CentOS < 7). 13 | rpm_key: 14 | key: http://rpm.nodesource.com/pub/el/NODESOURCE-GPG-SIGNING-KEY-EL 15 | state: present 16 | when: ansible_distribution_major_version|int < 7 17 | 18 | - name: Import Nodesource RPM key (CentOS 7+).. 19 | rpm_key: 20 | key: https://rpm.nodesource.com/pub/el/NODESOURCE-GPG-SIGNING-KEY-EL 21 | state: present 22 | validate_certs: no 23 | when: ansible_distribution_major_version|int >= 7 24 | 25 | 26 | - name: Add Nodesource repositories for Node.js (CentOS < 7). 27 | yum: 28 | name: "http://rpm.nodesource.com/{{ nodejs_rhel_rpm_dir }}/el/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}/nodesource-release-el{{ ansible_distribution_major_version }}-1.noarch.rpm" 29 | state: present 30 | when: ansible_distribution_major_version|int < 7 31 | 32 | - name: Add Nodesource repositories for Node.js (CentOS 7+). 33 | yum: 34 | name: "https://rpm.nodesource.com/{{ nodejs_rhel_rpm_dir }}/el/{{ ansible_distribution_major_version }}/{{ ansible_architecture }}/nodesource-release-el{{ ansible_distribution_major_version }}-1.noarch.rpm" 35 | state: present 36 | when: ansible_distribution_major_version|int >= 7 37 | 38 | - name: Ensure Node.js and npm are installed. 39 | yum: "name=nodejs-{{ nodejs_version[0] }}.* state=present enablerepo='epel,nodesource'" 40 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/templates/npm.sh.j2: -------------------------------------------------------------------------------- 1 | export PATH={{ npm_config_prefix }}/bin:$PATH 2 | export NPM_CONFIG_PREFIX={{ npm_config_prefix }} 3 | export NODE_PATH=$NODE_PATH:{{ npm_config_prefix }}/lib/node_modules 4 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/tests/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role tests 2 | 3 | To run the test playbook(s) in this directory: 4 | 5 | 1. Install and start Docker. 6 | 1. Download the test shim (see .travis.yml file for the URL) into `tests/test.sh`: 7 | - `wget -O tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/` 8 | 1. Make the test shim executable: `chmod +x tests/test.sh`. 9 | 1. 
Run (from the role root directory) `distro=[distro] playbook=[playbook] ./tests/test.sh` 10 | 11 | If you don't want the container to be automatically deleted after the test playbook is run, add the following environment variables: `cleanup=false container_id=$(date +%s)` 12 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/tests/test-latest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | 4 | vars: 5 | nodejs_version: "8.x" 6 | nodejs_install_npm_user: root 7 | npm_config_prefix: /root/.npm-global 8 | npm_config_unsafe_perm: "true" 9 | nodejs_npm_global_packages: 10 | - node-sass 11 | - name: jslint 12 | version: 0.9.6 13 | - name: yo 14 | 15 | pre_tasks: 16 | - name: Update apt cache. 17 | apt: update_cache=yes cache_valid_time=600 18 | when: ansible_os_family == 'Debian' 19 | 20 | roles: 21 | - role_under_test 22 | -------------------------------------------------------------------------------- /roles/ansible-role-nodejs/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | 4 | vars: 5 | nodejs_install_npm_user: root 6 | npm_config_prefix: /root/.npm-global 7 | npm_config_unsafe_perm: "true" 8 | nodejs_npm_global_packages: 9 | - node-sass 10 | - name: jslint 11 | version: 0.9.6 12 | - name: yo 13 | 14 | pre_tasks: 15 | - name: Update apt cache. 16 | apt: update_cache=yes cache_valid_time=600 17 | when: ansible_os_family == 'Debian' 18 | 19 | roles: 20 | - role_under_test 21 | -------------------------------------------------------------------------------- /roles/ansible-role-php/.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | tests/test.sh 3 | -------------------------------------------------------------------------------- /roles/ansible-role-php/LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Jeff Geerling 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
21 | -------------------------------------------------------------------------------- /roles/ansible-role-php/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart webserver 3 | service: 4 | name: "{{ php_webserver_daemon }}" 5 | state: restarted 6 | notify: restart php-fpm 7 | when: php_enable_webserver 8 | 9 | - name: restart php-fpm 10 | service: 11 | name: "{{ php_fpm_daemon }}" 12 | state: restarted 13 | when: php_enable_php_fpm 14 | -------------------------------------------------------------------------------- /roles/ansible-role-php/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: [] 3 | 4 | galaxy_info: 5 | author: geerlingguy 6 | description: PHP for RedHat/CentOS/Fedora/Debian/Ubuntu. 7 | company: "Midwestern Mac, LLC" 8 | license: "license (BSD, MIT)" 9 | min_ansible_version: 2.0 10 | platforms: 11 | - name: EL 12 | versions: 13 | - 6 14 | - 7 15 | - name: Fedora 16 | versions: 17 | - all 18 | - name: Debian 19 | versions: 20 | - all 21 | - name: Ubuntu 22 | versions: 23 | - trusty 24 | - xenial 25 | galaxy_tags: 26 | - development 27 | - web 28 | - php 29 | - language 30 | - fpm 31 | - drupal 32 | - wordpress 33 | - joomla 34 | - magento 35 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tasks/configure-apcu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check for existing APCu config files. 3 | find: 4 | paths: "{{ item }}" 5 | contains: 'extension(\s+)?=(\s+)?apc[u]?\.so' 6 | register: php_installed_apc_confs 7 | with_items: "{{ php_extension_conf_paths }}" 8 | 9 | - name: Remove any non-role-supplied APCu config files. 10 | file: 11 | path: "{{ item.1.path }}" 12 | state: absent 13 | when: php_apc_conf_filename != (item.1.path.split('/') | last) 14 | with_subelements: 15 | - "{{ php_installed_apc_confs.results }}" 16 | - files 17 | notify: restart webserver 18 | 19 | - name: Ensure APCu config file is present. 20 | template: 21 | src: apc.ini.j2 22 | dest: "{{ item }}/{{ php_apc_conf_filename }}" 23 | owner: root 24 | group: root 25 | force: yes 26 | mode: 0644 27 | with_items: "{{ php_extension_conf_paths }}" 28 | when: php_enable_apc 29 | notify: restart webserver 30 | 31 | - name: Remove APCu config file if APC is disabled. 32 | file: 33 | path: "{{ item }}/{{ php_apc_conf_filename }}" 34 | state: absent 35 | with_items: "{{ php_extension_conf_paths }}" 36 | when: not php_enable_apc 37 | notify: restart webserver 38 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tasks/configure-opcache.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check for existing OpCache config files. 3 | find: 4 | paths: "{{ item }}" 5 | contains: 'zend_extension(\s+)?=(\s+)?opcache\.so' 6 | register: php_installed_opcache_confs 7 | with_items: "{{ php_extension_conf_paths }}" 8 | 9 | - name: Remove any non-role-supplied OpCache config files. 10 | file: 11 | path: "{{ item.1.path }}" 12 | state: absent 13 | when: php_opcache_conf_filename != (item.1.path.split('/') | last) 14 | with_subelements: 15 | - "{{ php_installed_opcache_confs.results }}" 16 | - files 17 | notify: restart webserver 18 | 19 | - name: Ensure OpCache config file is present. 
20 | template: 21 | src: opcache.ini.j2 22 | dest: "{{ item }}/{{ php_opcache_conf_filename }}" 23 | owner: root 24 | group: root 25 | force: yes 26 | mode: 0644 27 | with_items: "{{ php_extension_conf_paths }}" 28 | when: php_opcache_enable 29 | notify: restart webserver 30 | 31 | - name: Remove OpCache config file if OpCache is disabled. 32 | file: 33 | path: "{{ item }}/{{ php_opcache_conf_filename }}" 34 | state: absent 35 | with_items: "{{ php_extension_conf_paths }}" 36 | when: not php_opcache_enable 37 | notify: restart webserver 38 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure configuration directories exist. 3 | file: 4 | path: "{{ item }}" 5 | state: directory 6 | follow: true 7 | with_flattened: 8 | - "{{ php_conf_paths }}" 9 | - "{{ php_extension_conf_paths }}" 10 | 11 | - name: Place PHP configuration file in place. 12 | template: 13 | src: php.ini.j2 14 | dest: "{{ item }}/php.ini" 15 | owner: root 16 | group: root 17 | mode: 0644 18 | with_items: "{{ php_conf_paths }}" 19 | notify: restart webserver 20 | when: php_use_managed_ini 21 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tasks/setup-Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update apt cache. 3 | apt: update_cache=yes cache_valid_time=86400 4 | 5 | - name: Ensure PHP packages are installed. 6 | apt: 7 | name: "{{ item }}" 8 | state: "{{ php_packages_state }}" 9 | install_recommends: "{{ php_install_recommends }}" 10 | with_items: "{{ php_packages }}" 11 | register: php_package_install 12 | notify: restart webserver 13 | 14 | - name: Delete APCu configuration file if this role will provide one. 15 | file: 16 | path: "{{ item }}/{{ php_apc_conf_filename }}" 17 | state: absent 18 | with_items: "{{ php_extension_conf_paths }}" 19 | when: php_enable_apc and php_package_install.changed 20 | notify: restart webserver 21 | 22 | - name: Delete OpCache configuration file if this role will provide one. 23 | file: 24 | path: "{{ item }}/{{ php_opcache_conf_filename }}" 25 | state: absent 26 | with_items: "{{ php_extension_conf_paths }}" 27 | when: php_opcache_enable and php_package_install.changed 28 | notify: restart webserver 29 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tasks/setup-RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure PHP packages are installed. 
3 | package: 4 | name: "{{ item }}" 5 | state: "{{ php_packages_state }}" 6 | enablerepo: "{{ php_enablerepo }}" 7 | with_items: "{{ php_packages }}" 8 | notify: restart webserver 9 | -------------------------------------------------------------------------------- /roles/ansible-role-php/templates/apc.ini.j2: -------------------------------------------------------------------------------- 1 | extension=apcu.so 2 | apc.shm_size={{ php_apc_shm_size }} 3 | apc.enable_cli={{ php_apc_enable_cli }} 4 | apc.rfc1867=1 5 | -------------------------------------------------------------------------------- /roles/ansible-role-php/templates/opcache.ini.j2: -------------------------------------------------------------------------------- 1 | zend_extension={{ php_opcache_zend_extension }} 2 | opcache.enable={{ php_opcache_enable }} 3 | opcache.enable_cli={{ php_opcache_enable_cli }} 4 | opcache.memory_consumption={{ php_opcache_memory_consumption }} 5 | opcache.interned_strings_buffer={{ php_opcache_interned_strings_buffer }} 6 | opcache.max_accelerated_files={{ php_opcache_max_accelerated_files }} 7 | opcache.max_wasted_percentage={{ php_opcache_max_wasted_percentage }} 8 | opcache.validate_timestamps={{ php_opcache_validate_timestamps }} 9 | opcache.revalidate_path={{ php_opcache_revalidate_path }} 10 | opcache.revalidate_freq={{ php_opcache_revalidate_freq }} 11 | opcache.max_file_size={{ php_opcache_max_file_size }} 12 | {% if php_opcache_blacklist_filename != '' %} 13 | opcache.blacklist_filename={{ php_opcache_blacklist_filename }} 14 | {% endif %} 15 | -------------------------------------------------------------------------------- /roles/ansible-role-php/templates/php-fpm.conf.j2: -------------------------------------------------------------------------------- 1 | ;;;;;;;;;;;;;;;;;;;;; 2 | ; FPM Configuration ; 3 | ;;;;;;;;;;;;;;;;;;;;; 4 | 5 | include={{ php_fpm_conf_path }}/pool.d/*.conf 6 | 7 | ;;;;;;;;;;;;;;;;;; 8 | ; Global Options ; 9 | ;;;;;;;;;;;;;;;;;; 10 | 11 | [global] 12 | error_log = /var/log/php-fpm.log 13 | -------------------------------------------------------------------------------- /roles/ansible-role-php/templates/www.conf.j2: -------------------------------------------------------------------------------- 1 | [www] 2 | listen = 127.0.0.1:9000 3 | listen.allowed_clients = 127.0.0.1 4 | user = {{ php_fpm_pool_user }} 5 | group = {{ php_fpm_pool_group }} 6 | 7 | pm = dynamic 8 | pm.max_children = 50 9 | pm.start_servers = 5 10 | pm.min_spare_servers = 5 11 | pm.max_spare_servers = 5 12 | pm.max_requests = 500 13 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tests/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role tests 2 | 3 | To run the test playbook(s) in this directory: 4 | 5 | 1. Install and start Docker. 6 | 1. Download the test shim (see .travis.yml file for the URL) into `tests/test.sh`: 7 | - `wget -O tests/test.sh https://gist.githubusercontent.com/geerlingguy/73ef1e5ee45d8694570f334be385e181/raw/` 8 | 1. Make the test shim executable: `chmod +x tests/test.sh`. 9 | 1. 
Run (from the role root directory) `distro=[distro] playbook=[playbook] ./tests/test.sh` 10 | 11 | If you don't want the container to be automatically deleted after the test playbook is run, add the following environment variables: `cleanup=false container_id=$(date +%s)` 12 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tests/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - src: geerlingguy.repo-remi 3 | - src: geerlingguy.git 4 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tests/test-source.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | 4 | vars: 5 | php_enable_webserver: false 6 | php_install_from_source: true 7 | php_source_clone_dir: /root/php-src 8 | # Workaround for https://github.com/ansible/ansible-modules-core/issues/5457 9 | php_source_clone_depth: 0 10 | php_source_make_command: "make --jobs=2" 11 | php_source_version: "php-7.0.13" 12 | php_memory_limit: "192M" 13 | 14 | pre_tasks: 15 | - name: Update apt cache. 16 | apt: update_cache=yes cache_valid_time=600 17 | when: ansible_os_family == 'Debian' 18 | changed_when: false 19 | 20 | roles: 21 | - geerlingguy.git 22 | - role_under_test 23 | -------------------------------------------------------------------------------- /roles/ansible-role-php/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | 4 | vars: 5 | php_enable_webserver: false 6 | php_enable_php_fpm: true 7 | php_memory_limit: "192M" 8 | php_enablerepo: "remi,remi-php70" 9 | php_install_recommends: no 10 | 11 | pre_tasks: 12 | - name: Update apt cache. 13 | apt: update_cache=yes cache_valid_time=600 14 | when: ansible_os_family == 'Debian' 15 | changed_when: false 16 | 17 | # Ubuntu-specific tasks. 18 | - name: Add repository for PHP 7. 19 | apt_repository: repo='ppa:ondrej/php' 20 | when: ansible_distribution == 'Ubuntu' 21 | 22 | # Debian-specific tasks. 23 | - name: Add dependencies for PHP versions (Debian). 24 | apt: 25 | name: "{{ item }}" 26 | with_items: 27 | - apt-transport-https 28 | - ca-certificates 29 | when: ansible_distribution == "Debian" 30 | 31 | - name: Add Ondrej Sury's apt key (Debian). 32 | apt_key: 33 | url: https://packages.sury.org/php/apt.gpg 34 | state: present 35 | when: ansible_distribution == "Debian" 36 | 37 | - name: Add Ondrej Sury's repo (Debian). 38 | apt_repository: 39 | repo: "deb https://packages.sury.org/php/ {{ ansible_distribution_release }} main" 40 | state: present 41 | register: php_ondrej_debian_repo 42 | when: ansible_distribution == "Debian" 43 | 44 | - name: Update apt caches after repo is added (Debian). 
45 | apt: update_cache=yes 46 | when: php_ondrej_debian_repo.changed and (ansible_distribution == "Debian") 47 | 48 | roles: 49 | - role: geerlingguy.repo-remi 50 | when: ansible_os_family == 'RedHat' and ansible_distribution != 'Fedora' 51 | - role_under_test 52 | -------------------------------------------------------------------------------- /roles/ansible-role-php/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __php_packages: 3 | - php7.0-common 4 | - php7.0-cli 5 | - php7.0-dev 6 | - php7.0-fpm 7 | - libpcre3-dev 8 | - php7.0-gd 9 | - php7.0-curl 10 | - php7.0-imap 11 | - php7.0-json 12 | - php7.0-opcache 13 | - php7.0-xml 14 | - php7.0-mbstring 15 | - php-sqlite3 16 | - php-apcu 17 | __php_webserver_daemon: "apache2" 18 | 19 | # Vendor-specific configuration paths on Debian/Ubuntu make my brain asplode. 20 | __php_conf_paths: 21 | - /etc/php/7.0/fpm 22 | - /etc/php/7.0/apache2 23 | - /etc/php/7.0/cli 24 | 25 | __php_extension_conf_paths: 26 | - /etc/php/7.0/fpm/conf.d 27 | - /etc/php/7.0/apache2/conf.d 28 | - /etc/php/7.0/cli/conf.d 29 | 30 | __php_apc_conf_filename: 20-apcu.ini 31 | __php_opcache_conf_filename: 05-opcache.ini 32 | __php_fpm_daemon: php7.0-fpm 33 | __php_fpm_conf_path: "/etc/php/7.0/fpm" 34 | __php_fpm_pool_conf_path: "{{ __php_fpm_conf_path }}/pool.d/www.conf" 35 | 36 | __php_fpm_pool_user: www-data 37 | __php_fpm_pool_group: www-data 38 | -------------------------------------------------------------------------------- /roles/ansible-role-php/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __php_packages: 3 | - php 4 | - php-cli 5 | - php-common 6 | - php-devel 7 | - php-fpm 8 | - php-gd 9 | - php-imap 10 | - php-ldap 11 | - php-mbstring 12 | - php-opcache 13 | - php-pdo 14 | - php-pear 15 | - php-pecl-apcu 16 | - php-xml 17 | - php-mysql 18 | - php-bcmath 19 | - php-xmlrpc 20 | __php_webserver_daemon: "nginx" 21 | 22 | __php_conf_paths: 23 | - /etc 24 | 25 | __php_extension_conf_paths: 26 | - /etc/php.d 27 | 28 | __php_apc_conf_filename: 50-apc.ini 29 | __php_opcache_conf_filename: 10-opcache.ini 30 | __php_fpm_daemon: php-fpm 31 | __php_fpm_conf_path: "/etc/fpm" 32 | __php_fpm_pool_conf_path: "/etc/php-fpm.d/www.conf" 33 | 34 | __php_fpm_pool_user: apache 35 | __php_fpm_pool_group: apache 36 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .molecule 3 | tests/.cache 4 | .cache 5 | __pycache__ 6 | *.retry 7 | 8 | .env 9 | .virtualenv 10 | *.pyc 11 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sudo: required 3 | language: python 4 | services: 5 | - docker 6 | 7 | install: 8 | - pip install molecule==1.25.0 ansible docker 9 | 10 | script: 11 | - molecule --version 12 | - ansible --version 13 | - molecule test 14 | notifications: 15 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ 16 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/files/sample.conf: -------------------------------------------------------------------------------- 1 | # This is an sample userparameters file. 
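# Format: UserParameter=<item.key>,<command>
# When the Zabbix server requests <item.key>, the agent executes <command> and
# returns its output; the key below reports whether mysqld answers a ping.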
2 | 3 | UserParameter=mysql.ping_to,mysqladmin -uroot ping | grep -c alive 4 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for zabbix-agent 3 | 4 | - name: restart zabbix-agent 5 | service: name={{ zabbix_agent_service }} 6 | state=restarted 7 | enabled=yes 8 | become: yes -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Werner Dijkerman 4 | description: Installing and maintaining zabbix-agent for RedHat/Debian/Ubuntu. 5 | company: myCompany.Dotcom 6 | license: license (GPLv3) 7 | min_ansible_version: 1.9 8 | platforms: 9 | - name: EL 10 | versions: 11 | - 5 12 | - 6 13 | - 7 14 | - name: Ubuntu 15 | versions: 16 | - lucid 17 | - precise 18 | - trusty 19 | - name: Debian 20 | versions: 21 | - squeeze 22 | - wheezy 23 | - name: opensuse 24 | versions: 25 | - 12.1 26 | - 12.2 27 | - 12.3 28 | - 13.1 29 | - 13.2 30 | categories: 31 | - monitoring 32 | galaxy_tags: 33 | - zabbix 34 | - zabbix-agent 35 | - monitoring 36 | dependencies: [] 37 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ansible: 3 | playbook: playbook.yml 4 | timeout: 30 5 | 6 | molecule: 7 | test: 8 | sequence: 9 | - destroy 10 | - syntax 11 | - create 12 | - converge 13 | - verify 14 | # - idempotence 15 | ignore_paths: 16 | - .git 17 | - .vagrant 18 | - .molecule 19 | - .env 20 | - .virtualenv 21 | 22 | driver: 23 | name: docker 24 | 25 | docker: 26 | containers: 27 | - name: zabbix-agent-centos 28 | ansible_groups: 29 | - group1 30 | image: milcom/centos7-systemd 31 | image_version: latest 32 | privileged: True 33 | - name: zabbix-agent-debian 34 | ansible_groups: 35 | - group2 36 | image: maint/debian-systemd 37 | image_version: latest 38 | privileged: True 39 | - name: zabbix-agent-ubuntu 40 | ansible_groups: 41 | - group2 42 | image: solita/ubuntu-systemd 43 | image_version: latest 44 | privileged: True 45 | - name: zabbix-agent-mint 46 | ansible_groups: 47 | - group2 48 | image: vcatechnology/linux-mint 49 | image_version: latest 50 | privileged: True 51 | # - name: zabbix-agent-suse 52 | # ansible_groups: 53 | # - group3 54 | # image: reszelaz/leap-systemd 55 | # image_version: latest 56 | # privileged: True 57 | # environment: 58 | # docker: container 59 | 60 | verifier: 61 | name: testinfra 62 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | pre_tasks: 4 | - name: "Installing packages on CentOS family" 5 | yum: 6 | name: "{{ item }}" 7 | state: installed 8 | with_items: 9 | - net-tools 10 | - which 11 | when: ansible_os_family == 'RedHat' 12 | - name: "Installing packages on Debian family" 13 | apt: 14 | name: "{{ item }}" 15 | state: installed 16 | with_items: 17 | - net-tools 18 | when: ansible_os_family == 'Debian' 19 | - name: "Installing packages on Suse family" 20 | shell: zypper install -y python-xml python-libxml2 net-tools which 21 | changed_when: False 22 | when: 
ansible_os_family == 'Suse' 23 | tags: 24 | - skip_ansible_lint 25 | 26 | roles: 27 | - role: ansible-zabbix-agent 28 | zabbix_agent_server: 192.168.3.33 29 | zabbix_agent_serveractive: 192.168.3.33 30 | zabbix_agent_listenip: 0.0.0.0 31 | zabbix_agent_tlsconnect: psk 32 | zabbix_agent_tlsaccept: psk 33 | zabbix_agent_tlspskidentity: my_Identity 34 | zabbix_agent_tlspskfile: /data/certs/zabbix.psk 35 | zabbix_agent_tlspsk_secret: 97defd6bd126d5ba7fa5f296595f82eac905d5eda270207a580ab7c0cb9e8eab 36 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 160 3 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/tasks/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Tasks specific for OpenSuse Systems 3 | 4 | - name: "Include Zabbix gpg ids" 5 | include_vars: zabbix.yml 6 | 7 | - name: "Install zypper repo dependency" 8 | zypper: 9 | name: "{{ item }}" 10 | state: present 11 | with_items: 12 | - python-libxml2 13 | - python-xml 14 | register: dependency 15 | 16 | - name: "Suse | Install basic repo file" 17 | zypper_repository: 18 | repo: "{{ suse[ansible_distribution][ansible_distribution_major_version]['url'] }}" 19 | name: "{{ suse[ansible_distribution][ansible_distribution_major_version]['name'] }}" 20 | state: present 21 | when: 22 | - zabbix_repo == "zabbix" 23 | become: yes 24 | tags: 25 | - zabbix-agent 26 | - init 27 | 28 | - name: "Suse | Install zabbix-agent" 29 | zypper: 30 | name: "{{ zabbix_agent_package }}" 31 | state: "{{ zabbix_agent_package_state }}" 32 | disable_gpg_check: yes 33 | become: yes 34 | tags: 35 | - zabbix-agent 36 | - init 37 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/tasks/userparameter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Installing sample file" 4 | copy: src=sample.conf 5 | dest="{{ agent_include }}/mysql.conf" 6 | owner=zabbix 7 | group=zabbix 8 | mode=0755 9 | notify: restart zabbix-agent 10 | become: yes -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/templates/zabbix-agent.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Zabbix Agent 3 | After=syslog.target 4 | After=network.target 5 | 6 | [Service] 7 | Environment="CONFFILE=/etc/zabbix/zabbix_agentd.conf" 8 | EnvironmentFile=-/etc/sysconfig/zabbix-agent 9 | Type=forking 10 | Restart=on-failure 11 | PIDFile={{zabbix_agent_pidfile}} 12 | KillMode=control-group 13 | ExecStart=/usr/sbin/zabbix_agentd -c $CONFFILE 14 | ExecStop=/bin/kill -SIGTERM $MAINPID 15 | RestartSec=10s 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | [group1] 3 | zabbix-agent-centos 4 | 5 | [group2] 6 | zabbix-agent-debian 7 | zabbix-agent-ubuntu 8 | 9 | [group3] 10 | zabbix-agent-suse -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/tests/test.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - ansible-zabbix-agent -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - role: ansible-zabbix-agent 5 | agent_server: 192.168.3.33 6 | agent_serveractive: 192.168.3.33 7 | zabbix_version: 2.4 8 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for zabbix-agent (Debian) 3 | 4 | zabbix_agent: zabbix-agent 5 | zabbix_agent_service: zabbix-agent 6 | zabbix_agent_conf: zabbix_agentd.conf 7 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for zabbix-agent (RedHat) 3 | 4 | zabbix_agent: zabbix-agent 5 | zabbix_agent_service: zabbix-agent 6 | zabbix_agent_conf: zabbix_agentd.conf 7 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/vars/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for zabbix-agent (Suse) 3 | 4 | zabbix_agent: zabbix-agentd 5 | zabbix_agent_service: zabbix-agentd 6 | zabbix_agent_conf: zabbix-agentd.conf 7 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-agent/vars/zabbix.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | sign_keys: 4 | "34": 5 | stretch: 6 | sign_key: A14FE591 7 | wheezy: 8 | sign_key: 79EA5ED4 9 | jessie: 10 | sign_key: 79EA5ED4 11 | trusty: 12 | sign_key: 79EA5ED4 13 | xenial: 14 | sign_key: E709712C 15 | "32": 16 | wheezy: 17 | sign_key: 79EA5ED4 18 | jessie: 19 | sign_key: 79EA5ED4 20 | trusty: 21 | sign_key: 79EA5ED4 22 | xenial: 23 | sign_key: E709712C 24 | "30": 25 | wheezy: 26 | sign_key: 79EA5ED4 27 | jessie: 28 | sign_key: 79EA5ED4 29 | trusty: 30 | sign_key: 79EA5ED4 31 | xenial: 32 | sign_key: E709712C 33 | "24": 34 | wheezy: 35 | sign_key: 79EA5ED4 36 | jessie: 37 | sign_key: 79EA5ED4 38 | precise: 39 | sign_key: 79EA5ED4 40 | trusty: 41 | sign_key: 79EA5ED4 42 | "22": 43 | squeeze: 44 | sign_key: 79EA5ED4 45 | jessie: 46 | sign_key: 79EA5ED4 47 | precise: 48 | sign_key: 79EA5ED4 49 | trusty: 50 | sign_key: 79EA5ED4 51 | lucid: 52 | sign_key: 79EA5ED4 53 | 54 | suse: 55 | "openSUSE Leap": 56 | "42": 57 | name: server:monitoring 58 | url: http://download.opensuse.org/repositories/server:/monitoring/openSUSE_Leap_{{ ansible_distribution_version }}/ 59 | "openSUSE": 60 | "12": 61 | name: server_monitoring 62 | url: http://download.opensuse.org/repositories/server:/monitoring/openSUSE_{{ ansible_distribution_version }} 63 | "SLES": 64 | "11": 65 | name: server_monitoring 66 | url: http://download.opensuse.org/repositories/server:/monitoring/SLE_11_SP3/ 67 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/.gitignore: -------------------------------------------------------------------------------- 1 | .kitchen/ 2 | .idea/ 3 | .bundle/ 4 | vendor/ 5 | *.retry 6 | 
.molecule 7 | .cache 8 | __pycache__ 9 | .env 10 | .virtualenv 11 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sudo: required 3 | language: python 4 | services: 5 | - docker 6 | 7 | install: 8 | - pip install molecule ansible docker 9 | 10 | script: 11 | - molecule --version 12 | - ansible --version 13 | - molecule test 14 | notifications: 15 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = ../:../../ 3 | hostfile = inventory 4 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for wdijkerman.zabbix 3 | 4 | - name: zabbix-server restarted 5 | service: name=zabbix-server state=restarted enabled=yes 6 | tags: zabbix-server 7 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/inventory: -------------------------------------------------------------------------------- 1 | [postgresql] 2 | zabbix-server-pgsql-centos ansible_connection=docker 3 | zabbix-server-pgsql-debian ansible_connection=docker 4 | zabbix-server-pgsql-ubuntu ansible_connection=docker 5 | 6 | [mysql] 7 | zabbix-server-mysql-centos ansible_connection=docker 8 | zabbix-server-mysql-debian ansible_connection=docker 9 | zabbix-server-mysql-ubuntu ansible_connection=docker 10 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Werner Dijkerman 4 | description: Installing and maintaining zabbix-server for RedHat/Debian/Ubuntu. 5 | company: myCompany.Dotcom 6 | license: license (GPLv3) 7 | min_ansible_version: 1.9 8 | platforms: 9 | - name: EL 10 | versions: 11 | - 6 12 | - 7 13 | - name: Ubuntu 14 | versions: 15 | - lucid 16 | - precise 17 | - trusty 18 | - name: Debian 19 | versions: 20 | - squeeze 21 | - wheezy 22 | categories: 23 | - monitoring 24 | dependencies: [] 25 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | pre_tasks: 4 | - name: "Installing which" 5 | yum: 6 | name: "{{ item }}" 7 | state: installed 8 | with_items: 9 | - net-tools 10 | - which 11 | - libselinux-python 12 | register: installation_dependencies 13 | when: ansible_distribution == 'CentOS' 14 | 15 | - name: "Installing which on NON-CentOS" 16 | apt: 17 | name: "{{ item }}" 18 | state: installed 19 | with_items: 20 | - net-tools 21 | register: installation_dependencies 22 | when: ansible_distribution != 'CentOS' 23 | 24 | - name: "Configure SUDO." 25 | lineinfile: 26 | dest: /etc/sudoers 27 | line: "Defaults !requiretty" 28 | state: present 29 | changed_when: installation_dependencies.changed 30 | 31 | - name: "Make sure the docs are installed." 
32 | lineinfile: 33 | dest: /etc/yum.conf 34 | line: "tsflags=nodocs" 35 | state: absent 36 | changed_when: installation_dependencies.changed 37 | 38 | roles: 39 | - role: geerlingguy.postgresql 40 | when: inventory_hostname in groups['postgresql'] 41 | ignore_errors: True 42 | - role: geerlingguy.mysql 43 | when: inventory_hostname in groups['mysql'] 44 | ignore_errors: True 45 | - role: ansible-zabbix-server 46 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - src: geerlingguy.apache 3 | - src: geerlingguy.mysql 4 | - src: geerlingguy.postgresql 5 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 160 3 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for wdijkerman.zabbix 3 | - name: "Include OS-specific variables" 4 | include_vars: "{{ ansible_os_family }}.yml" 5 | tags: 6 | - zabbix-server 7 | - name: "Install the correct repository" 8 | include: "RedHat.yml" 9 | when: 10 | - ansible_os_family == "RedHat" 11 | tags: 12 | - zabbix-server 13 | - name: "Installing the mysql database" 14 | include: "mysql.yml" 15 | when: 16 | - database_type_long == "mysql" 17 | tags: 18 | - zabbix-server 19 | - name: "Configure zabbix-server" 20 | template: 21 | src: zabbix_server.conf.j2 22 | dest: /etc/zabbix/zabbix_server.conf 23 | owner: zabbix 24 | group: zabbix 25 | mode: 0644 26 | notify: 27 | - zabbix-server restarted 28 | tags: 29 | - zabbix-server 30 | - init 31 | - config 32 | 33 | 34 | - name: "Create include dir zabbix-server" 35 | file: 36 | path: "{{ zabbix_server_include }}" 37 | owner: zabbix 38 | group: zabbix 39 | state: directory 40 | mode: 0755 41 | tags: 42 | - zabbix-server 43 | - init 44 | - config 45 | 46 | - name: Trigger handlers immediately in case Jenkins was installed 47 | meta: flush_handlers 48 | tags: 49 | - zabbix-server 50 | 51 | - name: "Zabbix-server started" 52 | systemd: 53 | name: zabbix-server 54 | state: started 55 | enabled: yes 56 | tags: 57 | - zabbix-server 58 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/templates/zabbix-server.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Zabbix Server 3 | After=syslog.target 4 | After=network.target 5 | 6 | [Service] 7 | Environment="CONFFILE=/etc/zabbix/zabbix_server.conf" 8 | EnvironmentFile=-/etc/sysconfig/zabbix-server 9 | Type=forking 10 | Restart=on-failure 11 | PIDFile={{zabbix_server_pidfile}} 12 | KillMode=control-group 13 | ExecStart=/usr/sbin/zabbix_server -c $CONFFILE 14 | ExecStop=/bin/kill -SIGTERM $MAINPID 15 | RestartSec=10s 16 | TimeoutSec=0 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/tests/inventory: -------------------------------------------------------------------------------- 1 | [postgresql] 2 | zabbix-server-pgsql-centos ansible_connection=docker 3 | 4 | [mysql] 5 | zabbix-server-mysql-centos 
ansible_connection=docker 6 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - ansible-zabbix-agent -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apache_user: www-data 3 | apache_group: www-data 4 | apache_log: apache2 5 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apache_user: apache 3 | apache_group: apache 4 | apache_log: httpd 5 | -------------------------------------------------------------------------------- /roles/ansible-zabbix-server/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for dj-wasabi.zabbix 3 | -------------------------------------------------------------------------------- /roles/common/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for common 3 | -------------------------------------------------------------------------------- /roles/common/files/hosts: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost 2 | 255.255.255.255 broadcasthost 3 | ::1 localhost 4 | 5 | 6 | 7 | -------------------------------------------------------------------------------- /roles/common/files/sources16.list: -------------------------------------------------------------------------------- 1 | deb http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse 2 | deb http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse 3 | deb http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted universe multiverse 4 | deb http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse 5 | ## proposed (pre-release) repository 6 | deb http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse 7 | # source packages 8 | deb-src http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse 9 | deb-src http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse 10 | deb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted universe multiverse 11 | deb-src http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse 12 | ## proposed (pre-release) repository 13 | deb-src http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse 14 | # Canonical partners and extras 15 | #deb http://archive.canonical.com/ubuntu/ xenial partner 16 | #deb http://extras.ubuntu.com/ubuntu/ xenial main 17 | -------------------------------------------------------------------------------- /roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for common 3 | - name: add host to zabbix-server 4 | zabbix_host: 5 | server_url: http://{{zabbix_front_ip}}:{{zabbix_front_port}} 6 | login_user: Admin 7 | login_password: zabbix 8 | host_name: "{{innerip}}" 9 | visible_name: "{{innerip}}" 10 | host_groups: 11 | - Linux servers 12 | link_templates: 13 | - Template OS Linux 14 | - Template ICMP Ping 15 | status: enabled 16 | state: present 17 | force: false 18 | inventory_mode: automatic 19 | interfaces: 20 | - type: 1 21 | main: 1 22 | useip: 1 23 | ip: "{{innerip}}" 24 | dns: "" 25 | port: 10050 26 | -------------------------------------------------------------------------------- /roles/common/tasks/centos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: conf yum.conf 3 | template: src="yum.conf" dest="/etc" owner="root" group="root" mode="0755" 4 | tags: 5 | - common 6 | 7 | - name: use ali source 8 | shell: "wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo" 9 | tags: 10 | - common 11 | 12 | - name: yum install 13 | yum: 14 | name: "{{item}}" 15 | update_cache: true 16 | with_items: 17 | - epel-release 18 | tags: 19 | - common 20 | 21 | - name: yum clean all and makecache 22 | command: "{{item}}" 23 | with_items: 24 | - yum clean all 25 | - yum update -y 26 | # - yum makecache 27 | args: 28 | warn: no 29 | tags: 30 | - common 31 | 32 | - name: yum install 33 | yum: 34 | name: "{{item}}" 35 | update_cache: true 36 |
with_items: 37 | - wget 38 | - telnet 39 | - unzip 40 | - python-pip 41 | tags: 42 | - common 43 | 44 | - name: pip install 45 | pip: 46 | name: "{{item}}" 47 | with_items: 48 | - oss2 49 | - passlib ## for http base auth 50 | - zabbix-api ## for zabbix api 51 | tags: 52 | - common 53 | -------------------------------------------------------------------------------- /roles/common/tasks/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy sources.list 3 | copy: src="sources{{ansible_distribution_major_version}}.list" dest="/etc/apt/" force=yes 4 | tags: 5 | - common 6 | 7 | 8 | - apt: update_cache=yes cache_valid_time=72000 9 | ignore_errors: yes 10 | tags: 11 | - common 12 | 13 | - name: install software 14 | apt: name={{item}} state=present allow_unauthenticated=yes 15 | with_items: 16 | - wget 17 | - unzip 18 | - openssh-server 19 | tags: 20 | - common 21 | 22 | # - name: disable ufw 23 | # ufw: state='disabled' 24 | -------------------------------------------------------------------------------- /roles/common/templates/yum.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | cachedir=/var/cache/yum/$basearch/$releasever 3 | keepcache=1 4 | debuglevel=2 5 | logfile=/var/log/yum.log 6 | exactarch=1 7 | obsoletes=1 8 | gpgcheck=1 9 | plugins=1 10 | installonly_limit=5 11 | bugtracker_url=http://bugs.centos.org/set_project.php?project_id=16&ref=http://bugs.centos.org/bug_report_page.php?category=yum 12 | distroverpkg=centos-release -------------------------------------------------------------------------------- /roles/common/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for common 3 | -------------------------------------------------------------------------------- /roles/hadoop/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/hadoop/README.md: -------------------------------------------------------------------------------- 1 | Hadoop hdfs 2 | ========= 3 | NOTE: every machine's hostname MUST be different!!! 4 | NOTE: every machine's hostname MUST be different!!! 5 | NOTE: every machine's hostname MUST be different!!! 6 | NOTE: every machine's hostname MUST be different!!! 7 | NOTE: every machine's hostname MUST be different!!!
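One way to guarantee unique hostnames (a sketch only, not part of this role; it assumes the inventory names are themselves unique) is to set each machine's hostname to its inventory name before applying the role:

    - hosts: all
      become: yes
      tasks:
        - name: Give every machine a unique hostname
          hostname:
            name: "{{ inventory_hostname }}"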
8 | -------------------------------------------------------------------------------- /roles/hadoop/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for hadoop2 3 | hadoop_full_version: 2.6.5 4 | hadoop_namenode_hostname: hadoopnamenode 5 | hadoop_tar_name: "hadoop-{{hadoop_full_version}}.tar.gz" 6 | hadoopnamenode_port: 8020 7 | hadoop_download_url: "http://mirrors.hust.edu.cn/apache/hadoop/common/hadoop-{{hadoop_full_version}}/{{hadoop_tar_name}}" 8 | hadoop_user: hadoop 9 | hadoop_group: hadoop 10 | hadoop_user_home: "/home/{{hadoop_user}}" 11 | hadoop_home: "{{hadoop_user_home}}/hadoop" 12 | hadoop_log_directory: "{{hadoop_home}}/log" 13 | -------------------------------------------------------------------------------- /roles/hadoop/files/authorized_keys: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key 2 | -------------------------------------------------------------------------------- /roles/hadoop/files/id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key 2 | -------------------------------------------------------------------------------- /roles/hadoop/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart hadhoop datanode service 3 | service: 4 | name: hadoop-datanode 5 | state: restarted 6 | tags: 7 | hadoop 8 | 9 | - name: restart yarn-resourcemanager 10 | action: shell {{ hadoop_home }}/sbin/yarn-daemon.sh stop resourcemanager; {{ hadoop_home }}/sbin/yarn-daemon.sh start resourcemanager 11 | when: hadoop_is_namenode is defined and hadoop_is_namenode == "true" 12 | 13 | - name: restart hadhoop namenode service 14 | service: 15 | name: hadoop-namenode 16 | state: restarted 17 | tags: 18 | hadoop 19 | 20 | - name: restart yarn-nodemanager 21 | action: shell {{ hadoop_home }}/sbin/yarn-daemon.sh stop nodemanager; {{ hadoop_home }}/sbin/yarn-daemon.sh start nodemanager 22 | when: hadoop_is_datanode is defined and hadoop_is_datanode == "true" 23 | 24 | - name: refreshDFSNodes 25 | command: "{{ hadoop_home }}/bin/hdfs dfsadmin -refreshNodes" 26 | when: hadoop_is_namenode is defined and hadoop_is_namenode == "true" 27 | ignore_errors: yes 28 | 29 | - name: refreshYarnNodes 30 | command: "{{ hadoop_home }}/bin/yarn rmadmin -refreshNodes" 31 | when: hadoop_is_namenode is defined and hadoop_is_namenode == "true" 32 | ignore_errors: yes 33 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/configuration.xsl: -------------------------------------------------------------------------------- 1 | 2 | 
3 | Licensed to the Apache Software Foundation (ASF) under one or more 4 | contributor license agreements. See the NOTICE file distributed with 5 | this work for additional information regarding copyright ownership. 6 | The ASF licenses this file to You under the Apache License, Version 2.0 7 | (the "License"); you may not use this file except in compliance with 8 | the License. You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | --> 18 | <xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0"> 19 | <xsl:output method="html"/> 20 | <xsl:template match="configuration"> 21 | <html> 22 | <body> 23 | <table border="1"> 24 | <tr> 25 | <td>name</td> 26 | <td>value</td> 27 | <td>description</td> 28 | </tr> 29 | <xsl:for-each select="property"> 30 | <tr> 31 | <td><a name="{name}"><xsl:value-of select="name"/></a></td> 32 | <td><xsl:value-of select="value"/></td> 33 | <td><xsl:value-of select="description"/></td> 34 | </tr> 35 | </xsl:for-each> 36 | </table> 37 | </body> 38 | </html> 39 | </xsl:template> 40 | </xsl:stylesheet>
41 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/container-executor.cfg: -------------------------------------------------------------------------------- 1 | yarn.nodemanager.linux-container-executor.group=#configured value of yarn.nodemanager.linux-container-executor.group 2 | banned.users=#comma separated list of users who can not run applications 3 | min.user.id=1000#Prevent other super-users 4 | allowed.system.users=##comma separated list of system users who CAN run applications 5 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/core-site.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 3 | <!-- 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. See accompanying LICENSE file. 15 | --> 16 | 17 | <!-- Put site-specific property overrides in this file. --> 18 | 19 | <configuration> 20 | <property> 21 | <name>fs.defaultFS</name> 22 | <value>hdfs://{{hadoop_namenode_ip}}:{{hadoopnamenode_port}}</value> 23 | </property> 24 | <property> 25 | <name>io.file.buffer.size</name> 26 | <value>131072</value> 27 | </property> 28 | <property> 29 | <name>hadoop.tmp.dir</name> 30 | <value>file://{{hadoop_home}}/tmp</value> 31 | <description>A base for other temporary directories.</description> 32 | </property> 33 | </configuration> 34 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/httpfs-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set httpfs specific environment variables here.
17 | 18 | # Settings for the Embedded Tomcat that runs HttpFS 19 | # Java System properties for HttpFS should be specified in this variable 20 | # 21 | # export CATALINA_OPTS= 22 | 23 | # HttpFS logs directory 24 | # 25 | # export HTTPFS_LOG=${HTTPFS_HOME}/logs 26 | 27 | # HttpFS temporary directory 28 | # 29 | # export HTTPFS_TEMP=${HTTPFS_HOME}/temp 30 | 31 | # The HTTP port used by HttpFS 32 | # 33 | # export HTTPFS_HTTP_PORT=14000 34 | 35 | # The Admin port used by HttpFS 36 | # 37 | # export HTTPFS_ADMIN_PORT=`expr ${HTTPFS_HTTP_PORT} + 1` 38 | 39 | # The hostname HttpFS server runs on 40 | # 41 | # export HTTPFS_HTTP_HOSTNAME=`hostname -f` 42 | 43 | # Indicates if HttpFS is using SSL 44 | # 45 | # export HTTPFS_SSL_ENABLED=false 46 | 47 | # The location of the SSL keystore if using SSL 48 | # 49 | # export HTTPFS_SSL_KEYSTORE_FILE=${HOME}/.keystore 50 | 51 | # The password of the SSL keystore if using SSL 52 | # 53 | # export HTTPFS_SSL_KEYSTORE_PASS=password 54 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/httpfs-signature.secret: -------------------------------------------------------------------------------- 1 | hadoop httpfs secret 2 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/httpfs-site.xml: -------------------------------------------------------------------------------- 1 | 2 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/kms-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. See accompanying LICENSE file. 14 | # 15 | 16 | # Set kms specific environment variables here. 
17 | 18 | # Settings for the Embedded Tomcat that runs KMS 19 | # Java System properties for KMS should be specified in this variable 20 | # 21 | # export CATALINA_OPTS= 22 | 23 | # KMS logs directory 24 | # 25 | # export KMS_LOG=${KMS_HOME}/logs 26 | 27 | # KMS temporary directory 28 | # 29 | # export KMS_TEMP=${KMS_HOME}/temp 30 | 31 | # The HTTP port used by KMS 32 | # 33 | # export KMS_HTTP_PORT=16000 34 | 35 | # The Admin port used by KMS 36 | # 37 | # export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1` 38 | 39 | # The maximum number of Tomcat handler threads 40 | # 41 | # export KMS_MAX_THREADS=1000 42 | 43 | # The location of the SSL keystore if using SSL 44 | # 45 | # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore 46 | 47 | # The password of the SSL keystore if using SSL 48 | # 49 | # export KMS_SSL_KEYSTORE_PASS=password 50 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/mapred-env.cmd: -------------------------------------------------------------------------------- 1 | @echo off 2 | @rem Licensed to the Apache Software Foundation (ASF) under one or more 3 | @rem contributor license agreements. See the NOTICE file distributed with 4 | @rem this work for additional information regarding copyright ownership. 5 | @rem The ASF licenses this file to You under the Apache License, Version 2.0 6 | @rem (the "License"); you may not use this file except in compliance with 7 | @rem the License. You may obtain a copy of the License at 8 | @rem 9 | @rem http://www.apache.org/licenses/LICENSE-2.0 10 | @rem 11 | @rem Unless required by applicable law or agreed to in writing, software 12 | @rem distributed under the License is distributed on an "AS IS" BASIS, 13 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | @rem See the License for the specific language governing permissions and 15 | @rem limitations under the License. 16 | 17 | set HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 18 | 19 | set HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 20 | 21 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/mapred-env.sh: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ 17 | 18 | export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=1000 19 | 20 | export HADOOP_MAPRED_ROOT_LOGGER=INFO,RFA 21 | 22 | #export HADOOP_JOB_HISTORYSERVER_OPTS= 23 | #export HADOOP_MAPRED_LOG_DIR="" # Where log files are stored. $HADOOP_MAPRED_HOME/logs by default. 24 | #export HADOOP_JHS_LOGGER=INFO,RFA # Hadoop JobSummary logger. 
25 | #export HADOOP_MAPRED_PID_DIR= # The pid files are stored. /tmp by default. 26 | #export HADOOP_MAPRED_IDENT_STRING= #A string representing this instance of hadoop. $USER by default 27 | #export HADOOP_MAPRED_NICENESS= #The scheduling priority for daemons. Defaults to 0. 28 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/mapred-site.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0"?> 2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> 3 | <!-- 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. See accompanying LICENSE file. 15 | --> 16 | 17 | <!-- Put site-specific property overrides in this file. --> 18 | 19 | <configuration> 20 | <property> 21 | <name>mapreduce.framework.name</name> 22 | <value>yarn</value> 23 | </property> 24 | <property> 25 | <name>mapreduce.jobhistory.address</name> 26 | <value>{{hadoop_namenode_hostname}}:10020</value> 27 | </property> 28 | <property> 29 | <name>mapreduce.jobhistory.webapp.address</name> 30 | <value>{{hadoop_namenode_hostname}}:19888</value> 31 | </property> 32 | </configuration> 33 | -------------------------------------------------------------------------------- /roles/hadoop/templates/etc/hadoop/slaves: -------------------------------------------------------------------------------- 1 | {{hadoopdatanode_hostnames | map(attribute='hostname') | join('\n')}} 2 | -------------------------------------------------------------------------------- /roles/hadoop/templates/hadoop-datanode.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Hadoop HDFS DataNode 3 | 4 | [Service] 5 | Type=forking 6 | User={{hadoop_user}} 7 | Group={{hadoop_group}} 8 | Restart=on-failure 9 | Environment=JAVA_HOME={{JAVA_HOME}} 10 | Environment=HADOOP_HOME={{ hadoop_home }} 11 | WorkingDirectory={{hadoop_home}} 12 | ExecStart=/bin/sh {{ hadoop_home }}/sbin/hadoop-daemon.sh --script hdfs start datanode 13 | ExecStop=/bin/sh {{ hadoop_home }}/sbin/hadoop-daemon.sh --script hdfs stop datanode 14 | PIDFile=/tmp/hadoop-datanode.pid 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /roles/hadoop/templates/hadoop-namenode.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Hadoop HDFS NameNode 3 | 4 | [Service] 5 | Type=simple 6 | User={{hadoop_user}} 7 | Group={{hadoop_group}} 8 | Restart=on-failure 9 | Environment=JAVA_HOME={{JAVA_HOME}} 10 | Environment=HADOOP_HOME={{ hadoop_home }} 11 | WorkingDirectory={{hadoop_home}} 12 | ExecStart=/bin/sh {{ hadoop_home }}/sbin/hadoop-daemon.sh --script hdfs start namenode 13 | ExecStop=/bin/sh {{ hadoop_home }}/sbin/hadoop-daemon.sh --script hdfs stop namenode 14 | PIDFile=/tmp/hadoop-dfs.pid 15 | TimeoutStartSec=2min 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /roles/hadoop/templates/ssh.conf: -------------------------------------------------------------------------------- 1 | Host * 2 | StrictHostKeyChecking no 3 | -------------------------------------------------------------------------------- /roles/hadoop/templates/yarn-nodemanager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=yarn-nodemanager 3 | 4 | [Service] 5 | Type=simple 6 | User={{hadoop_user}} 7 | Group={{hadoop_group}} 8 | Restart=on-failure 9 | WorkingDirectory={{hadoop_home}} 10 | ExecStart=/bin/sh {{ hadoop_home }}/sbin/yarn-daemon.sh start nodemanager 11 | ExecStop=/bin/sh {{ hadoop_home }}/sbin/yarn-daemon.sh stop nodemanager 12 | PIDFile=/tmp/yarn-nodemanager.pid 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 |
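The hadoop templates and handlers above lean on several variables that are not defined in roles/hadoop/defaults/main.yml: hadoop_namenode_ip (core-site.xml), hadoopdatanode_hostnames (the slaves template), hadoop_is_namenode and hadoop_is_datanode (the handlers compare them against the string "true"), and JAVA_HOME (the systemd units, normally supplied by the jdk8 role). A minimal sketch of group_vars that would satisfy them; the hostnames and the address below are hypothetical, not taken from the repository:

# group_vars/hadoop.yml (illustrative sketch only)
hadoop_namenode_ip: 192.168.33.10    # rendered into fs.defaultFS in core-site.xml
hadoop_is_namenode: "true"           # handlers test for the literal string "true"
hadoop_is_datanode: "true"
hadoopdatanode_hostnames:            # the slaves template joins the 'hostname' attribute with newlines
  - { hostname: hadoopdatanode1 }
  - { hostname: hadoopdatanode2 }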
-------------------------------------------------------------------------------- /roles/hadoop/templates/yarn-resourcemanager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=yarn-resourcemanager 3 | 4 | [Service] 5 | Type=simple 6 | User={{hadoop_user}} 7 | Group={{hadoop_group}} 8 | Restart=on-failure 9 | WorkingDirectory={{hadoop_home}} 10 | ExecStart=/bin/sh {{ hadoop_home }}/sbin/yarn-daemon.sh start resourcemanager 11 | ExecStop=/bin/sh {{ hadoop_home }}/sbin/yarn-daemon.sh stop resourcemanager 12 | PIDFile=/tmp/yarn-resourcemanager.pid 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /roles/hadoop/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/hadoop/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - hadoop -------------------------------------------------------------------------------- /roles/hadoop/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for hadoop 3 | -------------------------------------------------------------------------------- /roles/hbase/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/hbase/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/hbase/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for hbase 3 | hbase_version: 1.2.6 4 | hbase_tar_name: "hbase-{{hbase_version}}-bin.tar.gz" 5 | hbase_download_url: "http://apache.stu.edu.tw/hbase/{{hbase_version}}/{{hbase_tar_name}}" 6 | hbase_user: hbase 7 | hbase_group: hbase 8 | hbase_user_home: "/home/{{hbase_user}}" 9 | hbase_home: "{{hbase_user_home}}/hbase" 10 | hbase_log_directory: "{{hbase_home}}/logs" 11 | hbase_tmp_dir: "{{hbase_user_home}}/tmp" 12 | hbase_local_dir: "{{hbase_user_home}}/local_storage" 13 | 14 | hadoop_native_version: 2.6.0 15 | hadoop_native_lib: "hadoop-native-64-{{hadoop_native_version}}.tar" 16 | hadoop_native_lib_url: "http://dl.bintray.com/sequenceiq/sequenceiq-bin/hadoop-native-64-2.6.0.tar" 17 | -------------------------------------------------------------------------------- /roles/hbase/files/authorized_keys: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key 2 | -------------------------------------------------------------------------------- /roles/hbase/files/hadoop-native-64-2.6.0.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zacker330/devops-platform/54ee0103cea6169db290c1c1af475ade98e3cdf0/roles/hbase/files/hadoop-native-64-2.6.0.tar -------------------------------------------------------------------------------- /roles/hbase/files/id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key 2 | -------------------------------------------------------------------------------- /roles/hbase/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for hbase 3 | - name: Restart Hbase 4 | service: name=hbase state=restarted 5 | tags: 6 | - hbase 7 | -------------------------------------------------------------------------------- /roles/hbase/templates/conf/backup-masters: -------------------------------------------------------------------------------- 1 | {{hbase_backup_masters}} 2 | 
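The backup-masters template above and the regionservers template that follows are driven by inventory-level variables rather than the role's defaults. A hedged sketch of what a group_vars file for the hbase role might provide; the hostname and addresses are placeholders, not values from the repository:

# group_vars/hbase.yml (illustrative sketch only)
hbase_backup_masters: hbasemaster2   # written verbatim into conf/backup-masters
hbase_regionservers:                 # the regionservers template joins the 'ip' attribute with newlines
  - { ip: 192.168.33.21 }
  - { ip: 192.168.33.22 }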
-------------------------------------------------------------------------------- /roles/hbase/templates/conf/regionservers: -------------------------------------------------------------------------------- 1 | {{hbase_regionservers | map(attribute='ip') | join('\n')}} 2 | -------------------------------------------------------------------------------- /roles/hbase/templates/hbase-master.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Hbase 3 | 4 | [Service] 5 | Type=simple 6 | User={{hbase_user}} 7 | Group={{hbase_group}} 8 | Restart=on-failure 9 | Environment=JAVA_HOME={{JAVA_HOME}} 10 | Environment=HBASE_HOME={{hbase_home}} 11 | Environment=HBASE_CONF_DIR={{hbase_home}}/conf 12 | WorkingDirectory={{hbase_home}} 13 | ExecStart={{hbase_home}}/bin/hbase-daemon.sh start master 14 | ExecStop={{hbase_home}}/bin/hbase-daemon.sh stop master 15 | PIDFile=/tmp/hbase-hbase-master.pid 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /roles/hbase/templates/hbase-regionserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Hbase 3 | 4 | [Service] 5 | Type=simple 6 | User={{hbase_user}} 7 | Group={{hbase_group}} 8 | Restart=on-failure 9 | Environment=JAVA_HOME={{JAVA_HOME}} 10 | Environment=HBASE_HOME={{hbase_home}} 11 | Environment=HBASE_CONF_DIR={{hbase_home}}/conf 12 | WorkingDirectory={{hbase_home}} 13 | ExecStart={{hbase_home}}/bin/hbase-daemon.sh start regionserver 14 | ExecStop={{hbase_home}}/bin/hbase-daemon.sh stop regionserver 15 | PIDFile=/tmp/hbase-hbase-master.pid 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /roles/hbase/templates/ssh.conf: -------------------------------------------------------------------------------- 1 | Host * 2 | StrictHostKeyChecking no 3 | -------------------------------------------------------------------------------- /roles/hbase/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/hbase/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - hbase -------------------------------------------------------------------------------- /roles/hbase/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for hbase 3 | -------------------------------------------------------------------------------- /roles/jdk8/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. 
Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/jdk8/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for jdk8 3 | jdk8_path: jdk-8u92-linux-x64.tar.gz 4 | jdk8_version_name: jdk1.8.0_92 5 | JAVA_HOME: /usr/lib/jvm/java 6 | JAVA_HOME_alias: 7 | - /usr/local/java 8 | -------------------------------------------------------------------------------- /roles/jdk8/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for jdk8 3 | -------------------------------------------------------------------------------- /roles/jdk8/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - stat: path="/usr/lib/jvm/{{jdk8_version_name}}/bin/java" 3 | register: jvm 4 | tags: 5 | - jdk 6 | 7 | - name: copy jdk 8 | copy: src="{{jdk8_path}}" dest="/tmp" 9 | tags: 10 | - jdk 11 | 12 | - name: mkdir for Java 13 | file: path="/usr/lib/jvm/" state=directory mode="u=rwx,go=rx" 14 | tags: 15 | - jdk 16 | 17 | - name: install JDK 18 | unarchive: src="/tmp/{{jdk8_path}}" dest="/usr/lib/jvm/" mode="go-w" remote_src=yes 19 | tags: 20 | - jdk 21 | 22 | - name: "{{JAVA_HOME}} -> /usr/lib/jvm/{{jdk8_version_name}}" 23 | file: src="/usr/lib/jvm/{{jdk8_version_name}}" dest="{{JAVA_HOME}}" state=link 24 | tags: 25 | - jdk 26 | 27 | - name: set JAVA_HOME 28 | lineinfile: dest='/etc/profile' line='export JAVA_HOME={{JAVA_HOME}}' state=present 29 | tags: 30 | - jdk 31 | 32 | - file: src="/usr/lib/jvm/{{jdk8_version_name}}" dest="{{item}}" state=link 33 | with_items: "{{JAVA_HOME_alias}}" 34 | tags: 35 | - jdk 36 | 37 | 38 | 39 | - lineinfile: dest='/etc/profile' line='export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' state=present 40 | tags: 41 | - jdk 42 | 43 | - lineinfile: dest='/etc/profile' line='export PATH="$PATH:$JAVA_HOME/bin"' state=present 44 | tags: 45 | - jdk 46 | -------------------------------------------------------------------------------- /roles/jdk8/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for jdk8 3 | -------------------------------------------------------------------------------- /roles/jenkins-agent/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - 
pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/jenkins-agent/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /roles/jenkins-agent/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for jenkins-agent 3 | -------------------------------------------------------------------------------- /roles/jenkins-agent/templates/swarm-client.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description={{ jenkins_swarm_task_name }} 3 | After=network.target 4 | After=docker.service 5 | 6 | [Service] 7 | Type=simple 8 | PIDFile=/var/run/{{ jenkins_swarm_service_name }}.pid 9 | ExecStartPre=/bin/rm -f /var/run/{{ jenkins_swarm_service_name }}.pid 10 | ExecStart=/usr/bin/java \ 11 | {% if jenkins_agent_jvm_args is defined %} 12 | {{ jenkins_agent_jvm_args|join(' ') }} \ 13 | {% endif %} 14 | {% if jenkins_agent_log_file is defined %} 15 | -logFile {{ jenkins_agent_log_file }} \ 16 | {% endif %} 17 | -jar {{ swarm_jar.dest }} \ 18 | -master 'http://{{ jenkins_agent_master }}:{{ jenkins_agent_master_port }}' \ 19 | {% if jenkins_agent_username is defined %} 20 | -username '{{ jenkins_agent_username }}' \ 21 | {% endif %} 22 | {% if jenkins_agent_password is defined %} 23 | -password '{{ jenkins_agent_password }}' \ 24 | {% endif %} 25 | -name '{{ jenkins_agent_name }}' \ 26 | {% if jenkins_agent_labels_file is defined %} 27 | -labelsFile '{{ jenkins_agent_labels_file }}' \ 28 | {% else %} 29 | -labels '{{ jenkins_agent_labels }}' \ 30 | {% endif %} 31 | {% if jenkins_agent_additional_args is defined %} 32 | {% for arg in jenkins_agent_additional_args %} 33 | -{{ arg }} \ 34 | {% endfor %} 35 | {% endif %} 36 | -executors '{{ jenkins_agent_num_executors }}' 37 | Restart=on-abort 38 | 39 | [Install] 40 | WantedBy=default.target 41 | -------------------------------------------------------------------------------- /roles/jenkins-agent/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/jenkins-agent/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - jenkins-agent -------------------------------------------------------------------------------- /roles/jenkins-agent/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for jenkins-agent 3 | -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- 
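The swarm-client.service template above is driven entirely by play-level variables, and swarm_jar is presumably registered by the role's download task (not shown in this excerpt). A sketch of the variables a play might pass to the jenkins-agent role; every value below is an illustrative assumption:

# example vars for the jenkins-agent role (assumed values)
jenkins_swarm_task_name: Jenkins swarm agent
jenkins_swarm_service_name: swarm-client
jenkins_agent_master: 192.168.33.30          # Jenkins master reachable over HTTP
jenkins_agent_master_port: 8666
jenkins_agent_name: "{{ ansible_hostname }}"
jenkins_agent_labels: linux docker           # used only when jenkins_agent_labels_file is not defined
jenkins_agent_num_executors: 2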
/roles/jenkins-openresty-conf/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for php-openresty 3 | jenkins_http_port: 8666 4 | openresty_jenkins_listen_port: 80 5 | -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/files/logserver.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDVDCCAjwCCQCDqUe898aVTTANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQGEwJD 3 | TjESMBAGA1UECAwJR3VhbmdEb25nMREwDwYDVQQHDAhTaGVuWmhlbjEOMAwGA1UE 4 | CgwFTWlkZWExDjAMBgNVBAsMBXNtYXJ0MRYwFAYDVQQDDA0xMjAuMjcuMTkzLjM4 5 | MB4XDTE3MDMyOTA3MTMxMFoXDTE4MDMyOTA3MTMxMFowbDELMAkGA1UEBhMCQ04x 6 | EjAQBgNVBAgMCUd1YW5nRG9uZzERMA8GA1UEBwwIU2hlblpoZW4xDjAMBgNVBAoM 7 | BU1pZGVhMQ4wDAYDVQQLDAVzbWFydDEWMBQGA1UEAwwNMTIwLjI3LjE5My4zODCC 8 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMEHwUshcS1Rd7hht140LSTu 9 | f6vCy/plMu/t31jlq5qLLlMI28/blgB0AFLvakfd5hZX/aCXVCoI92RvJ/NXX41y 10 | TfiRLCATtxJWw5/p9b19KBIfiSpTtcRV8OX++Xeg4CfCgDz/L1hh87h2iK/kLk5b 11 | f2yaRPR43aWbPxEraK4ywPDi+BQlJ8h1SLh7meeBHuidnUXamlfQW7qINTos3T7K 12 | fZ2EaSclnbWTU3nDA8SwDRAaEKXSBJPiMuOaoFat4/WzqCMlpV/j2YjX0yQc6bxY 13 | 9ysROfIr73OrIF1crpDMD29tkyCMpFroh+rCicVuLE3Zdlxf1qGeW31DYgO7SDsC 14 | AwEAATANBgkqhkiG9w0BAQUFAAOCAQEAa1piKj+r0oAN0j1i427AcEvf+NmrQzZL 15 | sNNdM2YTArB8cOLmlTfO5BqTkLaRP21jnXPJ6ar4vca+kqQysjiam/KSyHqR4ZvF 16 | f47QHsdLwBoqkTQ/NmppP5+ndsx2vg4ZgmW4Eky2DAsx4pM4mIGoXKt2XVCo6vq8 17 | Bkv9yFhLWSulWvEZYVD683CLUcuaPTe8FRL0iu7v3MEsEPIzxQdG7k6BK68nW961 18 | vJRmZRI8V9n/xOeBsx4stjsGshzMPAseBson+u3PN3Pb+hSXtV3q0fui8ROKHAxM 19 | AG70tH79Ttx6tLeoWeBzwGsNVByQkfebb4dPKNcmW3OjGOn0Npkf3g== 20 | -----END CERTIFICATE----- 21 | -------------------------------------------------------------------------------- 
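The jenkins-openresty-conf handlers, tasks, and jenkins.conf template that follow glue the openresty and jenkins roles together through shared variables (nginx_servers_conf_path comes from the openresty role, jenkins_hostname and jenkins_url_prefix from the jenkins side). A hedged example of a play wiring them up; the host group and address are hypothetical:

# illustrative play (assumed host group and values)
- hosts: reverse_proxy
  become: true
  vars:
    jenkins_hostname: 192.168.33.40
    jenkins_url_prefix: /jenkins
  roles:
    - openresty                # installs nginx and defines nginx_servers_conf_path
    - jenkins-openresty-conf   # templates jenkins.conf into the servers conf dir and restarts nginx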
/roles/jenkins-openresty-conf/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: name="nginx" state="restarted" 4 | -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: template jenkins.conf 3 | template: 4 | src: jenkins.conf 5 | dest: "{{nginx_servers_conf_path}}/" 6 | owner: root 7 | group: root 8 | mode: "u=rxw,og=rx" 9 | force: true 10 | tags: 11 | - jenkins_openresty_conf 12 | notify: restart nginx 13 | -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/templates/jenkins.conf: -------------------------------------------------------------------------------- 1 | 2 | server { 3 | listen {{openresty_jenkins_listen_port}}; 4 | 5 | location /jenkins { 6 | proxy_pass http://{{jenkins_hostname}}:{{jenkins_http_port}}{{jenkins_url_prefix}}; 7 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 8 | proxy_set_header X-Forwarded-Proto $scheme; 9 | proxy_set_header X-Forwarded-Host $http_host; 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - php-openresty -------------------------------------------------------------------------------- /roles/jenkins-openresty-conf/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for php-openresty 3 | -------------------------------------------------------------------------------- /roles/jenkins/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart jenkins 3 | service: name=jenkins state=restarted 4 | 5 | - name: configure default users 6 | template: 7 | src: basic-security.groovy 8 | dest: "{{ jenkins_home }}/init.groovy.d/basic-security.groovy" 9 | register: jenkins_users_config 10 | -------------------------------------------------------------------------------- /roles/jenkins/templates/basic-security.groovy: -------------------------------------------------------------------------------- 1 | #!groovy 2 | import hudson.security.* 3 | import jenkins.model.* 4 | 5 | def instance = Jenkins.getInstance() 6 | 7 | println "--> Checking if security has been set already" 8 | 9 | if (!instance.isUseSecurity()) { 10 | println "--> creating local user 'admin'" 11 | 12 | def hudsonRealm = new HudsonPrivateSecurityRealm(false) 13 | hudsonRealm.createAccount('{{ jenkins_admin_username }}', '{{ jenkins_admin_password }}') 14 | instance.setSecurityRealm(hudsonRealm) 15 | 16 | def strategy = new FullControlOnceLoggedInAuthorizationStrategy() 17 | instance.setAuthorizationStrategy(strategy) 18 | instance.save() 19 | } 20 | -------------------------------------------------------------------------------- /roles/jenkins/templates/jenkins.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | 
Description=Jenkins 3 | After=network.target 4 | 5 | [Service] 6 | User={{jenkins_user}} 7 | Group={{jenkins_group}} 8 | Environment="JAVA_HOME={{JAVA_HOME}}" 9 | Environment="JAVA_OPTS=-XX:PermSize=1024M -XX:MaxPermSize=4048M -Xmn2048M -Xms1024M -Xmx4048M -Djenkins.install.runSetupWizard=false -Duser.timezone=Asia/Shanghai -DJENKINS_HOME={{ jenkins_home }} -Djava.io.tmpdir={{ jenkins_home_tmp }} " 10 | 11 | ExecStart={{JAVA_HOME}}/bin/java $JAVA_OPTS $TIMEZONE -jar {{ jenkins_home }}/jenkins.war --httpPort={{ jenkins_http_port }} --prefix={{ jenkins_url_prefix }} 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /roles/kafka-ansible-role/.gitignore: -------------------------------------------------------------------------------- 1 | **/kafka_*.tgz 2 | **/zookeeper-*.tar.gz 3 | **/jdk*.tar.gz 4 | **/scala-*.tgz 5 | provision/files/kafka-monitor/*.jar 6 | 7 | .gradle 8 | build/ 9 | .vagrant 10 | 11 | # Ignore Gradle GUI config 12 | gradle-app.setting 13 | 14 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored) 15 | !gradle-wrapper.jar 16 | 17 | # Cache of project 18 | .gradletasknamecache 19 | 20 | # # Work around https://youtrack.jetbrains.com/issue/IDEA-116898 21 | # gradle/wrapper/gradle-wrapper.properties 22 | # User-specific stuff: 23 | .idea/workspace.xml 24 | .idea/tasks.xml 25 | .idea/dictionaries 26 | .idea/vcs.xml 27 | .idea/jsLibraryMappings.xml 28 | 29 | # Sensitive or high-churn files: 30 | .idea/dataSources.ids 31 | .idea/dataSources.xml 32 | .idea/dataSources.local.xml 33 | .idea/sqlDataSources.xml 34 | .idea/dynamic.xml 35 | .idea/uiDesigner.xml 36 | 37 | # Gradle: 38 | .idea/gradle.xml 39 | .idea/libraries 40 | 41 | # Mongo Explorer plugin: 42 | .idea/mongoSettings.xml 43 | 44 | ## File-based project format: 45 | *.iws 46 | 47 | # ansible 48 | *.retry 49 | 50 | ## Plugin-specific files: 51 | 52 | # IntelliJ 53 | /out/ 54 | 55 | # mpeltonen/sbt-idea plugin 56 | .idea_modules/ 57 | 58 | # JIRA plugin 59 | atlassian-ide-plugin.xml 60 | 61 | # Crashlytics plugin (for Android Studio and IntelliJ) 62 | com_crashlytics_export_strings.xml 63 | crashlytics.properties 64 | crashlytics-build.properties 65 | fabric.properties 66 | -------------------------------------------------------------------------------- /roles/kafka-ansible-role/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart kafka 4 | service: 5 | name: "{{kafka_service_name}}" 6 | state: restarted 7 | enabled: true 8 | tags: 9 | - kafka 10 | -------------------------------------------------------------------------------- /roles/kafka-ansible-role/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Needed to ensure some services start properly 3 | # - name: Set hostname 4 | # become: yes 5 | # lineinfile: 6 | # dest: /etc/hosts 7 | # line: "127.0.0.1 {{ ansible_hostname }}" 8 | # tags: 9 | # - kafka 10 | 11 | - name: Create log4j.properties 12 | template: 13 | dest: "{{kafka_conf_dir}}/log4j.properties" 14 | src: log4j.properties.j2 15 | owner: "{{ kafka_user }}" 16 | group: "{{ kafka_group }}" 17 | mode: "ug=rw,o=r" 18 | notify: 19 | - restart kafka 20 | tags: 21 | - kafka 22 | 23 | - name: Create server.properties 24 | template: 25 | dest: "{{ kafka_conf_dir }}/server{{kafka_id}}.properties" 26 | mode: "ug=rwx,o=rx" 27 | src: server.properties.j2 28 | owner: "{{ 
kafka_user }}" 29 | group: "{{ kafka_group }}" 30 | tags: 31 | - kafka 32 | 33 | - name: "Create the kafka systemd service file" 34 | template: 35 | src: "kafka.service" 36 | dest: "/etc/systemd/system/{{kafka_service_name}}.service" 37 | owner: "{{ kafka_user }}" 38 | group: "{{ kafka_group }}" 39 | mode: "ug=rwx,o=rx" 40 | force: yes 41 | notify: 42 | - restart kafka 43 | tags: 44 | - kafka 45 | 46 | - name: reload systemctl daemon 47 | command: systemctl daemon-reload 48 | tags: 49 | - kafka 50 | 51 | - name: "start and enable {{kafka_service_name}}" 52 | service: 53 | name: "{{kafka_service_name}}" 54 | state: started 55 | enabled: true 56 | tags: 57 | - kafka 58 | - restarted_kafka 59 | 60 | - name: Wait for Kafka port 61 | wait_for: 62 | host: "{{kafka_bind_ip}}" 63 | port: "{{ kafka_brokerport }}" 64 | state: started 65 | timeout: 120 66 | tags: 67 | - kafka 68 | -------------------------------------------------------------------------------- /roles/kafka-ansible-role/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # - name: "create group if not exists" 3 | # group: 4 | # name: "{{kafka_group}}" 5 | # state: present 6 | # tags: 7 | # - kafka 8 | # 9 | # - name: "check user is exists" 10 | # shell: "id {{kafka_user}} >& /dev/null" 11 | # register: kafka_user_exist 12 | # ignore_errors: true 13 | # tags: 14 | # - kafka 15 | # 16 | # - name: "create user if not exists" 17 | # user: 18 | # name: "{{kafka_user}}" 19 | # group: "{{kafka_group}}" 20 | # when: kafka_user_exist != 0 21 | # tags: 22 | # - kafka 23 | 24 | - name: Install kafka 25 | include: build.yml 26 | tags: 27 | - kafka 28 | 29 | - name: Configure kafka 30 | include: configure.yml 31 | tags: 32 | - kafka 33 | -------------------------------------------------------------------------------- /roles/kafka-ansible-role/templates/kafka.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kafka{{kafka_id}} 3 | After=network.target 4 | 5 | [Service] 6 | User={{ kafka_user }} 7 | Group={{ kafka_group }} 8 | Environment=JAVA_HOME={{ JAVA_HOME }} 9 | WorkingDirectory={{ kafka_data_dir }} 10 | ExecStart={{ KAFKA_HOME }}/bin/kafka-server-start.sh {{ kafka_conf_dir }}/server{{kafka_id}}.properties 11 | ExecStop={{ KAFKA_HOME }}/bin/kafka-server-stop.sh 12 | 13 | Restart=on-abort 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /roles/mysql-ansible/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/mysql-ansible/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role
goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/mysql-ansible/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for mysql-deploy 3 | mysql_version: 5.6.19 4 | mysql_home: /usr/local/mysql 5 | mysql_cnf_path: /etc/my.cnf 6 | mysql_data_dir: "{{mysql_home}}/data/opendata" 7 | mysql_pid_file_path: "{{mysql_data_dir}}/mysqld.pid" 8 | mysql_tmp_dir: "{{mysql_home}}/data/opendata/tmp" 9 | mysql_logbin_dir: "{{mysql_home}}/data/opendata/binlogs" 10 | mysql_log_dir: "{{mysql_home}}/data/logs" 11 | innodb_mysql_log_dir: "{{mysql_home}}/data/logs/open_innodb_logs" 12 | mysql_bin_dir: "{{mysql_home}}/bin" 13 | mysql_port: 3307 14 | mysql_user: mysql 15 | mysql_group: mysql 16 | mysql_client_socket_path: 17 | mysql_root_username: root 18 | mysql_root_password: mysqlrootpa2s 19 | 20 | 21 | 22 | 23 | mysql_login_unix_socket: "{{ mysql_tmp_dir }}/mysqld.sock" 24 | ansible_mysql_src_package_path: "mysql-{{mysql_version}}.tar.gz" 25 | -------------------------------------------------------------------------------- /roles/mysql-ansible/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for mysql-role-ansible 3 | - name: restart mysqld 4 | service: 5 | name: mysqld 6 | state: restarted 7 | # enable: true 8 | tags: 9 | - mysql 10 | - start_mysql 11 | -------------------------------------------------------------------------------- /roles/mysql-ansible/templates/my.cnf.j2: -------------------------------------------------------------------------------- 1 | [client] 2 | socket = {{mysql_login_unix_socket}} 3 | default-character-set = utf8mb4 4 | 5 | 6 | [mysqld] 7 | port = {{mysql_port}} 8 | basedir = {{ mysql_home }} 9 | datadir = {{ mysql_data_dir }} 10 | tmpdir = {{ mysql_tmp_dir }} 11 | socket = {{mysql_login_unix_socket}} 12 | pid-file={{ mysql_tmp_dir }}/open.pid 13 | log-bin={{ mysql_logbin_dir }}/mysqld_bin 14 | slow_query_log_file = {{ mysql_log_dir }}/open_mysql.slow 15 | log-error = {{ mysql_log_dir }}/open_error.log 16 | 
innodb_data_home_dir = {{ mysql_data_dir }} 17 | innodb_log_group_home_dir = {{ innodb_mysql_log_dir }} 18 | user={{mysql_user}} 19 | server_id =100 20 | skip-external-locking 21 | key_buffer_size =256M 22 | max_allowed_packet = 128M 23 | table_open_cache = 256 24 | sort_buffer_size =10M 25 | join_buffer_size=10M 26 | net_buffer_length = 8K 27 | read_buffer_size = 10M 28 | read_rnd_buffer_size = 10M 29 | myisam_sort_buffer_size = 8M 30 | character-set-server=utf8mb4 31 | lower_case_table_names=1 32 | wait_timeout=31536000 33 | innodb_log_files_in_group=3 34 | interactive-timeout=31536000 35 | query_cache_size=512M 36 | query_cache_limit=1268435456 37 | max_connections=1000 38 | long_query_time=0.5 39 | slow_query_log=ON 40 | innodb_data_file_path = ibdata1:1G:autoextend 41 | innodb_file_per_table 42 | innodb_lock_wait_timeout = 10 43 | innodb_buffer_pool_size=512M 44 | thread_cache_size=1000 45 | innodb_log_file_size = 512M 46 | innodb_log_buffer_size = 32M 47 | innodb_flush_log_at_trx_commit = 2 48 | innodb-open-files=1000 49 | max_heap_table_size=96M 50 | tmp_table_size=96M 51 | -------------------------------------------------------------------------------- /roles/mysql-ansible/templates/mysqld.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=MySQL Community Server 3 | After=network.target 4 | After=syslog.target 5 | 6 | [Install] 7 | WantedBy=multi-user.target 8 | Alias=mysql.service 9 | 10 | 11 | [Service] 12 | User={{mysql_user}} 13 | Group={{mysql_group}} 14 | 15 | # Execute pre and post scripts as root 16 | PermissionsStartOnly=true 17 | 18 | # Needed to create system tables etc. 19 | #ExecStartPre={{mysql_bin_dir}}/mysql-systemd-start pre 20 | 21 | # Start main service 22 | ExecStart={{mysql_bin_dir}}/mysqld_safe --datadir="{{mysql_data_dir}}" --pid-file="{{mysql_pid_file_path}}" -e=/etc/my.cnf 23 | 24 | # Don't signal startup success before a ping works 25 | #ExecStartPost=/usr/bin/mysql-systemd-start post 26 | 27 | # Give up if ping don't get an answer 28 | TimeoutSec=600 29 | 30 | Restart=always 31 | PrivateTmp=false 32 | -------------------------------------------------------------------------------- /roles/mysql-ansible/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/mysql-ansible/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - mysql-role-ansible -------------------------------------------------------------------------------- /roles/mysql-ansible/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for mysql-role-ansible 3 | -------------------------------------------------------------------------------- /roles/openresty/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 
10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/openresty/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for openresty 3 | openresty_version: openresty-1.11.2.2 4 | openresty_download_url: "https://openresty.org/download/{{openresty_version}}.tar.gz" 5 | nginx_conf_path: /usr/local/nginx/nginx/conf 6 | nginx_stream_conf_path: "{{nginx_conf_path}}/streams" 7 | nginx_servers_conf_path: "{{nginx_conf_path}}/servers" 8 | nginx_log_path: /var/log/nginx 9 | nginx_log_cut_path: /var/log/nginx/log_cut 10 | nginx_user: www 11 | nginx_group: www 12 | is_install_python_passlib: False 13 | -------------------------------------------------------------------------------- /roles/openresty/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: name="nginx" state="restarted" 4 | -------------------------------------------------------------------------------- /roles/openresty/templates/log_cut.sh: -------------------------------------------------------------------------------- 1 | /bin/mv {{nginx_log_path}}/access.log {{nginx_log_cut_path}}/access_`date +'%Y_%m_%d_%H_%M_%S_%s'`.log 2 | /etc/init.d/nginx reload 3 | -------------------------------------------------------------------------------- /roles/openresty/templates/nginx.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=The NGINX HTTP and reverse proxy server 3 | After=syslog.target network.target remote-fs.target nss-lookup.target 4 | 5 | [Service] 6 | User=root 7 | Group=root 8 | Type=forking 9 | #PIDFile=/usr/local/openresty/nginx/nginx.pid 10 | ExecStartPre=/usr/local/openresty/nginx/sbin/nginx -t 11 | ExecStart=/usr/local/openresty/nginx/sbin/nginx 12 | ExecReload=/bin/kill -s HUP $MAINPID 13 | ExecStop=/bin/kill -s QUIT $MAINPID 14 | PrivateTmp=true 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /roles/openresty/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for openresty 3 | -------------------------------------------------------------------------------- /roles/openresty/vars/ubuntu16.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | packages: 3 | - "libreadline-dev" 4 | - "libpcre3-dev" 5 | - "libssl-dev" 6 | - "perl" 7 | - "build-essential" 8 | -------------------------------------------------------------------------------- /roles/os-init/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/os-init/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/os-init/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for os-init 3 | -------------------------------------------------------------------------------- /roles/os-init/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for os-init 3 | -------------------------------------------------------------------------------- /roles/os-init/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Change nofile limits. 
3 | lineinfile: dest=/etc/security/limits.conf line="{{ item }}" 4 | with_items: 5 | - '* - nofile 165535' 6 | - '* soft nofile 165535' 7 | - '* hard nofile 165535' 8 | tags: limits 9 | 10 | 11 | - name: Disable SELinux. 12 | replace: dest=/etc/selinux/config regexp=^SELINUX=enforcing replace=SELINUX=disabled 13 | tags: selinux 14 | 15 | - block: 16 | - name: Shutdown transparent hugepage. 17 | shell: echo never >> /sys/kernel/mm/transparent_hugepage/enabled && echo never >> /sys/kernel/mm/transparent_hugepage/defrag 18 | 19 | - lineinfile: dest=/etc/rc.local line="{{ item }}" 20 | with_items: 21 | - echo never >> /sys/kernel/mm/transparent_hugepage/enabled 22 | - echo never >> /sys/kernel/mm/transparent_hugepage/defrag 23 | tags: hugepage 24 | 25 | - name: Ensure gcc packages are installed. 26 | yum: 27 | name: "{{ item }}" 28 | state: installed 29 | with_items: 30 | - gcc 31 | - gcc-c++ 32 | - libtool 33 | - make 34 | - automake 35 | tags: gcc 36 | 37 | - name: Set vm.overcommit_memory to 1 in /etc/sysctl.conf. 38 | sysctl: 39 | name: vm.overcommit_memory 40 | value: 1 41 | state: present 42 | tags: overcommit_memory 43 | 44 | - name: Set vm.swappiness to 1 in /etc/sysctl.conf. 45 | sysctl: 46 | name: vm.swappiness 47 | value: 1 48 | state: present 49 | tags: swappiness 50 | 51 | - name: update bash 52 | yum: name=bash state=latest 53 | -------------------------------------------------------------------------------- /roles/os-init/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/os-init/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - os-init -------------------------------------------------------------------------------- /roles/os-init/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for os-init 3 | -------------------------------------------------------------------------------- /roles/pinpoint-hbase-init/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/pinpoint-hbase-init/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for pinpoint-hbase-init 3 | pinpoint_scripts_save_path: "/tmp" 4 | -------------------------------------------------------------------------------- /roles/pinpoint-hbase-init/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for pinpoint-hbase-init -------------------------------------------------------------------------------- /roles/pinpoint-hbase-init/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for pinpoint-hbase-init 3 | - name: stat hbase had init 4 | stat: 5 | path: "/usr/local/.__pinpoint_hbase_had_init" 6 | register: __pinpoint_hbase_had_init 7 | tags: 8 | - pinpoint-hbase-init 9 | - pinpoint 10 | 11 | 12 | - name: "copy hbase-create.hbase to {{pinpoint_scripts_save_path}}" 13 | copy: 14 | src: "hbase-create.hbase" 15 | dest: "{{pinpoint_scripts_save_path}}" 16 | owner: "{{username}}" 17 | group: "{{usergroup}}" 18 | mode: "ug=rwx,o=rx" 19 | when: __pinpoint_hbase_had_init.stat.exists == False 20 | tags: 21 | - pinpoint-hbase-init 22 | - pinpoint 23 | 24 | 25 | - name: "run {{hbase_home}}/bin/hbase shell {{pinpoint_scripts_save_path}}/hbase-create.hbase" 26 | shell: "{{hbase_home}}/bin/hbase shell {{pinpoint_scripts_save_path}}/hbase-create.hbase" 27 | when: __pinpoint_hbase_had_init.stat.exists == False 28 | tags: 29 | - pinpoint-hbase-init 30 | - pinpoint 31 | 32 | - file: 33 | path: "/usr/local/.__pinpoint_hbase_had_init" 34 | state: "touch" 35 | tags: 36 | - pinpoint-hbase-init 37 | - pinpoint 38 | -------------------------------------------------------------------------------- /roles/pinpoint-hbase-init/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/pinpoint-hbase-init/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - pinpoint-hbase-init -------------------------------------------------------------------------------- /roles/pinpoint-hbase-init/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for pinpoint-hbase-init -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. 
For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/tasks/install-tomcat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: mkdir app folder 3 | file: 4 | path: "/app" 5 | state: directory 6 | owner: "{{username}}" 7 | group: "{{usergroup}}" 8 | mode: "ug=rwx,o=rx" 9 | tags: 10 | - pinpoint 11 | 12 | - name: stat 13 | stat: 14 | path: "/app/{{tomcat_file}}" 15 | register: __tomcat_file 16 | tags: 17 | - pinpoint 18 | 19 | - name: download tomcat from "{{tomcat_file_url}}" 20 | get_url: 21 | headers: "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/54.0.2840.59 Safari/537.36" 22 | url: "{{ tomcat_file_url }}" 23 | dest: /app 24 | owner: "{{username}}" 25 | group: "{{usergroup}}" 26 | mode: "ug=rwx,o=rx" 27 | when: __tomcat_file.stat.exists == False 28 | tags: 29 | - pinpoint 30 | 31 | - name: "unarchive /app/{{ tomcat_file }}" 32 | unarchive: 33 | src: "/app/{{ tomcat_file }}" 34 | dest: "/app/" 35 | remote_src: yes 36 | owner: "{{username}}" 37 | group: "{{usergroup}}" 38 | mode: "ug=rwx,o=rx" 39 | tags: 40 | - pinpoint 41 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include: install-tomcat.yml 4 | 5 | - name: Include vars of stuff.yml into the 'stuff' variable (2.2). 
6 | include_vars: 7 | file: collector.yml 8 | when: pinpoint_server_role == 'collector' 9 | 10 | - include: install-collector.yml 11 | when: pinpoint_server_role == 'collector' 12 | 13 | 14 | - include: install-web.yml 15 | when: pinpoint_server_role == 'web' 16 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/templates/hbase.properties: -------------------------------------------------------------------------------- 1 | hbase.client.host={{zookeeper_hosts[0] | default('localhost')}} 2 | hbase.client.port={{zookeeper_client_port | default('2181')}} 3 | 4 | # hbase default:/hbase 5 | hbase.zookeeper.znode.parent=/hbase 6 | 7 | 8 | 9 | # hbase timeout option================================================================================== 10 | # hbase default:true 11 | hbase.ipc.client.tcpnodelay=true 12 | # hbase default:60000 13 | hbase.rpc.timeout=10000 14 | # hbase default:Integer.MAX_VALUE 15 | hbase.client.operation.timeout=10000 16 | 17 | # hbase socket read timeout. default: 200000 18 | hbase.ipc.client.socket.timeout.read=20000 19 | # socket write timeout. hbase default: 600000 20 | hbase.ipc.client.socket.timeout.write=30000 21 | 22 | #================================================================================== 23 | # hbase client thread pool option 24 | hbase.client.thread.max=64 25 | hbase.client.threadPool.queueSize=5120 26 | # prestartAllCoreThreads 27 | hbase.client.threadPool.prestart=false 28 | 29 | #================================================================================== 30 | # hbase parallel scan options 31 | hbase.client.parallel.scan.enable=true 32 | hbase.client.parallel.scan.maxthreads=64 33 | hbase.client.parallel.scan.maxthreadsperscan=16 34 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/templates/pinpoint-collector.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Tomcat 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | User={{username}} 8 | Group={{usergroup}} 9 | Environment=JAVA_HOME={{JAVA_HOME}} 10 | Environment=CATALINA_PID={{pinpoint_collector_home}}/tomcat.pid 11 | Environment=CATALINA_HOME={{pinpoint_collector_home}} 12 | Environment=CATALINA_BASE={{pinpoint_collector_home}} 13 | Environment='CATALINA_OPTS=-Xms512M -Xmx1024M -server -XX:+UseParallelGC' 14 | Environment='JAVA_OPTS=-Djava.awt.headless=true -Djava.security.egd=file:/dev/./urandom' 15 | 16 | # EnvironmentFile=/etc/profile 17 | ExecStart={{pinpoint_collector_home}}/bin/startup.sh 18 | ExecStop={{pinpoint_collector_home}}/bin/shutdown.sh 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/templates/pinpoint-web.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Tomcat 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | User={{username}} 8 | Group={{usergroup}} 9 | Environment=JAVA_HOME={{JAVA_HOME}} 10 | Environment=CATALINA_PID={{pinpoint_web_home}}/tomcat.pid 11 | Environment=CATALINA_HOME={{pinpoint_web_home}} 12 | Environment=CATALINA_BASE={{pinpoint_web_home}} 13 | Environment='CATALINA_OPTS=-Xms512M -Xmx1024M -server -XX:+UseParallelGC' 14 | Environment='JAVA_OPTS=-Djava.awt.headless=true -Djava.security.egd=file:/dev/./urandom' 15 | 16 | # EnvironmentFile=/etc/profile 17 | 
ExecStart={{pinpoint_web_home}}/bin/startup.sh 18 | ExecStop={{pinpoint_web_home}}/bin/shutdown.sh 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - pinpoint-collector-ansible -------------------------------------------------------------------------------- /roles/pinpoint-server-ansible/vars/collector.yml: -------------------------------------------------------------------------------- 1 | tomcat_catalina_port: "{{pinpoint_collector_catalina_port}}" 2 | -------------------------------------------------------------------------------- /roles/redis-ansible/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/redis-ansible/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: redis restart 3 | service: 4 | name: "{{ redis_service_name }}" 5 | state: restarted 6 | tags: 7 | - redis 8 | -------------------------------------------------------------------------------- /roles/redis-ansible/templates/redis-server.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description={{redis_service_name}} 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | PIDFile={{redis_pidfile}} 8 | User={{redis_user}} 9 | Group={{redis_group}} 10 | ExecStart={{redis_home}}/bin/redis-server {{redis_conf_path}} 11 | ExecReload=/bin/kill -USR2 $MAINPID 12 | ExecStop={{redis_home}}/bin/redis-cli -h {{redis_bind}} -p {{redis_port}} shutdown 13 | Restart=always 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /roles/rocket-chat/.gitignore: -------------------------------------------------------------------------------- 1 | tests/.vagrant/ 2 | -------------------------------------------------------------------------------- /roles/rocket-chat/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults/main.yml: Defaults for RocketChat.Ansible 3 | rocket_chat_automatic_upgrades: false 4 | rocket_chat_upgrade_backup: true 5 | rocket_chat_upgrade_backup_path: "{{ rocket_chat_application_path }}" 6 | rocket_chat_application_path: /var/lib/rocket.chat 7 | rocket_chat_version: "0.59.0-rc.5" 8
| rocket_chat_tarball_remote: https://rocket.chat/releases/{{ rocket_chat_version }}/download 9 | rocket_chat_tarball_sha256sum: f87ad01656e345c1f0e5c449e09c33d5819912e96b080eb2a93935de772726c1. 10 | rocket_chat_tarball_check_checksum: false 11 | rocket_chat_tarball_fetch_timeout: 100 12 | rocket_chat_tarball_validate_remote_cert: true 13 | rocket_chat_service_user: rocketchat 14 | rocket_chat_service_group: rocketchat 15 | rocket_chat_service_host: "{{ innerip }}" 16 | rocket_chat_service_port: 3000 17 | rocket_chat_node_version: 4.5.0 18 | rocket_chat_node_path: /usr/local/n/versions/node/{{ rocket_chat_node_version }}/bin 19 | rocket_chat_node_orig_npm: /usr/bin/npm 20 | 21 | ## 22 | rocket_chat_file_upload_folder: /rocket_chat_file 23 | 24 | 25 | # MongoDB settings 26 | rocket_chat_mongodb_server: 127.0.0.1 27 | rocket_chat_mongodb_port: 27017 28 | 29 | # nginx settings 30 | rocket_chat_include_nginx: true 31 | rocket_chat_ssl_generate_certs: false 32 | rocket_chat_ssl_key_path: /etc/nginx/rocket_chat.key 33 | rocket_chat_ssl_cert_path: /etc/nginx/rocket_chat.crt 34 | rocket_chat_ssl_deploy_data: false 35 | rocket_chat_ssl_key_file: ~ 36 | rocket_chat_ssl_cert_file: ~ 37 | rocket_chat_nginx_enable_pfs: true 38 | rocket_chat_nginx_generate_pfs_key: true 39 | rocket_chat_nginx_pfs_key_numbits: 2048 40 | rocket_chat_nginx_pfs_key_path: /etc/nginx/rocket_chat.pem 41 | rocket_chat_nginx_pfs_file: ~ 42 | -------------------------------------------------------------------------------- /roles/rocket-chat/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers/main.yml: Handlers for RocketChat.Ansible 3 | - name: Reload the Nginx service 4 | service: name=nginx state=reloaded 5 | delegate_to: "{{delegated_nginx_host}}" 6 | 7 | 8 | - name: Upgrade Rocket.Chat 9 | include: upgrade.yml 10 | when: rocket_chat_deploy_state.stat.exists 11 | tags: 12 | - upgrade 13 | 14 | - name: Update the Rocket.Chat service configuration 15 | shell: "{{ rocket_chat_service_update_command }}" 16 | when: rocket_chat_service_update_command is defined 17 | 18 | - name: Restart the Rocket.Chat service 19 | service: name=rocketchat state=restarted 20 | -------------------------------------------------------------------------------- /roles/rocket-chat/tasks/repo_RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks/repo_RedHat.yml: RedHat based distro repository configuration for RocketChat.Ansible 3 | 4 | - name: Ensure the EPEL repository is present 5 | yum: 6 | name: epel-release 7 | state: present 8 | 9 | - name: Ensure the EPEL repository GPG key is imported 10 | rpm_key: 11 | key: /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 12 | state: present 13 | -------------------------------------------------------------------------------- /roles/rocket-chat/tasks/upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks/upgrade.yml: Rocket.Chat upgrade procedures for RocketChat.Ansible 3 | 4 | - name: Ensure automatic upgrades are permitted [UPGRADE] 5 | fail: 6 | msg: >- 7 | It doesn't look like you've permitted automatic upgrades. 8 | A new version of Rocket.Chat was released. 
9 | To permit automatic upgrades set 'rocket_chat_automatic_upgrades' to true 10 | when: not rocket_chat_automatic_upgrades|bool 11 | 12 | - name: Ensure the back up directory exists [UPGRADE] 13 | file: 14 | path: "{{ rocket_chat_upgrade_backup_path }}" 15 | state: directory 16 | when: rocket_chat_upgrade_backup|bool 17 | 18 | - name: Back up the current Rocket.Chat instance [UPGRADE] 19 | shell: >- 20 | mv {{ rocket_chat_application_path }}/bundle 21 | {{ rocket_chat_upgrade_backup_path }}/backup_{{ ansible_date_time.date }} 22 | when: rocket_chat_upgrade_backup|bool 23 | 24 | - name: Delete the current Rocket.Chat instance [UPGRADE] 25 | file: 26 | path: "{{ rocket_chat_application_path }}/bundle" 27 | state: absent 28 | when: not rocket_chat_upgrade_backup|bool 29 | 30 | - name: Set the Rocket.Chat upgrade status [UPGRADE] 31 | set_fact: 32 | rocket_chat_upgraded: true 33 | -------------------------------------------------------------------------------- /roles/rocket-chat/templates/mongod.conf.j2: -------------------------------------------------------------------------------- 1 | # mongod.conf 2 | 3 | # for documentation of all options, see: 4 | # http://docs.mongodb.org/manual/reference/configuration-options/ 5 | 6 | {% if rocket_chat_mongodb_fork is defined %} 7 | # whether to fork the process or not 8 | fork = {{ rocket_chat_mongodb_fork }} 9 | {% endif %} 10 | {% if rocket_chat_mongodb_pidfile_path is defined %} 11 | 12 | pidfilepath = {{ rocket_chat_mongodb_pidfile_path }} 13 | {% endif %} 14 | {% if rocket_chat_mongodb_logpath is defined %} 15 | 16 | logpath = {{ rocket_chat_mongodb_logpath }} 17 | {% endif %} 18 | {% if rocket_chat_mongodb_unixsocketprefix is defined %} 19 | 20 | unixSocketPrefix = {{ rocket_chat_mongodb_unixsocketprefix }} 21 | {% endif %} 22 | {% if rocket_chat_mongodb_dbpath is defined %} 23 | 24 | dbpath = {{ rocket_chat_mongodb_dbpath }} 25 | {% endif %} 26 | 27 | {% if ansible_os_family == "Debian" %} 28 | # where and how to store data. 29 | storage: 30 | dbPath: /var/lib/mongodb 31 | journal: 32 | enabled: true 33 | 34 | # where to write logging data. 
35 | systemLog: 36 | destination: file 37 | logAppend: true 38 | path: /var/log/mongodb/mongod.log 39 | # network interfaces 40 | net: 41 | port: {{ rocket_chat_mongodb_port }} 42 | bindIp: {{ rocket_chat_mongodb_server }} 43 | {% endif %} 44 | 45 | # replication 46 | {{ rocket_chat_mongodb_repl_lines }} 47 | -------------------------------------------------------------------------------- /roles/rocket-chat/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | user {{ rocket_chat_nginx_process_user }}; 2 | worker_processes auto; 3 | error_log /var/log/nginx/error.log; 4 | pid /run/nginx.pid; 5 | 6 | events { 7 | worker_connections 1024; 8 | } 9 | 10 | http { 11 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 12 | '$status $body_bytes_sent "$http_referer" ' 13 | '"$http_user_agent" "$http_x_forwarded_for"'; 14 | 15 | access_log /var/log/nginx/access.log main; 16 | 17 | sendfile on; 18 | tcp_nopush on; 19 | tcp_nodelay on; 20 | keepalive_timeout 65; 21 | types_hash_max_size 2048; 22 | 23 | include /etc/nginx/mime.types; 24 | default_type application/octet-stream; 25 | 26 | gzip on; 27 | gzip_vary on; 28 | gzip_http_version 1.1; 29 | gzip_comp_level 9; 30 | gzip_proxied any; 31 | gzip_min_length 1024; 32 | gzip_buffers 16 8k; 33 | gzip_types text/plain text/css text/javascript application/x-javascript application/xml text/xml application/json application/javascript application/xml+rss text/x-js; 34 | gzip_disable "MSIE [1-6].(?!.*SV1)"; 35 | gzip_static on; 36 | 37 | # Load modular configuration files from the /etc/nginx/conf.d directory. 38 | # See http://nginx.org/en/docs/ngx_core_module.html#include 39 | # for more information. 40 | include /etc/nginx/conf.d/*.conf; 41 | } 42 | -------------------------------------------------------------------------------- /roles/rocket-chat/templates/rocket_chat.conf.j2: -------------------------------------------------------------------------------- 1 | upstream rocket_chat { 2 | server {{rocket_chat_service_host}}:{{ rocket_chat_service_port }}; 3 | } 4 | server { 5 | listen {{rocket_chat_nginx_listen_port}}; 6 | # server_name {{ rocket_chat_nginx_listen_host }}; 7 | 8 | location / { 9 | proxy_pass http://rocket_chat; 10 | proxy_http_version 1.1; 11 | proxy_set_header Host $host:$server_port; 12 | proxy_set_header Referer $http_referer; 13 | proxy_set_header X-Real-IP $remote_addr; 14 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 15 | proxy_set_header X-Nginx-Proxy true; 16 | 17 | proxy_set_header Upgrade $http_upgrade; 18 | proxy_set_header Connection "upgrade"; 19 | 20 | proxy_redirect off; 21 | 22 | proxy_send_timeout 86400; 23 | proxy_read_timeout 86400; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /roles/rocket-chat/templates/rocketchat.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Rocket.Chat Server 3 | After=syslog.target 4 | After=network.target 5 | 6 | [Service] 7 | Type=simple 8 | Restart=always 9 | StandardOutput=syslog 10 | SyslogIdentifier=RocketChat 11 | User={{ rocket_chat_service_user }} 12 | Group={{ rocket_chat_service_group }} 13 | Environment=MONGO_URL=mongodb://{{ rocket_chat_mongodb_server }}:{{ rocket_chat_mongodb_port }}/rocketchat 14 | #Environment=MONGO_OPLOG_URL=mongodb://{{ rocket_chat_mongodb_server }}:{{ rocket_chat_mongodb_port }}/local 15 | Environment=ROOT_URL=http://{{ 
rocket_chat_domain_service_host }} 16 | Environment=PORT={{ rocket_chat_service_port }} 17 | WorkingDirectory={{ rocket_chat_application_path }} 18 | ExecStart={{ rocket_chat_node_path }}/node {{ rocket_chat_application_path }}/bundle/main.js 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /roles/rocket-chat/tests/Dockerfile.centos-7: -------------------------------------------------------------------------------- 1 | FROM centos:7 2 | # Install systemd -- See https://hub.docker.com/_/centos/ 3 | RUN yum -y swap -- remove fakesystemd -- install systemd systemd-libs 4 | RUN yum -y update; \ 5 | (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \ 6 | rm -f /lib/systemd/system/multi-user.target.wants/*; \ 7 | rm -f /etc/systemd/system/*.wants/*; \ 8 | rm -f /lib/systemd/system/local-fs.target.wants/*; \ 9 | rm -f /lib/systemd/system/sockets.target.wants/*udev*; \ 10 | rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \ 11 | rm -f /lib/systemd/system/basic.target.wants/*; \ 12 | rm -f /lib/systemd/system/anaconda.target.wants/*; 13 | # Install Ansible 14 | RUN yum -y install epel-release 15 | RUN yum -y install git ansible sudo 16 | RUN yum clean all 17 | # Disable requiretty 18 | RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers 19 | # Install Ansible inventory file 20 | RUN echo -e '[local]\nlocalhost ansible_connection=local' > /etc/ansible/hosts 21 | VOLUME [ "/sys/fs/cgroup" ] 22 | CMD ["/usr/sbin/init"] 23 | -------------------------------------------------------------------------------- /roles/rocket-chat/tests/Dockerfile.debian-8: -------------------------------------------------------------------------------- 1 | # Dockerfile.ubuntu 2 | FROM debian:8 3 | 4 | RUN systemctl mask -- \ 5 | -.mount \ 6 | dev-mqueue.mount \ 7 | dev-hugepages.mount \ 8 | etc-hosts.mount \ 9 | etc-hostname.mount \ 10 | etc-resolv.conf.mount \ 11 | proc-bus.mount \ 12 | proc-irq.mount \ 13 | proc-kcore.mount \ 14 | proc-sys-fs-binfmt_misc.mount \ 15 | proc-sysrq\\\\x2dtrigger.mount \ 16 | sys-fs-fuse-connections.mount \ 17 | sys-kernel-config.mount \ 18 | sys-kernel-debug.mount \ 19 | tmp.mount \ 20 | \ 21 | && systemctl mask -- \ 22 | console-getty.service \ 23 | display-manager.service \ 24 | getty-static.service \ 25 | getty\@tty1.service \ 26 | hwclock-save.service \ 27 | ondemand.service \ 28 | systemd-logind.service \ 29 | systemd-remount-fs.service \ 30 | \ 31 | && ln -sf /lib/systemd/system/multi-user.target /etc/systemd/system/default.target \ 32 | \ 33 | && ln -sf /lib/systemd/system/halt.target /etc/systemd/system/sigpwr.target 34 | 35 | RUN apt-get update -qq -y 36 | RUN apt-get install -qq -y git python-pip python-dev libssl-dev \ 37 | libffi-dev rsyslog systemd systemd-cron sudo 38 | 39 | # Install Ansible 40 | RUN sed -i 's/^\($ModLoad imklog\)/#\1/' /etc/rsyslog.conf 41 | RUN pip install --upgrade ansible 42 | # Install Ansible inventory file 43 | # RUN mkdir -p /etc/ansible # Not really sure why this won't run but it doesn't 44 | RUN bash -c 'mkdir -p /etc/ansible' 45 | RUN echo "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts 46 | 47 | VOLUME ["/sys/fs/cgroup"] 48 | VOLUME ["/run"] 49 | CMD ["/sbin/init"] 50 | -------------------------------------------------------------------------------- /roles/rocket-chat/tests/Dockerfile.ubuntu-14.04: 
-------------------------------------------------------------------------------- 1 | # Dockerfile.ubuntu 2 | FROM ubuntu-upstart:14.04 3 | # Install Ansible 4 | #RUN mv /sbin/initctl.distrib /sbin/initctl 5 | RUN apt-get update -y 6 | RUN apt-get install -y python-software-properties software-properties-common 7 | RUN add-apt-repository -y ppa:ansible/ansible 8 | RUN apt-get update -y 9 | RUN apt-get install -y ansible git-core 10 | # Install Ansible inventory file 11 | RUN echo "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts 12 | -------------------------------------------------------------------------------- /roles/rocket-chat/tests/Dockerfile.ubuntu-16.04: -------------------------------------------------------------------------------- 1 | # Dockerfile.ubuntu 2 | FROM ubuntu:16.04 3 | 4 | RUN systemctl mask -- \ 5 | -.mount \ 6 | dev-mqueue.mount \ 7 | dev-hugepages.mount \ 8 | etc-hosts.mount \ 9 | etc-hostname.mount \ 10 | etc-resolv.conf.mount \ 11 | proc-bus.mount \ 12 | proc-irq.mount \ 13 | proc-kcore.mount \ 14 | proc-sys-fs-binfmt_misc.mount \ 15 | proc-sysrq\\\\x2dtrigger.mount \ 16 | sys-fs-fuse-connections.mount \ 17 | sys-kernel-config.mount \ 18 | sys-kernel-debug.mount \ 19 | tmp.mount \ 20 | \ 21 | && systemctl mask -- \ 22 | console-getty.service \ 23 | display-manager.service \ 24 | getty-static.service \ 25 | getty\@tty1.service \ 26 | hwclock-save.service \ 27 | ondemand.service \ 28 | systemd-logind.service \ 29 | systemd-remount-fs.service \ 30 | \ 31 | && ln -sf /lib/systemd/system/multi-user.target /etc/systemd/system/default.target \ 32 | \ 33 | && ln -sf /lib/systemd/system/halt.target /etc/systemd/system/sigpwr.target 34 | 35 | RUN apt-get update -qq -y 36 | RUN apt-get install -qq -y python-software-properties software-properties-common \ 37 | rsyslog systemd systemd-cron sudo 38 | RUN sed -i 's/^\($ModLoad imklog\)/#\1/' /etc/rsyslog.conf 39 | #ADD etc/rsyslog.d/50-default.conf /etc/rsyslog.d/50-default.conf 40 | 41 | # Install Ansible 42 | RUN add-apt-repository -y ppa:ansible/ansible 43 | RUN apt-get update -y 44 | RUN apt-get install -y ansible git-core 45 | # Install Ansible inventory file 46 | RUN echo "[local]\nlocalhost ansible_connection=local" > /etc/ansible/hosts 47 | 48 | VOLUME ["/sys/fs/cgroup"] 49 | VOLUME ["/run"] 50 | CMD ["/sbin/init"] 51 | 52 | 53 | -------------------------------------------------------------------------------- /roles/rocket-chat/tests/provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Apply the Rocket.Chat role to all chat_servers 4 | hosts: "{{ host_name | default('chat_servers') }}" 5 | 6 | roles: 7 | - role: "{{ role_name | default('../../RocketChat.Server') }}" 8 | rocket_chat_tarball_check_checksum: false 9 | rocket_chat_service_host: localhost 10 | rocket_chat_automatic_upgrades: true 11 | -------------------------------------------------------------------------------- /roles/rocket-chat/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rocket_chat_dep_packages: 3 | - git 4 | - graphicsmagick 5 | - nodejs 6 | - npm 7 | - make 8 | - wget 9 | 10 | rocket_chat_mongodb_packages: 11 | - mongodb-org-server 12 | - mongodb-org-shell 13 | 14 | rocket_chat_mongodb_repl_lines: >- 15 | replication: 16 | replSetName: "001-rs" 17 | 18 | rocket_chat_nginx_process_user: www-data 19 | -------------------------------------------------------------------------------- 
/roles/rocket-chat/vars/Debian_8.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rocket_chat_service_update_command: systemctl daemon-reload 3 | rocket_chat_service_template: 4 | src: rocketchat.service.j2 5 | dest: /etc/systemd/system/rocketchat.service 6 | 7 | rocket_chat_mongodb_apt_repo: "deb http://repo.mongodb.org/apt/debian wheezy/mongodb-org/3.0 main" 8 | -------------------------------------------------------------------------------- /roles/rocket-chat/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rocket_chat_dep_packages: 3 | - nodejs 4 | - git 5 | - GraphicsMagick 6 | - make 7 | - wget 8 | 9 | 10 | rocket_chat_nginx_process_user: nginx 11 | -------------------------------------------------------------------------------- /roles/rocket-chat/vars/RedHat_7.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rocket_chat_service_update_command: systemctl daemon-reload 3 | rocket_chat_service_template: 4 | src: rocketchat.service.j2 5 | dest: /usr/lib/systemd/system/rocketchat.service 6 | rocket_chat_tarball_validate_remote_cert: false 7 | -------------------------------------------------------------------------------- /roles/rocket-chat/vars/Ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rocket_chat_dep_packages: 3 | - git 4 | - graphicsmagick 5 | - nodejs 6 | - npm 7 | - make 8 | - wget 9 | 10 | rocket_chat_mongodb_packages: 11 | - mongodb-org-server 12 | - mongodb-org-shell 13 | 14 | rocket_chat_mongodb_repl_lines: >- 15 | replication: 16 | replSetName: "001-rs" 17 | 18 | rocket_chat_nginx_process_user: www-data 19 | -------------------------------------------------------------------------------- /roles/rocket-chat/vars/Ubuntu_14.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rocket_chat_service_update_command: initctl reload-configuration 3 | rocket_chat_service_template: 4 | src: rocketchat_upstart.j2 5 | dest: /etc/init/rocketchat.conf 6 | 7 | rocket_chat_mongodb_apt_repo: "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.0 multiverse" 8 | rocket_chat_tarball_validate_remote_cert: false 9 | -------------------------------------------------------------------------------- /roles/rocket-chat/vars/Ubuntu_16.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rocket_chat_service_update_command: systemctl daemon-reload 3 | rocket_chat_service_template: 4 | src: rocketchat.service.j2 5 | dest: /etc/systemd/system/rocketchat.service 6 | 7 | rocket_chat_mongodb_apt_repo: "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" 8 | rocket_chat_mongodb_gpg_key: EA312927 9 | -------------------------------------------------------------------------------- /roles/swarm-agent/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' > ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - 
ansible-playbook tests/test.yml -i tests/inventory --syntax-check -vvv 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ 30 | -------------------------------------------------------------------------------- /roles/swarm-agent/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for jenkins-agent 3 | # - name: Reload Systemd 4 | # systemd: 5 | # daemon_reload: yes 6 | # name: "{{ jenkins_swarm_service_name }}" 7 | # enabled: yes 8 | # state: started 9 | # when: ansible_service_mgr == "systemd" 10 | # become: true 11 | 12 | - name: Reload Systemd 13 | command: "systemctl daemon-reload" 14 | when: ansible_service_mgr == "systemd" 15 | tags: 16 | - jenkins 17 | 18 | - name: Restart Swarm Client service 19 | service: 20 | name: "{{ jenkins_swarm_service_name }}" 21 | enabled: yes 22 | state: restarted 23 | become: true 24 | 25 | - name: Install Jenkins as a service 26 | win_shell: "{{ win_swarm_client_wrapper_path }} install" 27 | notify: Start the Jenkins service 28 | 29 | - name: Start the Jenkins service 30 | win_service: 31 | name: "{{ jenkins_swarm_task_name }}" 32 | start_mode: auto 33 | state: started 34 | -------------------------------------------------------------------------------- /roles/swarm-agent/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | galaxy_info: 4 | author: Nic Patterson & Chris O'Brian 5 | description: Provision a Jenkins agent 6 | company: Concur 7 | license: BSD 8 | min_ansible_version: 2.2 9 | platforms: 10 | - name: EL 11 | versions: 12 | - 6 13 | - 7 14 | - name: Debian 15 | versions: 16 | - jessie 17 | - wheezy 18 | - name: Ubuntu 19 | versions: 20 | - xenial 21 | - yakkety 22 | galaxy_tags: 23 | - development 24 | - system 25 | - jenkins 26 | -------------------------------------------------------------------------------- /roles/swarm-agent/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add a jenkins user and add it to the docker group 3 | user: 4 | name: jenkins 5 | groups: docker 6 | append: yes 7 | state: present 8 | become: true 9 | 10 | - name: Create Swarm directory 11 | file: 12 | path: "{{ jenkins_swarm_path }}" 13 | state: directory 14 | mode: 0755 15 | become: true 16 | 17 | - name: Download the Jenkins Swarm Client 18 | get_url: 19 | url: "{{ jenkins_swarm_download_url }}" 20 | dest: "{{ jenkins_swarm_path }}" 21 | owner: jenkins 22 | become: true 23 | notify: 24 | - Restart Swarm Client service 25 | # register: swarm_jar 26 | tags: 27 | - config 28 | 29 | - name: "Ensure folders folder exists" 30 | file: 31 | path: "{{ item }}" 32 | state: directory 33 | mode: 0755 34 | become: true 35 | with_items: 36 | - "{{ jenkins_swarm_systemd_path }}" 37 | - "{{ jenkins_swarm_path }}" 38 | tags: 39 | - config 40 | 41 | - name: "Ensure {{ jenkins_agent_labels_file }} folder exists" 42 | file: 43 | path: "{{ jenkins_agent_labels_file }}" 44 | state: touch 45 | mode: 0755 46 | become: true 47 | when: jenkins_agent_labels_file is defined 48 | notify: Restart Swarm Client service 49 | tags: 50 | - config 51 | 52 | - name: Create the Swarm Client service def 53 | template: 54 | src: swarm-client.service.j2 55 | dest: "/lib/systemd/system/{{ jenkins_swarm_service_name }}.service" 56 | mode: 0755 57 | become: true 58 | notify: 59 | - Reload Systemd 60 | - Restart Swarm Client service 61 | tags: 62 | - config 63 | 
-------------------------------------------------------------------------------- /roles/swarm-agent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Running Jenkins Agent role for {{ ansible_os_family|lower() }}" 3 | include: "{{ ansible_os_family|lower() }}.yml" 4 | -------------------------------------------------------------------------------- /roles/swarm-agent/tasks/windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create Swarm directory 3 | win_file: 4 | path: "{{ win_base_jenkins_path }}" 5 | state: directory 6 | 7 | - name: Download the Jenkins Swarm Client 8 | win_get_url: 9 | url: "{{ jenkins_swarm_download_url }}" 10 | dest: "{{ win_swarm_client_jar_path }}" 11 | force: no 12 | notify: Install Jenkins as a service 13 | 14 | - name: Download the Jenkins Service Wrapper 15 | win_get_url: 16 | url: "{{ jenkins_swarm_wrapper_download_url }}" 17 | dest: "{{ win_swarm_client_wrapper_path }}" 18 | force: no 19 | notify: Install Jenkins as a service 20 | 21 | - name: Create the Service Wrapper config 22 | win_template: 23 | src: "swarm-service.xml.j2" 24 | dest: "{{ win_swarm_client_wrapper_config_path }}" 25 | notify: Install Jenkins as a service 26 | -------------------------------------------------------------------------------- /roles/swarm-agent/templates/swarm-client-linux.j2: -------------------------------------------------------------------------------- 1 | {{ ansible_managed|comment }} 2 | MASTER="{{ jenkins_agent_master|default('') }}" 3 | JAR="{{ swarm_jar.dest }}" 4 | MASTER_PORT="{{ jenkins_agent_master_port }}" 5 | SWARM_USERNAME="{{ jenkins_agent_username }}" 6 | SWARM_PASSWORD="{{ jenkins_agent_password }}" 7 | NAME="{{ jenkins_agent_name }}" 8 | NUM_EXECUTORS="{{ jenkins_agent_num_executors }}" 9 | LABELS="{{ jenkins_agent_labels }}" 10 | -------------------------------------------------------------------------------- /roles/swarm-agent/templates/swarm-client.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description={{ jenkins_swarm_task_name }} 3 | After=network.target 4 | [Service] 5 | Type=simple 6 | User={{jenkins_user}} 7 | Group={{jenkins_group}} 8 | PIDFile=/var/run/{{ jenkins_swarm_service_name }}.pid 9 | ExecStartPre=/bin/rm -f /var/run/{{ jenkins_swarm_service_name }}.pid 10 | ExecStart={{JAVA_HOME}}/bin/java \ 11 | -jar {{ swarm_jar.dest }} \ 12 | -master 'http://{{ jenkins_agent_master }}:{{ jenkins_agent_master_port }}' \ 13 | {% if jenkins_agent_jvm_args is defined %} 14 | {{ jenkins_agent_jvm_args|join(' ') }} \ 15 | {% endif %} 16 | {% if jenkins_agent_username is defined %} 17 | -username '{{ jenkins_agent_username }}' \ 18 | {% endif %} 19 | {% if jenkins_agent_password is defined %} 20 | -password '{{ jenkins_agent_password }}' \ 21 | {% endif %} 22 | -name '{{ jenkins_agent_name }}' \ 23 | {% if jenkins_agent_labels_file is defined %} 24 | -labelsFile '{{ jenkins_agent_labels_file }}' \ 25 | {% else %} 26 | -labels '{{ jenkins_agent_labels }}' \ 27 | {% endif %} 28 | {% if jenkins_agent_mode is defined %} 29 | -mode '{{ jenkins_agent_mode }}' \ 30 | {% endif %} 31 | {% if jenkins_agent_additional_args is defined %} 32 | {% for arg in jenkins_agent_additional_args %} 33 | {{ arg }} \ 34 | {% endfor %} 35 | {% endif %} 36 | -executors '{{ jenkins_agent_num_executors }}' 37 | Restart=on-abort 38 | ## 39 | [Install] 40 | WantedBy=default.target 41 | 
-------------------------------------------------------------------------------- /roles/swarm-agent/templates/swarm-service.xml.j2: -------------------------------------------------------------------------------- 1 | <service> 2 | <id>{{ jenkins_swarm_task_name|replace(' ', '_')|lower }}</id> 3 | <name>{{ jenkins_swarm_task_name }}</name> 4 | <description>This service runs Jenkins continuous integration system.</description> 5 | 6 | <executable>java</executable> 7 | <arguments>-Xrs -Xms256m -jar "%BASE%\{{ jenkins_swarm_client_jar }}" -master "http://{{ jenkins_agent_master|default('') }}:{{ jenkins_agent_master_port|default('80') }}" -executors {{ jenkins_agent_num_executors }} -fsroot "C:\\jenkins" -name "{{ jenkins_agent_name }}" -username "{{ jenkins_agent_username }}" -password "{{ jenkins_agent_password }}" -labels "{{ jenkins_agent_labels }}" -disableSslVerification -disableClientsUniqueId</arguments> 8 | <logmode>rotate</logmode> 9 | </service> 10 | -------------------------------------------------------------------------------- /roles/swarm-agent/templates/swarm-win.ps1.j2: -------------------------------------------------------------------------------- 1 | {{ ansible_managed|comment }} 2 | Set-StrictMode -Version 2 3 | $WarningPreference='stop' 4 | $ErrorActionPreference='stop' 5 | 6 | javaw -jar "{{ win_swarm_client_jar_path }}" ` 7 | -master "http://{{ jenkins_agent_master|default('') }}:{{ jenkins_agent_master_port|default('80') }}" ` 8 | -name "{{ jenkins_agent_name }}" ` 9 | -labels "{{ jenkins_agent_labels }}" ` 10 | -username "{{ jenkins_agent_username }}" ` 11 | -password "{{ jenkins_agent_password }}" ` 12 | -disableClientsUniqueId ` 13 | -executors "{{ jenkins_agent_num_executors }}" ` 14 | -fsroot "C:\\jenkins" 15 | -------------------------------------------------------------------------------- /roles/swarm-agent/tests/inventory: -------------------------------------------------------------------------------- 1 | 127.0.0.1 2 | -------------------------------------------------------------------------------- /roles/swarm-agent/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | roles: 4 | - ansible-jenkins-agent 5 | -------------------------------------------------------------------------------- /roles/tomcat8/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/tomcat8/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for tomcat8 3 | tomcat8_download_url: http://apache.fayea.com/tomcat/tomcat-8/v8.0.37/bin/apache-tomcat-8.0.37.tar.gz 4 | apache_tomcat8_version: apache-tomcat-8.0.37 5 | #apache_tomcat8_path: 6 | tomcat_debug_port: 8787 7 | log_path: /var/log/tomcat8/ 8 | -------------------------------------------------------------------------------- /roles/tomcat8/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for tomcat8 3 | - name: start tomcat 4 | service: name="{{tomcat_service_name}}" state=started -------------------------------------------------------------------------------- /roles/tomcat8/templates/conf/context.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <Context> 3 | <WatchedResource>WEB-INF/web.xml</WatchedResource> 4 | <WatchedResource>${catalina.base}/conf/web.xml</WatchedResource> 5 | </Context> -------------------------------------------------------------------------------- /roles/tomcat8/templates/tomcat: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # /etc/init.d/tomcat 4 | # 5 | # This is the init script for starting up the 6 | # Jakarta Tomcat server 7 | # 8 | # description: Starts and stops the Tomcat daemon. 9 | # 10 | 11 | export JAVA_HOME=/usr/lib/jvm/java 12 | export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar 13 | export PATH="$PATH:$JAVA_HOME/bin" 14 | 15 | export JAVA_OPTS="$JAVA_OPTS -Duser.timezone=GMT+08" 16 | #export CATALINA_OPTS="-server -Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address={{tomcat_debug_port}}" 17 | 18 | # SET CATALINA_OPTS=-server -Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address={{tomcat_debug_port}} 19 | 20 | tomcat={{tomcat_home}} 21 | startup=$tomcat/bin/startup.sh 22 | shutdown=$tomcat/bin/shutdown.sh 23 | 24 | 25 | 26 | start() { 27 | echo -n $"Starting Tomcat service: " 28 | sh $startup 29 | echo $? 30 | } 31 | 32 | stop() { 33 | echo -n $"Stopping Tomcat service: " 34 | sh $shutdown 35 | echo $? 36 | } 37 | 38 | restart() { 39 | stop 40 | sleep 4 41 | start 42 | } 43 | 44 | status() { 45 | # if !
ps -ef |grep -q "{{tomcat_service_name}}.* st[a]rt"; then 46 | # exit 1 47 | # fi 48 | ps -ef |grep -q "{{tomcat_service_name}}.* st[a]rt" && exit 0 || exit 1 49 | } 50 | 51 | # Handle the different input options 52 | case "$1" in 53 | start) 54 | start 55 | ;; 56 | stop) 57 | stop 58 | ;; 59 | status) 60 | status 61 | ;; 62 | restart) 63 | restart 64 | ;; 65 | *) 66 | echo $"Usage: $0 {start|stop|restart|status}" 67 | exit 1 68 | esac 69 | 70 | exit 0 71 | -------------------------------------------------------------------------------- /roles/tomcat8/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for tomcat8 3 | -------------------------------------------------------------------------------- /roles/zabbix-front/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | # Use the new container infrastructure 6 | sudo: false 7 | 8 | # Install ansible 9 | addons: 10 | apt: 11 | packages: 12 | - python-pip 13 | 14 | install: 15 | # Install ansible 16 | - pip install ansible 17 | 18 | # Check ansible version 19 | - ansible --version 20 | 21 | # Create ansible.cfg with correct roles_path 22 | - printf '[defaults]\nroles_path=../' >ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/zabbix-front/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /roles/zabbix-front/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for zabbix3_2 3 | zabbix_version: 3.2.7 4 | zabbix_user: zabbix 5 | zabbix_group: zabbix 6 | zabbix_home: "/opt/zabbix" 7 | zabbix_tar_file_name: "zabbix-{{zabbix_version}}.tar.gz" 8 | zabbix_tar_exact_path: "{{zabbix_home}}/zabbix-{{zabbix_version}}" 9 | zabbix_frontends_path: "{{zabbix_tar_exact_path}}/frontends/php" 10 | zabbix_log_dir: /var/log/zabbix 11 | zabbix_front_port: 12 | zabbix_download_url: "http://repo.zabbix.com/zabbix/3.2/ubuntu/pool/main/z/zabbix/zabbix_{{zabbix_version}}.orig.tar.gz" 13 | zabbixserver_packages: 14 | - mysql-devel 15 | - libxml2-devel 16 | - net-snmp-devel 17 | - libcurl-devel 18 | - OpenIPMI 19 | - OpenIPMI-devel 20 | - rpm-build 21 | -------------------------------------------------------------------------------- /roles/zabbix-front/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: name="nginx" state="restarted" 4 | delegate_to: "{{delegated_nginx_host}}" 5 | -------------------------------------------------------------------------------- /roles/zabbix-front/templates/zabbix-setup.php: -------------------------------------------------------------------------------- 1 | ansible.cfg 23 | 24 | script: 25 | # Basic role syntax check 26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check 27 | 28 | notifications: 29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | zk_install_dir: /app 3 | ZK_HOME: "{{zk_install_dir}}/zookeeper" 4 | zookeeper_version: 3.4.6 5 | zookeeper_client_port: 2181 6 | zookeeper_connect_port: 2888 7 | zookeeper_election_port: 3888 8 | zookeeper_conf_dir: "{{ZK_HOME}}/conf" 9 | zookeeper_data_dir: "{{ZK_HOME}}/data" 10 | zookeeper_bin_dir: "{{ZK_HOME}}/bin" 11 | zookeeper_tmp_dir: "{{ZK_HOME}}/tmp" 12 | zookeeper_log_dir: "{{ZK_HOME}}/log" 13 | zookeeper_service_name: "zookeeper{{myid}}" 14 | zookeeper_group: "{{usergroup}}" 15 | zookeeper_user: "{{username}}" 16 | zookeeper_java_opts: "-Xmx1000m -Xms1000m" 17 | #java_opts: "-Xmx{{ (ansible_memtotal_mb / 2) | int }}m -Xms{{ (ansible_memtotal_mb / 2) | int }}m" 18 | # note this is for application logs, not the actual zookeeper data logs 19 | zookeeper_log_level: WARN 20 | zookeeper_maxClientCnxns: 3000 21 | -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart zookeeper 3 | service: 4 | name: "{{zookeeper_service_name}}" 5 | state: restarted 6 | enabled: true 7 | tags: 8 | - zk 9 | -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup myid 3 | become: yes 4 | template: 5 | owner: "{{ zookeeper_user }}" 6 | group: "{{ zookeeper_group }}" 7 | dest: "{{ zookeeper_data_dir }}/myid" 8 | src: myid.j2 9 | tags: 10 | - zk 11 | 12 | - name: Setup zoo.cfg file 13 | template: 14 | owner: "{{ zookeeper_user }}" 15 | group: "{{ zookeeper_group }}" 16 | dest: "{{ zookeeper_conf_dir }}/zoo.cfg" 17 | mode: "ug=rwx,o=r" 18 | src: zoo.cfg.j2 19 | tags: 20 | - zk 21 | 22 | - name: Create Zookeeper log4j config 23 | template: 24 | owner: "{{ zookeeper_user }}" 25 | group: "{{ zookeeper_group }}" 26 | dest: "{{ zookeeper_conf_dir }}/log4j.properties" 27 | mode: "ug=rx,o=r" 28 | src: log4j.properties.j2 29 | tags: 30 | - zk 31 | 32 | - name: Setup Zookeeper environment config 33 | template: 34 | owner: "{{ zookeeper_user }}" 35 | group: "{{ zookeeper_group }}" 36 | dest: "{{ zookeeper_conf_dir }}/environment" 37 | mode: "u=rx,og=r" 38 | src: environment.j2 39 | tags: 40 | - zk 41 | 42 | 43 | - name: start and enable zookeeper_service 44 | become: yes 45 | service: 46 | name: "{{zookeeper_service_name}}" 47 | state: started 48 | enabled: true 49 | tags: 50 | - zk 51 | 52 | - name: Wait for Zookeeper port 53 | wait_for: 54 | port: "{{ zookeeper_client_port }}" 55 | state: started 56 | timeout: 30 57 | tags: 58 | - zk 59 | -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - group: name={{zookeeper_group}} system=yes 3 | tags: 4 | - zk 5 | - user: name={{zookeeper_user}} group={{zookeeper_group}} append=yes 6 | tags: 7 | - zk 8 | 9 | ## The hosts entry below must be added before Zookeeper starts, otherwise: EndOfStreamException: 10 | ## Unable to read additional data from client sessionid 0x0, likely client has closed socket 11 | # - name: add localhost into /etc/hosts 12 | lineinfile:
line="127.0.0.1 localhost.localdomain localhost" dest="/etc/hosts" 13 | 14 | 15 | - name: Install Zookeeper 16 | include: build.yml 17 | tags: 18 | - zk 19 | 20 | - name: Configure Zookeeper 21 | include: configure.yml 22 | tags: 23 | - zk 24 | -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/templates/environment.j2: -------------------------------------------------------------------------------- 1 | 2 | # Modified from http://packages.ubuntu.com/saucy/zookeeperd 3 | NAME=zookeeper 4 | ZOOKEEPER_HOME={{ zk_install_dir }} 5 | ZOOCFGDIR={{ zookeeper_conf_dir }} 6 | ZOODIR={{ zk_install_dir }} 7 | 8 | CLASSPATH="$ZOOCFGDIR:$ZOODIR/zookeeper-{{ zookeeper_version }}.jar:{{ zookeeper_classpath | default('$ZOODIR/lib/*') }}" 9 | 10 | ZOOCFG="$ZOOCFGDIR/zoo.cfg" 11 | ZOO_LOG_DIR={{ zookeeper_log_dir }} 12 | USER=$NAME 13 | GROUP=$NAME 14 | PIDDIR=/var/run/$NAME 15 | PIDFILE=$PIDDIR/$NAME.pid 16 | SCRIPTNAME=/etc/init.d/$NAME 17 | JAVA="{{JAVA_HOME}}/bin/java" 18 | ZOOMAIN="org.apache.zookeeper_server.quorum.QuorumPeerMain" 19 | ZOO_LOG4J_PROP="INFO,ROLLINGFILE" 20 | JMXLOCALONLY=false 21 | JAVA_OPTS="{{ zookeeper_java_opts | default() }}" 22 | -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/templates/myid.j2: -------------------------------------------------------------------------------- 1 | {{ myid }} 2 | -------------------------------------------------------------------------------- /roles/zookeeper-ansible-role/templates/zookeeper.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Zookeeper 3 | After=network.target 4 | 5 | [Service] 6 | User={{zookeeper_user}} 7 | Group={{zookeeper_group}} 8 | Environment=JAVA_HOME={{JAVA_HOME}} 9 | Environment="ZOO_LOG_DIR={{zookeeper_log_dir}}" 10 | ExecStart={{ZK_HOME}}/bin/zkServer.sh start-foreground 11 | ExecStop={{ZK_HOME}}/bin/zkServer.sh stop 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /scripts/zabbix.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: UTF-8 -*- 3 | from pyzabbix import ZabbixAPI 4 | zapi = ZabbixAPI("http://192.168.61.14:8542") 5 | zapi.login("Admin", "zabbix") 6 | 7 | 8 | for h in zapi.host.get(output="extend"): 9 | print(h['hostid']) 10 | -------------------------------------------------------------------------------- /vars/app/common.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nginx_conf_path: /usr/local/nginx/nginx/conf 3 | nginx_stream_conf_path: "{{nginx_conf_path}}/streams" 4 | nginx_servers_path: /usr/local/nginx/nginx/conf/servers 5 | nginx_user: www 6 | nginx_group: www 7 | delegated_nginx_host: nginx 8 | zabbix_version: 3.2.7 9 | zabbix_major_version: 3.2 10 | 11 | 12 | mysql_home: /usr/local/mysql 13 | mysql_cnf_path: /etc/my.cnf 14 | mysql_data_dir: "{{mysql_home}}/data/opendata" 15 | mysql_pid_file_path: "{{mysql_data_dir}}/mysqld.pid" 16 | mysql_tmp_dir: "{{mysql_home}}/data/opendata/tmp" 17 | mysql_login_unix_socket: "{{ mysql_tmp_dir }}/mysqld.sock" 18 | --------------------------------------------------------------------------------