├── deploy ├── library │ └── __init__.py ├── roles │ ├── r-shiny │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── templates │ │ │ └── shiny-server.conf.j2 │ ├── kerberos_server │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── templates │ │ │ ├── kdc.conf │ │ │ └── krb5.conf │ ├── neo4j │ │ ├── templates │ │ │ ├── neo4j.log.j2 │ │ │ ├── neo4j.pid.j2 │ │ │ ├── neo4j-env.sh.j2 │ │ │ └── neo4j.upstart.j2 │ │ ├── meta │ │ │ └── main.yml │ │ └── handlers │ │ │ └── main.yml │ ├── mesos_master │ │ ├── templates │ │ │ ├── myid │ │ │ ├── quorum.j2 │ │ │ ├── marathon.j2 │ │ │ ├── zk.j2 │ │ │ ├── zk.marathon.j2 │ │ │ ├── mesos_credentials.j2 │ │ │ └── zoo.cfg │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── post_deploy_mesos_master.yml │ │ └── handlers │ │ │ └── main.yml │ ├── python27-scl │ │ ├── tests │ │ │ ├── inventory │ │ │ └── test.yml │ │ ├── vars │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── templates │ │ │ └── scl_enable-python27.sh.j2 │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── README.md │ ├── python36-scl │ │ ├── tests │ │ │ ├── inventory │ │ │ └── test.yml │ │ ├── vars │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── templates │ │ │ └── scl_enable-python36.sh.j2 │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── README.md │ ├── iptables │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ └── rhel_7.yml │ ├── revolution-r │ │ ├── templates │ │ │ └── revor.sh.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── eod │ │ ├── templates │ │ │ ├── admin_settings.cfg.j2 │ │ │ ├── akeys │ │ │ ├── 20-no-show-proxy-dialog.pkla │ │ │ ├── install_eod_server.exp │ │ │ ├── node.cfg.j2 │ │ │ ├── inittab │ │ │ ├── exceed-connection-server │ │ │ ├── cluster.cfg.j2 │ │ │ ├── custom.conf │ │ │ ├── xorg.conf │ │ │ └── system-auth-ac │ │ ├── tasks │ │ │ ├── upgrade_eod.yaml │ 
│ │ └── install_eod.yaml │ │ ├── handlers │ │ │ └── main.yaml │ │ └── defaults │ │ │ └── main.yaml │ ├── nodejs │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── nvm.sh.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── pentaho │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ └── pentaho.desktop.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── sqitch │ │ └── defaults │ │ │ └── main.yml │ ├── r-core │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── gocd_server │ │ ├── meta │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── templates │ │ │ └── go-server.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── devtools │ │ ├── templates │ │ │ ├── devtools-2.sh.j2 │ │ │ └── devtools-2.repo.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── devtools6 │ │ ├── templates │ │ │ ├── devtools-6.sh.j2 │ │ │ └── devtools-6.repo.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── devtools7 │ │ ├── templates │ │ │ ├── devtools-7.sh.j2 │ │ │ └── devtools-7.repo.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── apache │ │ ├── meta │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── citus │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── citus_cpu.conf.j2 │ │ │ ├── citus_limits.conf.j2 │ │ │ ├── citus_memory.conf.j2 │ │ │ └── pgpass.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── kibana │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── templates │ │ │ ├── kibana.conf │ │ │ └── kibana.repo.j2 │ ├── logstash │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ └── logstash.repo │ │ └── tasks │ │ │ └── main.yml │ ├── nginx │ │ ├── meta │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── nginx.j2 │ │ │ ├── default.conf.j2 │ │ │ └── nginx.conf.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── sas │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── 
templates │ │ │ └── sas_init.d_template.j2 │ ├── spark │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── spark-bin.j2 │ │ │ ├── spark-env.sh.j2 │ │ │ └── log4j.properties │ │ └── defaults │ │ │ └── main.yml │ ├── glusterfs │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── maven │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ └── maven.sh.j2 │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── intellij │ │ ├── meta │ │ │ └── main.yml │ │ ├── files │ │ │ └── idea.png │ │ ├── templates │ │ │ └── idea.desktop.j2 │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── kibana-config │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── logstash-config │ │ ├── meta │ │ │ └── main.yml │ │ ├── files │ │ │ └── 12-example-filter.conf │ │ ├── templates │ │ │ ├── 01-logstash-courier-input.conf │ │ │ ├── 30-elasticsearch-output.conf.j2 │ │ │ ├── 30-elasticsearch-output.1.5.conf.j2 │ │ │ ├── 10-syslog.conf │ │ │ ├── 02-tcp-input.conf │ │ │ └── 20-elasticsearch-input.conf.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── r-shiny-config │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── r-studio-server │ │ ├── meta │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── stata │ │ ├── templates │ │ │ ├── stataupdate.do.j2 │ │ │ ├── configure_stata.sh │ │ │ └── sysprofile.do.j2 │ │ └── defaults │ │ │ └── main.yaml │ ├── jdbc │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── postgresql-client │ │ ├── templates │ │ │ └── postgres.sh.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── postgresql-server │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── environment.conf.j2 │ │ │ ├── pg_ident.conf.j2 │ │ │ ├── pg_hba.conf.j2 │ │ │ ├── config_param.j2 │ │ │ └── pgplus_env.sh.j2 │ │ ├── vars │ │ │ └── rhel_7.yml │ │ └── handlers │ │ │ └── main.yml 
│ ├── r-studio-server-config │ │ ├── templates │ │ │ ├── secure-proxy-user-header.j2 │ │ │ ├── ip-rules.j2 │ │ │ ├── rstudio_server.sh.j2 │ │ │ ├── rsession.conf.j2 │ │ │ ├── rserver.conf.j2 │ │ │ └── login.html.j2 │ │ ├── meta │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── scala │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── sbt.sh.j2 │ │ │ └── scala.sh.j2 │ │ └── defaults │ │ │ └── main.yml │ ├── ntp │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── postgresql-server-config │ │ ├── meta │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── tasks │ │ │ └── create_functions.yml │ ├── redis │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── jdk │ │ ├── templates │ │ │ ├── jdk.sh.j2 │ │ │ └── deployment.config │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── log-courier │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── log-courier.repo │ │ │ └── log-courier.conf.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── ruby │ │ ├── tasks │ │ │ └── .main.yml.swo │ │ └── defaults │ │ │ └── main.yml │ ├── elasticsearch-config │ │ ├── meta │ │ │ └── main.yml │ │ └── templates │ │ │ ├── 99-elasticsearch.conf.j2 │ │ │ ├── role_mapping.yml.j2 │ │ │ └── shield_logging.yml.j2 │ ├── gauss │ │ ├── defaults │ │ │ └── main.yaml │ │ ├── tasks │ │ │ └── main.yaml │ │ └── templates │ │ │ └── install_gauss.exp │ ├── odbc │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── freetds.conf │ │ │ ├── odbc.ini │ │ │ └── odbcinst.ini │ │ └── tasks │ │ │ └── main.yaml │ ├── elasticsearch │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ └── templates │ │ │ └── elasticsearch.repo │ ├── pycharm │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yaml │ │ └── tasks │ │ │ └── main.yml │ ├── math_lm │ │ ├── templates │ │ │ └── mathpass │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── openldap │ │ 
├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── templates │ │ │ ├── ldap.conf.j2 │ │ │ └── slapd.access.j2 │ ├── pem-client │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── python │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── python-build │ │ └── defaults │ │ │ └── main.yml │ ├── shared │ │ └── handlers │ │ │ ├── rhel6.yml │ │ │ ├── rhel7.yml │ │ │ └── main.yml │ ├── epel │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── gocd │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yaml │ │ ├── templates │ │ │ ├── deployment.properties │ │ │ └── go-agent │ │ └── defaults │ │ │ └── main.yaml │ ├── r │ │ ├── meta │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── mesos_agent │ │ ├── templates │ │ │ ├── zk.j2 │ │ │ ├── default_container_info.j2 │ │ │ └── default_container_dns.j2 │ │ ├── meta │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── citus_coordinator │ │ ├── defaults │ │ │ └── main.yml │ │ └── templates │ │ │ └── archive-wal-file.j2 │ ├── kerberos │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── krb5.conf │ │ └── tasks │ │ │ └── main.yml │ ├── sublime │ │ ├── defaults │ │ │ └── main.yaml │ │ └── templates │ │ │ └── sublime.desktop.j2 │ ├── r-libs │ │ ├── meta │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── talend │ │ ├── templates │ │ │ ├── TOS_DI-linux-gtk-x86.ini.j2 │ │ │ └── talend.desktop.j2 │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── pdfgrep │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── monitor-research │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── stattransfer │ │ ├── templates │ │ │ ├── configure_stattransfer.sh │ │ │ └── license1282.txt │ │ ├── defaults │ │ │ └── main.yaml │ │ └── tasks │ │ │ └── main.yaml │ ├── open_office │ │ ├── tasks │ │ │ └── main.yaml │ │ └── 
defaults │ │ │ └── main.yaml │ ├── ultra-edit │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── citus_coordinator_standby │ │ ├── defaults │ │ │ └── main.yml │ │ └── handlers │ │ │ └── main.yml │ ├── puppet_disable │ │ └── tasks │ │ │ └── main.yml │ ├── common │ │ └── defaults │ │ │ └── main.yml │ ├── umask │ │ └── tasks │ │ │ └── main.yml │ ├── r-studio-desktop │ │ └── tasks │ │ │ └── main.yml │ ├── postgresql-dbs │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── emacs │ │ └── tasks │ │ │ └── main.yaml │ ├── evince │ │ └── tasks │ │ │ └── main.yaml │ ├── python-libs │ │ └── defaults │ │ │ └── main.yml │ ├── psql-authnz │ │ └── tasks │ │ │ └── main.yml │ ├── tesseract │ │ └── defaults │ │ │ └── main.yaml │ ├── julia │ │ └── tasks │ │ │ └── main.yml │ ├── clouseau │ │ └── tasks │ │ │ └── main.yml │ ├── kerberos_apache │ │ └── tasks │ │ │ └── main.yml │ └── matlab │ │ └── defaults │ │ └── main.yml ├── create_databases.yml ├── monitor_research_environment.yml ├── config_analytics_terminal.yml ├── config_citus_server.yml ├── deploy_r_terminal.yml ├── config_elasticsearch.yml ├── deploy_neo4j_server.yml ├── deploy_elastic_search.yml ├── group_vars │ ├── citus_coordinator │ │ └── main.yml │ ├── citus_coordinator_standby │ │ └── main.yml │ └── citus_worker │ │ └── main.yml ├── upgrade_gocd.yml ├── ansible.cfg ├── templates │ ├── proxy.html │ ├── enclave_proxy.php │ ├── enclave_proxy_restricted.php │ └── proxy.conf ├── deploy_file_server.yml ├── deploy_analytics_terminal.yml ├── deploy_admin_terminal.yml ├── deploy_mesos_master.yml ├── deploy_db_server.yml ├── config_nginx_proxy_server.yml ├── config_proxy_server.yml ├── deploy_kerberos_server.yml ├── deploy_elk.yml ├── audit_resources.yml ├── deploy_development_terminal.yml ├── upgrade_citus_enterprise.yml ├── deploy_db_terminal.yml └── config_file_server.yml ├── test ├── files │ ├── gocd │ │ ├── test_reqs.txt │ │ ├── remove_test_pipelines.py │ │ └── wait_for_pipeline.py │ 
└── mesos │ │ ├── simple_app │ │ ├── simple_app_aci │ │ ├── rootfs │ │ │ ├── etc │ │ │ │ └── passwd │ │ │ └── bin │ │ │ │ └── simple_app │ │ └── manifest │ │ ├── simple_app.aci │ │ ├── test.sh │ │ ├── simple_app.json │ │ └── simple_app.c ├── ansible.cfg ├── test_mesos.yml └── test_postgres.yml ├── .devcontainer ├── docker-compose-networks.yml ├── docker-compose-host-template.yml ├── Makefile ├── Dockerfile-base ├── bashrc ├── welcome.txt ├── scripts │ └── rebuild-docker-compose.sh └── devcontainer.json ├── Dockerfile ├── Dockerfile7 ├── CONTRIBUTING.md ├── CHANGELOG.md └── .gitignore /deploy/library/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /deploy/roles/r-shiny/handlers/main.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /deploy/roles/kerberos_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /deploy/roles/neo4j/templates/neo4j.log.j2: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /deploy/roles/neo4j/templates/neo4j.pid.j2: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/templates/myid: -------------------------------------------------------------------------------- 1 | {{ zookeeper_id }} -------------------------------------------------------------------------------- /deploy/roles/python27-scl/tests/inventory: -------------------------------------------------------------------------------- 1 | 
localhost 2 | 3 | -------------------------------------------------------------------------------- /deploy/roles/python36-scl/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /deploy/roles/iptables/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | is_terminal: False -------------------------------------------------------------------------------- /deploy/roles/revolution-r/templates/revor.sh.j2: -------------------------------------------------------------------------------- 1 | export R_ARCH="" 2 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/admin_settings.cfg.j2: -------------------------------------------------------------------------------- 1 | [USER] 2 | IsAdmin=1 3 | -------------------------------------------------------------------------------- /deploy/roles/nodejs/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - umask 4 | -------------------------------------------------------------------------------- /deploy/roles/pentaho/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - jdbc 4 | -------------------------------------------------------------------------------- /deploy/roles/sqitch/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | install_sqitch: True 3 | -------------------------------------------------------------------------------- /deploy/roles/python27-scl/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for python36-scl 
-------------------------------------------------------------------------------- /deploy/roles/python36-scl/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for python36-scl -------------------------------------------------------------------------------- /deploy/roles/r-core/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | r_core_state: present 3 | 4 | -------------------------------------------------------------------------------- /deploy/roles/gocd_server/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: jdk 4 | -------------------------------------------------------------------------------- /deploy/roles/python27-scl/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for python36-scl -------------------------------------------------------------------------------- /deploy/roles/python36-scl/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for python36-scl -------------------------------------------------------------------------------- /test/files/gocd/test_reqs.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4 2 | lxml 3 | httplib2 4 | requests 5 | -------------------------------------------------------------------------------- /deploy/roles/devtools/templates/devtools-2.sh.j2: -------------------------------------------------------------------------------- 1 | source /opt/rh/devtoolset-2/enable 2 | -------------------------------------------------------------------------------- /deploy/roles/devtools6/templates/devtools-6.sh.j2: -------------------------------------------------------------------------------- 1 | source 
/opt/rh/devtoolset-6/enable 2 | -------------------------------------------------------------------------------- /deploy/roles/devtools7/templates/devtools-7.sh.j2: -------------------------------------------------------------------------------- 1 | source /opt/rh/devtoolset-7/enable 2 | -------------------------------------------------------------------------------- /deploy/roles/apache/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/citus/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/kibana/defaults/main.yml: -------------------------------------------------------------------------------- 1 | kibana_version: 4.5 2 | kibana_use_repository: True 3 | -------------------------------------------------------------------------------- /deploy/roles/kibana/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/logstash/defaults/main.yml: -------------------------------------------------------------------------------- 1 | logstash_user: logstash 2 | logstash_version: 2.3 3 | -------------------------------------------------------------------------------- /deploy/roles/nginx/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/sas/meta/main.yml: 
-------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/spark/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/create_databases.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: db_server 3 | roles: 4 | - postgresql-dbs 5 | -------------------------------------------------------------------------------- /deploy/roles/citus/templates/citus_cpu.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | CPUQuota={{ citus_cpu_limit }} 3 | -------------------------------------------------------------------------------- /deploy/roles/glusterfs/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/logstash/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/maven/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: shared 4 | - role: jdk 5 | -------------------------------------------------------------------------------- /deploy/roles/neo4j/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: shared 4 | - role: jdk 5 | 
-------------------------------------------------------------------------------- /deploy/roles/r-shiny/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/intellij/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: shared 4 | - role: jdk 5 | -------------------------------------------------------------------------------- /deploy/roles/kibana-config/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/logstash-config/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/r-shiny-config/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/stata/templates/stataupdate.do.j2: -------------------------------------------------------------------------------- 1 | update all, from("{{ stata_update_directory }}") 2 | -------------------------------------------------------------------------------- /test/files/mesos/simple_app: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cfpb/aurora/HEAD/test/files/mesos/simple_app -------------------------------------------------------------------------------- /test/files/mesos/simple_app_aci/rootfs/etc/passwd: -------------------------------------------------------------------------------- 1 | vagrant:x:500:500:vagrant:/home/vagrant:/bin/bash 2 | -------------------------------------------------------------------------------- /deploy/roles/citus/templates/citus_limits.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | LimitNOFILE={{ citus_nofile_limit }} 3 | -------------------------------------------------------------------------------- /deploy/roles/jdbc/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: postgresql-client 4 | - role: jdk 5 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-client/templates/postgres.sh.j2: -------------------------------------------------------------------------------- 1 | export PATH={{ postgres_install_dir }}/bin:$PATH 2 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/templates/secure-proxy-user-header.j2: -------------------------------------------------------------------------------- 1 | {{ rstudio_secret_header }} 2 | -------------------------------------------------------------------------------- /deploy/roles/scala/meta/main.yml: -------------------------------------------------------------------------------- 1 
| # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | - role: jdk 6 | -------------------------------------------------------------------------------- /deploy/roles/ntp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Defaults for NTP 2 | --- 3 | ntp_timezone: America/New_York 4 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server-config/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | -------------------------------------------------------------------------------- /deploy/roles/redis/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | - role: epel 6 | -------------------------------------------------------------------------------- /deploy/roles/scala/templates/sbt.sh.j2: -------------------------------------------------------------------------------- 1 | export SBT_HOME=/usr/local/sbt/default 2 | PATH=$PATH:$SBT_HOME/bin 3 | -------------------------------------------------------------------------------- /deploy/roles/stata/templates/configure_stata.sh: -------------------------------------------------------------------------------- 1 | STATA={{ stata_directory }} 2 | 3 | export PATH=$STATA:$PATH 4 | -------------------------------------------------------------------------------- /test/files/mesos/simple_app.aci: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cfpb/aurora/HEAD/test/files/mesos/simple_app.aci -------------------------------------------------------------------------------- /deploy/roles/jdk/templates/jdk.sh.j2: -------------------------------------------------------------------------------- 1 | export JAVA_HOME={{ java_home_path }} 2 | export PATH=$JAVA_HOME/bin:$PATH 3 | -------------------------------------------------------------------------------- /deploy/roles/scala/templates/scala.sh.j2: -------------------------------------------------------------------------------- 1 | export SCALA_HOME=/usr/local/scala/default 2 | PATH=$PATH:$SCALA_HOME/bin 3 | -------------------------------------------------------------------------------- /deploy/roles/citus/templates/citus_memory.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | {{ citus_memory_limit_service }}={{ citus_mem_limit }} 3 | -------------------------------------------------------------------------------- /deploy/roles/intellij/files/idea.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cfpb/aurora/HEAD/deploy/roles/intellij/files/idea.png -------------------------------------------------------------------------------- /deploy/roles/log-courier/defaults/main.yml: -------------------------------------------------------------------------------- 1 | logcourier_log_file_locations: 2 | - /var/log/log-courier/log-courier.log 3 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/templates/quorum.j2: -------------------------------------------------------------------------------- 1 | {{ ((groups['mesos_master']|length / 2)|round(0, 'floor') + 1)|int }} 2 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/templates/environment.conf.j2: 
-------------------------------------------------------------------------------- 1 | 2 | [Service] 3 | Environment=PGDATA={{ pp_datadir }} 4 | -------------------------------------------------------------------------------- /deploy/roles/python27-scl/templates/scl_enable-python27.sh.j2: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | source scl_source enable python27 4 | -------------------------------------------------------------------------------- /deploy/roles/python36-scl/templates/scl_enable-python36.sh.j2: -------------------------------------------------------------------------------- 1 | #/bin/bash 2 | 3 | source scl_source enable rh-python36 4 | -------------------------------------------------------------------------------- /deploy/roles/ruby/tasks/.main.yml.swo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cfpb/aurora/HEAD/deploy/roles/ruby/tasks/.main.yml.swo -------------------------------------------------------------------------------- /deploy/roles/spark/templates/spark-bin.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exec "{{ spark_main_dir }}/bin/$(basename "$0")" "$@" 4 | -------------------------------------------------------------------------------- /deploy/roles/elasticsearch-config/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | - role: jdk 6 | -------------------------------------------------------------------------------- /deploy/roles/gauss/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | gauss_bin_directory: /usr/local/gauss15 3 | guass_local_bin_directory: /artifacts -------------------------------------------------------------------------------- 
/deploy/roles/neo4j/templates/neo4j-env.sh.j2: -------------------------------------------------------------------------------- 1 | export NEO4J_HOME={{ neo4j_home_dir }} 2 | export PATH=$NEO4J_HOME:/bin:$PATH 3 | -------------------------------------------------------------------------------- /deploy/roles/python27-scl/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - python36-scl -------------------------------------------------------------------------------- /deploy/roles/python36-scl/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - python36-scl -------------------------------------------------------------------------------- /deploy/roles/odbc/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - {role: epel, when: "use_epel and not 'production' in group_names"} 4 | -------------------------------------------------------------------------------- /deploy/roles/elasticsearch/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | dependencies: 4 | - role: shared 5 | - role: umask 6 | - role: jdk 7 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/templates/marathon.j2: -------------------------------------------------------------------------------- 1 | LIBPROCESS_IP={{ ansible_ssh_host }} 2 | LIBPROCESS_PORT={{ marathon_scheduler_port }} 3 | -------------------------------------------------------------------------------- /deploy/roles/neo4j/handlers/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | - name: restart neo4j 4 | shell: "{{ neo4j_main_dir}}/neo4j restart" 5 | 
-------------------------------------------------------------------------------- /deploy/roles/pycharm/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - {role: python, when: "install_python and custom_repo"} 4 | - role: jdk 5 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/templates/ip-rules.j2: -------------------------------------------------------------------------------- 1 | {% for ip in ip_whitelist %} 2 | allow {{ ip }} 3 | {% endfor %} 4 | deny all -------------------------------------------------------------------------------- /deploy/roles/logstash-config/files/12-example-filter.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | date { 3 | match => [ "timestamp", "ISO8601" ] 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /deploy/roles/math_lm/templates/mathpass: -------------------------------------------------------------------------------- 1 | {{math_lm_server_name}} {{math_lm_math_id}} {{math_lm_activation_key}} {{math_lm_password}} -------------------------------------------------------------------------------- /deploy/roles/maven/templates/maven.sh.j2: -------------------------------------------------------------------------------- 1 | export MAVEN_HOME={{ maven_install_dir }}/maven 2 | export PATH={{ maven_install_dir }}/maven/bin:$PATH 3 | -------------------------------------------------------------------------------- /deploy/roles/openldap/handlers/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | - name: restart slapd 4 | service: 5 | name: slapd 6 | state: restarted 7 | -------------------------------------------------------------------------------- /.devcontainer/docker-compose-networks.yml: 
-------------------------------------------------------------------------------- 1 | networks: 2 | ansible_nw: 3 | ipam: 4 | config: 5 | - subnet: 10.0.1.0/24 6 | -------------------------------------------------------------------------------- /deploy/roles/apache/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart apache 3 | service: 4 | name: "{{ apache_service }}" 5 | state: restarted 6 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/akeys: -------------------------------------------------------------------------------- 1 | {% if eod_use_license_server != 1 %} 2 | {% for key in eod_license_keys %} 3 | {{ key }} 4 | {% endfor %} 5 | {% endif %} -------------------------------------------------------------------------------- /deploy/roles/pem-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | pem_installer_bin: pem_client-5.0.3-4-linux-x64.run 3 | pem_install_dir: /opt/PEM 4 | install_pem: True -------------------------------------------------------------------------------- /deploy/roles/python/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for python 3 | 4 | python_pip_bin: /usr/local/bin/pip2.7 5 | python_pip_ld_path: [] 6 | -------------------------------------------------------------------------------- /deploy/roles/r-core/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - {role: epel, when: "use_epel and not 'production' in group_names"} 4 | - shared 5 | -------------------------------------------------------------------------------- /test/files/mesos/simple_app_aci/rootfs/bin/simple_app: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cfpb/aurora/HEAD/test/files/mesos/simple_app_aci/rootfs/bin/simple_app -------------------------------------------------------------------------------- /deploy/roles/logstash-config/templates/01-logstash-courier-input.conf: -------------------------------------------------------------------------------- 1 | input { 2 | courier { 3 | port => 5043 4 | transport => "tcp" 5 | } 6 | } -------------------------------------------------------------------------------- /deploy/monitor_research_environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: research_environment 3 | become: yes 4 | become_method: sudo 5 | roles: 6 | - monitor-research -------------------------------------------------------------------------------- /deploy/roles/jdk/templates/deployment.config: -------------------------------------------------------------------------------- 1 | deployment.system.config=file:{{ jre_lib_path }}/deployment.properties 2 | deployment.system.config.mandatory=false -------------------------------------------------------------------------------- /deploy/roles/python-build/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for python-build 3 | 4 | python_pip_bin: /usr/local/bin/pip2.7 5 | python_pip_ld_path: [] 6 | -------------------------------------------------------------------------------- /deploy/roles/shared/handlers/rhel6.yml: -------------------------------------------------------------------------------- 1 | - name: restart postgres 2 | become: True 3 | service: 4 | name: "{{ pp_servicename }}" 5 | state: restarted 6 | -------------------------------------------------------------------------------- /deploy/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: 4 | name: nginx 5 | state: 
restarted 6 | when: nginx_state != "stopped" 7 | -------------------------------------------------------------------------------- /deploy/roles/epel/defaults/main.yml: -------------------------------------------------------------------------------- 1 | epel_path: http://dl.fedoraproject.org/pub/epel 2 | epel_rpm: epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm 3 | -------------------------------------------------------------------------------- /deploy/roles/log-courier/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - {role: epel, when: "use_epel and not 'production' in group_names"} 4 | - shared 5 | - common 6 | -------------------------------------------------------------------------------- /deploy/roles/gocd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart go-agent 3 | service: 4 | name: go-agent 5 | state: restarted 6 | when: restart_gocd_server 7 | -------------------------------------------------------------------------------- /deploy/roles/r/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - {role: epel, when: "use_epel and not 'production' in group_names"} 4 | - devtools 5 | - shared 6 | - common 7 | -------------------------------------------------------------------------------- /deploy/roles/mesos_agent/templates/zk.j2: -------------------------------------------------------------------------------- 1 | zk://{% for host in groups['mesos_master'] %}{{ hostvars[host]['ansible_ssh_host'] }}:2181{% if not loop.last %},{% endif %}{% endfor %}/mesos -------------------------------------------------------------------------------- /deploy/roles/citus_coordinator/defaults/main.yml: -------------------------------------------------------------------------------- 1 | citus_worker_nodes: 2 | - citus_worker_1 3 | - 
citus_worker_2 4 | - citus_worker_3 5 | 6 | pp_datadir: /var/lib/pgsql/10/data 7 | -------------------------------------------------------------------------------- /deploy/roles/gocd_server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart go-server 3 | service: 4 | name: go-server 5 | state: restarted 6 | when: restart_gocd_server 7 | -------------------------------------------------------------------------------- /deploy/roles/iptables/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Establishes dependency on the shared role, specifically for the 2 | # iptables handler 3 | --- 4 | dependencies: 5 | - role: shared 6 | -------------------------------------------------------------------------------- /deploy/roles/kerberos/defaults/main.yml: -------------------------------------------------------------------------------- 1 | kerberos_enabled: True 2 | 3 | krb_log_file_locations: 4 | - /var/log/krb5libs.log 5 | - /var/log/krb5kdc.log 6 | - /var/log/kadmind.log 7 | -------------------------------------------------------------------------------- /deploy/roles/mesos_agent/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Establishes dependency on the shared role, specifically for the 2 | # iptables handler 3 | --- 4 | dependencies: 5 | - role: shared 6 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/templates/zk.j2: -------------------------------------------------------------------------------- 1 | zk://{% for host in groups['mesos_master'] %}{{ hostvars[host]['ansible_ssh_host'] }}:2181{% if not loop.last %},{% endif %}{% endfor %}/mesos 2 | -------------------------------------------------------------------------------- /deploy/roles/openldap/meta/main.yml: 
-------------------------------------------------------------------------------- 1 | # main.yml - Establishes dependency on the shared role, specifically for the 2 | # iptables handler 3 | --- 4 | dependencies: 5 | - role: shared 6 | -------------------------------------------------------------------------------- /deploy/roles/sublime/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sublime_local_artifact_directory: /artifacts 3 | sublime_install_directory: /usr/local/sublime 4 | sublime_remote_artifact_directory: /tmp -------------------------------------------------------------------------------- /test/files/mesos/test.sh: -------------------------------------------------------------------------------- 1 | gcc -o simple_app simple_app.c 2 | curl -F "name=file" -F "filename=simple_app" -F "file=@simple_app" http://10.0.1.31:8080/v2/artifacts/binaries/simple_app 3 | -------------------------------------------------------------------------------- /deploy/config_analytics_terminal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: analytics_terminal 3 | become: yes 4 | become_method: sudo 5 | roles: 6 | - r-studio-server-config 7 | - r-shiny-config 8 | -------------------------------------------------------------------------------- /deploy/roles/kerberos_server/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Establishes dependency on the shared role, specifically for the 2 | # iptables handler 3 | --- 4 | dependencies: 5 | - role: shared 6 | -------------------------------------------------------------------------------- /deploy/roles/ruby/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Allows us to parameterize rpm install on travis 3 | rpm_options: "--ruby" 4 | # Allows bypassing of ruby install when needed 5 | 
install_ruby: True -------------------------------------------------------------------------------- /deploy/roles/shared/handlers/rhel7.yml: -------------------------------------------------------------------------------- 1 | - name: restart postgres 2 | become: True 3 | systemd: 4 | name: "{{ pp_servicename }}" 5 | state: restarted 6 | daemon_reload: yes 7 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/templates/zk.marathon.j2: -------------------------------------------------------------------------------- 1 | zk://{% for host in groups['mesos_master'] %}{{ hostvars[host]['ansible_ssh_host'] }}:2181{% if not loop.last %},{% endif %}{% endfor %}/marathon -------------------------------------------------------------------------------- /deploy/roles/nodejs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nvm_parent_path: /opt 3 | nvm_install_path: "{{ nvm_parent_path }}/nvm" 4 | nvm_exe_path: "{{ nvm_install_path }}/nvm.sh" 5 | node_version: "6.2.0" 6 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/templates/rstudio_server.sh.j2: -------------------------------------------------------------------------------- 1 | {% if jre_lib_path is defined %} 2 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{{ jre_lib_path }}/amd64/server 3 | {% endif %} 4 | -------------------------------------------------------------------------------- /deploy/roles/elasticsearch-config/templates/99-elasticsearch.conf.j2: -------------------------------------------------------------------------------- 1 | # Ensure proper resource limits are set for Elasticsearch 2 | elasticsearch - nofile 65565 3 | elasticsearch - memlock unlimited 4 | -------------------------------------------------------------------------------- /deploy/roles/mesos_agent/handlers/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: restart mesos-agent 3 | service: 4 | name: mesos-slave 5 | state: restarted 6 | tags: 7 | - mesos 8 | - mesos-agent 9 | -------------------------------------------------------------------------------- /deploy/roles/r-libs/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - {role: epel, when: "use_epel and not 'production' in group_names"} 4 | - {role: devtools, tags: r-libs} 5 | - shared 6 | - common 7 | -------------------------------------------------------------------------------- /deploy/roles/talend/templates/TOS_DI-linux-gtk-x86.ini.j2: -------------------------------------------------------------------------------- 1 | -vmargs 2 | -Xms{{ talend_heap_min }} 3 | -Xmx{{ talend_heap_max }} 4 | -XX:MaxPermSize={{ talend_max_perm_size }} 5 | -Dfile.encoding=UTF-8 6 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/meta/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Establishes dependency on the shared role, specifically for the 2 | # iptables handler 3 | --- 4 | dependencies: 5 | - role: shared 6 | - role: jdk 7 | -------------------------------------------------------------------------------- /deploy/roles/elasticsearch/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | es_version: 2.x 3 | es_full_version: 2.3.4 4 | es_data_directory: /opt/elasticsearch/data 5 | es_transport_ports: "9300:9400" 6 | es_http_ports: "9200:9299" 7 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server-config/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart postgres 3 | become: yes 4 | 
become_method: sudo 5 | service: 6 | name: "{{ pp_servicename }}" 7 | state: restarted 8 | -------------------------------------------------------------------------------- /deploy/roles/nginx/templates/nginx.j2: -------------------------------------------------------------------------------- 1 | Cmnd_Alias NGINX_RESTART = /sbin/service nginx restart 2 | Cmnd_Alias NGINX_RELOAD = /sbin/service nginx reload 3 | 4 | {{ deploy_user }} ALL=NOPASSWD: NGINX_RESTART, NGINX_RELOAD 5 | -------------------------------------------------------------------------------- /deploy/roles/pdfgrep/defaults/main.yml: -------------------------------------------------------------------------------- 1 | pdfgrep_install_directory: /usr/bin/pdfgrep 2 | pdfgrep_local_artifact_directory: /artifacts 3 | pdfgrep_install_package_name: pdfgrep-2.0.1.tar.gz 4 | -------------------------------------------------------------------------------- /deploy/config_citus_server.yml: -------------------------------------------------------------------------------- 1 | # config_citus_server.yml 2 | --- 3 | - hosts: citus 4 | become: yes 5 | become_method: sudo 6 | vars: 7 | pp_install: false 8 | roles: 9 | - postgresql-server-config 10 | -------------------------------------------------------------------------------- /deploy/roles/devtools6/templates/devtools-6.repo.j2: -------------------------------------------------------------------------------- 1 | [devtools-6-centos-$releasever] 2 | name=testing devtools for CentOS $releasever 3 | baseurl=http://mirror.centos.org/centos/$releasever/sclo/$basearch/rh 4 | gpgcheck=0 5 | -------------------------------------------------------------------------------- /deploy/roles/devtools7/templates/devtools-7.repo.j2: -------------------------------------------------------------------------------- 1 | [devtools-7-centos-$releasever] 2 | name=testing devtools for CentOS $releasever 3 | baseurl=http://mirror.centos.org/centos/$releasever/sclo/$basearch/rh 4 | gpgcheck=0 5 | 
-------------------------------------------------------------------------------- /deploy/roles/monitor-research/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | research_monitor_output_file_locaiton: outputlocation 3 | date_stamp: "{{lookup('pipe','date +%Y%m%d%H%M%S')}}" 4 | 5 | research_nightly_monitor_locations: searchlocations -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/templates/rsession.conf.j2: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | 3 | {% if rstudio_pro %} 4 | allow-package-installation=0 5 | {% endif %} 6 | session-timeout-minutes={{ rstudio_session_timeout }} 7 | -------------------------------------------------------------------------------- /deploy/roles/devtools/templates/devtools-2.repo.j2: -------------------------------------------------------------------------------- 1 | [devtools-2-centos-$releasever] 2 | name=testing 2 devtools for CentOS $releasever 3 | baseurl=http://people.centos.org/tru/devtools-2/$releasever/$basearch/RPMS 4 | gpgcheck=0 5 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/20-no-show-proxy-dialog.pkla: -------------------------------------------------------------------------------- 1 | [No Show Proxy Dialog] 2 | Identity=unix-user:* 3 | Action=org.freedesktop.packagekit.system-network-proxy-configure 4 | ResultAny=no 5 | ResultInactive=no 6 | ResultActive=no 7 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/vars/rhel_7.yml: -------------------------------------------------------------------------------- 1 | pgdg_repository: "https://download.postgresql.org/pub/repos/yum/10/redhat/rhel-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm" 2 | pp_initdb: "/usr/pgsql-10/bin/postgresql-10-setup initdb" 3 | 
-------------------------------------------------------------------------------- /deploy/roles/r-shiny/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | install_r_packages: true 3 | 4 | r_shiny_server_package: "http://download3.rstudio.org/centos5.9/x86_64/shiny-server-1.5.3.838-rh5-x86_64.rpm" 5 | r_shiny_server_package_state: "present" 6 | -------------------------------------------------------------------------------- /deploy/roles/stattransfer/templates/configure_stattransfer.sh: -------------------------------------------------------------------------------- 1 | 2 | PATH_OUTPUT=`env | grep PATH` 3 | CHECK={{stattransfer_install_directory}} 4 | 5 | if [[ ! "$PATH_OUTPUT" =~ "$CHECK" ]] 6 | then 7 | export PATH=$CHECK:$PATH 8 | fi 9 | -------------------------------------------------------------------------------- /deploy/roles/mesos_agent/templates/default_container_info.j2: -------------------------------------------------------------------------------- 1 | { 2 | "type": "MESOS", 3 | "volumes": [ 4 | { 5 | "host_path": ".private/tmp", 6 | "container_path": "/tmp", 7 | "mode": "RW" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /deploy/roles/open_office/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install all rpms 3 | yum: 4 | name: "{{item}}" 5 | state: present 6 | disable_gpg_check: yes 7 | with_items: 8 | - "{{open_office_rpms}}" 9 | when: custom_repo 10 | -------------------------------------------------------------------------------- /deploy/roles/ultra-edit/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ue_uninstall_list: 3 | - UltraEdit-16.1-0.18.x86_64 4 | - UltraEdit-3.3-0.12.x86_64 5 | - UltraEdit-16.1-0.22.x86_64 6 | - UltraEdit-15.1-0.8.x86_64 7 | 8 | ue_install_version: 
UltraEdit-16.1-0.22.x86_64 -------------------------------------------------------------------------------- /deploy/roles/citus_coordinator_standby/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | replication_user: replication 3 | replication_password: replication 4 | pp_servicename: postgresql-10 5 | pp_datadir: /var/lib/pgsql/10/data 6 | pp_xlogdir: /var/lib/pgsql/10/data 7 | pg_wal: pg_wal 8 | -------------------------------------------------------------------------------- /deploy/roles/jdbc/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jdbc_postgres_url: https://jdbc.postgresql.org/download/postgresql-9.4-1206-jdbc42.jar 3 | jdbc_sql_url: https://download.microsoft.com/download/0/2/A/02AAE597-3865-456C-AE7F-613F99F850A8/enu/sqljdbc_6.0.8112.100_enu.tar.gz 4 | -------------------------------------------------------------------------------- /deploy/roles/math_lm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | update_math_lm_activation_key: False 3 | mathlm_location: /usr/local/Wolfram/MathLM 4 | 5 | math_lm_server_name: research_gauss 6 | math_lm_math_id: math_id 7 | math_lm_activation_key: key 8 | math_lm_password: password 9 | -------------------------------------------------------------------------------- /deploy/roles/maven/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | maven_version: 3.3.9 3 | maven_url: http://mirrors.advancedhosters.com/apache/maven/maven-3/{{ maven_version }}/binaries/apache-maven-{{ maven_version }}-bin.tar.gz 4 | maven_install_dir: /usr/local 5 | 6 | -------------------------------------------------------------------------------- /deploy/roles/kibana/templates/kibana.conf: -------------------------------------------------------------------------------- 1 | # kibana.conf 2 | start on 
runlevel [2345] 3 | stop on runlevel [016] 4 | 5 | limit nofile 65550 65550 6 | 7 | # Run Kibana, which is in /opt/kibana 8 | exec /opt/kibana/bin/kibana 9 | 10 | respawn 11 | respawn limit 10 5 12 | -------------------------------------------------------------------------------- /deploy/roles/python/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Install Python 2.7, either from a repo or a built RPM 2 | --- 3 | - name: Install Python27 from repo 4 | yum: 5 | name: python27 6 | state: latest 7 | disable_gpg_check: yes 8 | tags: 9 | - python2 10 | -------------------------------------------------------------------------------- /deploy/roles/python36-scl/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for python36-scl 3 | 4 | python3_pip_bin: /opt/rh/rh-python36/root/usr/bin/pip3.6 5 | 6 | # Should the role set SCL python v3.6 as the default system python for users 7 | set_python36_default_python: False 8 | -------------------------------------------------------------------------------- /test/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | force_color=1 3 | forks=50 4 | gathering=smart 5 | host_key_checking=False 6 | vault_password_file=.vault_password 7 | 8 | [ssh_connection] 9 | control_path=%(directory)s/%%h-%%r 10 | 11 | [privilege_escalation] 12 | become_ask_pass=False 13 | -------------------------------------------------------------------------------- /deploy/roles/kibana/templates/kibana.repo.j2: -------------------------------------------------------------------------------- 1 | [kibana-{{ kibana_version }}] 2 | name=Kibana repository for 4.5.x packages 3 | baseurl=https://packages.elastic.co/kibana/{{ kibana_version }}/centos 4 | gpgcheck=1 5 | gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch 6 | enabled=1 7 | 
-------------------------------------------------------------------------------- /deploy/roles/pentaho/templates/pentaho.desktop.j2: -------------------------------------------------------------------------------- 1 | [Desktop Entry] 2 | Name=Pentaho {{ pentaho_version }} 3 | Version={{ pentaho_version }} 4 | Type=Application 5 | Terminal=false 6 | Categories=Development; 7 | Icon=/opt/data-integration/spoon.ico 8 | Exec=/opt/data-integration/spoon.sh 9 | -------------------------------------------------------------------------------- /deploy/roles/puppet_disable/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Disable pe-puppet 2 | service: 3 | name: pe-puppet 4 | state: stopped 5 | enabled: no 6 | 7 | - name: Disable pe-mcollective 8 | service: 9 | name: pe-mcollective 10 | state: stopped 11 | enabled: no 12 | -------------------------------------------------------------------------------- /deploy/roles/intellij/templates/idea.desktop.j2: -------------------------------------------------------------------------------- 1 | [Desktop Entry] 2 | Name=IntelliJ IDEA 3 | Type=Application 4 | Exec=idea.sh 5 | Terminal=false 6 | Icon=idea 7 | Comment=Integrated Development Environment 8 | NoDisplay=false 9 | Categories=Development;IDE; 10 | Name[en]=IntelliJ IDEA 11 | -------------------------------------------------------------------------------- /deploy/deploy_r_terminal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: analytics_terminal 3 | become: yes 4 | become_method: sudo 5 | roles: 6 | - r-core 7 | - r-libs 8 | - r-shiny 9 | - r-shiny-config 10 | - r-studio-server 11 | - r-studio-server-config 12 | - r-studio-desktop 13 | -------------------------------------------------------------------------------- /deploy/roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # resolve-gids not yet 
supported 2 | #gluster_mount_options: "defaults,resolve-gids,acl,_netdev" 3 | gluster_mount_options: "defaults,acl,_netdev" 4 | shared_links: [] 5 | 6 | skip_glusterfs: false 7 | 8 | log_file_locations: 9 | - /var/log/messages 10 | -------------------------------------------------------------------------------- /deploy/roles/stata/templates/sysprofile.do.j2: -------------------------------------------------------------------------------- 1 | 2 | set odbcmgr {{ stata_odbc_mgr }} 3 | cap set processors {{ stata_processor }} 4 | sysdir set SITE {{ stata_site }} 5 | 6 | {% for printer in stata_printers %} 7 | printer define {{ printer }} 8 | {% endfor %} 9 | -------------------------------------------------------------------------------- /deploy/roles/neo4j/templates/neo4j.upstart.j2: -------------------------------------------------------------------------------- 1 | # Located in /etc/init/neo4j.conf 2 | # Neo4J upstart script 3 | start on runlevel [2345] 4 | stop on runlevel [016] 5 | 6 | console output 7 | 8 | respawn 9 | 10 | exec su -s /bin/sh -c 'exec "$0" "$@"' neo4j -- /opt/neo4j/neo4j-community-3.0.4/bin/neo4j 11 | -------------------------------------------------------------------------------- /deploy/roles/jdk/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jdk_packages: 3 | - java-1.8.0-openjdk 4 | - java-1.8.0-openjdk-devel 5 | - libselinux-python 6 | 7 | java_deploy_options: [] 8 | jre_lib_path: /usr/lib/jvm/java-1.8.0/jre/lib 9 | java_home_path: /usr/lib/jvm/java-1.8.0 10 | java_keystore_password: changeit 11 | -------------------------------------------------------------------------------- /deploy/roles/r-shiny-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | r_shiny_pro: False 3 | shiny_http_headers: "X-EXAMPLE" 4 | shiny_required_users: "admin" 5 | shiny_required_groups: "{{ restricted_group_name }}" 6 | 
shiny_auth_proxy: False 7 | shiny_auth_proxy_header: "" 8 | shiny_admin_port: 4151 9 | shiny_groups: "" 10 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/templates/mesos_credentials.j2: -------------------------------------------------------------------------------- 1 | { 2 | "credentials": [ 3 | {% for credential in mesos_credentials %} 4 | { 5 | "principal": "{{ credential.principal }}", 6 | "secret": "{{ credential.secret }}" 7 | }{% if not loop.last %},{% endif %} 8 | {% endfor %} 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /deploy/roles/odbc/templates/freetds.conf: -------------------------------------------------------------------------------- 1 | {% for entry in freetds_entries %} 2 | [{{entry.hostname}}] 3 | host = {{entry.fqdn}} 4 | port = {{entry.port}} 5 | tds Version = 7.1 6 | try domain login = yes 7 | try server login = no 8 | {% if entry.use_ntlmv2 %} 9 | use ntlmv2 10 | {% endif %} 11 | 12 | {% endfor %} 13 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/templates/pg_ident.conf.j2: -------------------------------------------------------------------------------- 1 | # This is a mapping file to assign Kerberos principals to Postgres roles. 
2 | # See: https://www.postgresql.org/docs/10/auth-username-maps.html 3 | {% for ident in pp_ident_mappings %} 4 | {{ ident.type }} {{ ident.match }} {{ ident.pg_role }} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /deploy/config_elasticsearch.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: elasticsearch 3 | become: yes 4 | become_method: sudo 5 | roles: 6 | - elasticsearch-config 7 | 8 | - hosts: elk 9 | become: yes 10 | become_method: sudo 11 | roles: 12 | - elasticsearch-config 13 | - logstash-config 14 | - kibana-config 15 | -------------------------------------------------------------------------------- /deploy/roles/logstash/templates/logstash.repo: -------------------------------------------------------------------------------- 1 | [logstash-{{ logstash_version }}] 2 | name=logstash repository for {{ logstash_version }}.x packages 3 | baseurl=https://packages.elasticsearch.org/logstash/{{ logstash_version }}/centos 4 | gpgcheck=1 5 | gpgkey=https://packages.elasticsearch.org/GPG-KEY-elasticsearch 6 | enabled=1 7 | -------------------------------------------------------------------------------- /deploy/roles/logstash-config/templates/30-elasticsearch-output.conf.j2: -------------------------------------------------------------------------------- 1 | output { 2 | elasticsearch { 3 | hosts => "{{ ansible_ssh_host }}:9200" 4 | index => "logstash-%{+YYYY.MM.dd}" 5 | user => "{{ logstash_user }}" 6 | password => "{{ logstash_password }}" 7 | } 8 | stdout { codec => rubydebug } 9 | } 10 | -------------------------------------------------------------------------------- /deploy/roles/odbc/templates/odbc.ini: -------------------------------------------------------------------------------- 1 | [ODBC Data Sources] 2 | {{odbc_ms_sql_hostname}} = MS SQLServer 3 | 4 | {% for odbc_entry in odbc_entries %} 5 | [{{odbc_entry.hostname}}] 6 | {% for key, value in 
odbc_entry.items() %} 7 | {% if key != 'hostname' %} 8 | {{key}}={{value}} 9 | {% endif %} 10 | {% endfor %} 11 | 12 | {% endfor %} -------------------------------------------------------------------------------- /deploy/roles/pycharm/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | pycharm_download_basename: pycharm-community-4.5.5.tar.gz 3 | pycharm_download_url: "http://download.jetbrains.com/python/{{ pycharm_download_basename }}" 4 | pycharm_path: pycharm-community-4.5.5 5 | pycharm_install_directory: /usr/local/pycharm 6 | pycharm_local_bin_directory: /artifacts 7 | -------------------------------------------------------------------------------- /deploy/roles/python27-scl/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for python27-scl 3 | 4 | python_scl_pip_bin: /opt/rh/python27/root/usr/bin/pip2.7 5 | python_scl_pip_ld_path: /opt/rh/python27/root/usr/lib64 6 | 7 | # Should the role set SCL python v2.7 as the default system python for users 8 | set_python27_default_python: False 9 | -------------------------------------------------------------------------------- /deploy/roles/umask/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install libselinux-python for Ansible 2 | yum: 3 | name: libselinux-python 4 | state: present 5 | 6 | - name: Ensure umask is correct 7 | become: no 8 | lineinfile: 9 | dest: ~/.bashrc 10 | line: umask 0022 11 | regexp: ^(.*)umask(.*)$ 12 | state: present 13 | -------------------------------------------------------------------------------- /deploy/roles/gocd/meta/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - {role: python, when: "install_python and custom_repo"} 4 | - {role: python27-scl, when: "not custom_repo"} 5 | - role: postgresql-client 6 | - role: 
jdk 7 | - {role: epel, when: "use_epel and not 'production' in group_names"} 8 | - role: odbc 9 | - role: jdbc 10 | -------------------------------------------------------------------------------- /deploy/roles/spark/templates/spark-env.sh.j2: -------------------------------------------------------------------------------- 1 | export SPARK_HOME={{ spark_main_dir }} 2 | export PATH=$SPARK_HOME/bin:$PATH 3 | export MESOS_NATIVE_JAVA_LIBRARY={{ mesos_native_lib_path }} 4 | export SPARK_EXECUTOR_URI={{ spark_executor_uri }} 5 | export SPARK_USER={{ spark_execute_user }} 6 | export LIBPROCESS_PORT={{ spark_libprocess_port }} 7 | -------------------------------------------------------------------------------- /deploy/roles/math_lm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create new mathpass file and rename existing with timestamp 3 | template: 4 | src: mathpass 5 | dest: "{{mathlm_location}}" 6 | owner: root 7 | group: root 8 | mode: "u=rw,g=r,o=r" 9 | backup: yes 10 | when: update_math_lm_activation_key 11 | tags: math_lm 12 | -------------------------------------------------------------------------------- /deploy/roles/pentaho/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #pentaho_zip_path: . 
3 | #pentaho_zip_file: pdi-ce-6.0.1.0-386.zip 4 | pentaho_url: http://osdn.net/frs/g_redir.php?m=kent&f=pentaho%2FData+Integration%2F6.0%2Fpdi-ce-6.0.1.0-386.zip 5 | pentaho_version: 6.0.1.0-386 6 | pentaho_heap_size: "512m" 7 | pentaho_perm_size: "256m" 8 | install_pentaho: true 9 | -------------------------------------------------------------------------------- /deploy/roles/elasticsearch/templates/elasticsearch.repo: -------------------------------------------------------------------------------- 1 | [elasticsearch-{{ es_version }}] 2 | name=Elasticsearch repository for {{ es_version }}.x packages 3 | baseurl=http://packages.elastic.co/elasticsearch/{{ es_version }}/centos 4 | gpgcheck=1 5 | gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch 6 | enabled=1 7 | autorefresh=1 8 | type=rpm-md 9 | -------------------------------------------------------------------------------- /deploy/roles/sublime/templates/sublime.desktop.j2: -------------------------------------------------------------------------------- 1 | [Desktop Entry] 2 | Name=Sublime Text 3 | GenericName=Text Editor 4 | Comment=Edit text 5 | Exec=subl 6 | Icon=sublime_text 7 | Type=Application 8 | Terminal=false 9 | Categories=Application;TextEditor;TextEditors 10 | Encoding=UTF-8 11 | StartupWMClass=SublimeText 12 | X-Desktop-File-Install-Version=0.15 13 | -------------------------------------------------------------------------------- /deploy/roles/gocd/templates/deployment.properties: -------------------------------------------------------------------------------- 1 | deployment.security.askgrantdialog.notinca.locked 2 | deployment.security.askgrantdialog.notinca=false 3 | deployment.security.validation.ocsp=true 4 | deployment.security.validation.ocsp.locked 5 | deployment.security.validation.crl=true 6 | deployment.security.validation.crl.locked 7 | deployment.system.config.mandatory=false -------------------------------------------------------------------------------- 
/deploy/roles/log-courier/templates/log-courier.repo: -------------------------------------------------------------------------------- 1 | [driskell-log-courier] 2 | name=Copr repo for log-courier owned by driskell 3 | baseurl=https://copr-be.cloud.fedoraproject.org/results/driskell/log-courier/epel-6-$basearch/ 4 | skip_if_unavailable=True 5 | gpgcheck=1 6 | gpgkey=https://copr-be.cloud.fedoraproject.org/results/driskell/log-courier/pubkey.gpg 7 | enabled=1 -------------------------------------------------------------------------------- /deploy/roles/nodejs/templates/nvm.sh.j2: -------------------------------------------------------------------------------- 1 | # 2 | # location: /etc/profile.d 3 | # purpose: install nvm to this place so all users can use it 4 | # 5 | # @see http://stackoverflow.com/a/19040346 6 | # 7 | 8 | export NVM_DIR={{ nvm_install_path }} 9 | 10 | LC_ALL=en_US.UTF-8 \ 11 | LANG=en_US.UTF-8 \ 12 | LANGUAGE=en_US.UTF-8 \ 13 | source {{ nvm_exe_path }} 14 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/templates/pg_hba.conf.j2: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | # TYPE DATABASE USER ADDRESS METHOD 3 | 4 | {% for setting in pg_hba_settings %} 5 | {{ setting.context }} {{ setting.db }} {{ setting.user }} {{ setting.address }} {{ setting.ip_mask }} {{ setting.auth_method }} {{ setting.auth_options }} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /deploy/roles/talend/templates/talend.desktop.j2: -------------------------------------------------------------------------------- 1 | [Desktop Entry] 2 | Name=Talend Open Studio {{ talend_version }} 3 | Version={{ talend_version }} 4 | Type=Application 5 | Terminal=false 6 | Categories=Development; 7 | Icon=/opt/{{ talend_zip_name }}/{{ talend_icon_path }} 8 | Exec=/opt/{{ talend_zip_name }}/{{ 
talend_executable }} -vm {{ talend_java_path }} -data talend/workspace 9 | -------------------------------------------------------------------------------- /deploy/roles/gocd/templates/go-agent: -------------------------------------------------------------------------------- 1 | export GO_SERVER={{ go_server_address }} 2 | export GO_SERVER_PORT={{ go_server_port }} 3 | export GO_SERVER_URL={{ go_server_url }} 4 | export AGENT_WORK_DIR={{ gocd_agent_work_dir }} 5 | export AGENT_MEM={{ gocd_agent_mem }} 6 | export AGENT_MAX_MEM={{ gocd_agent_max_mem }} 7 | export JAVA_HOME={{ java_home_path }} 8 | DAEMON=Y 9 | VNC=N 10 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-desktop/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install R Studio Desktop 3 | yum: 4 | name: http://download1.rstudio.org/rstudio-0.98.1103-x86_64.rpm 5 | state: present 6 | when: not custom_repo 7 | 8 | - name: Install R Studio Desktop 9 | yum: 10 | name: rstudio 11 | state: present 12 | disable_gpg_check: yes 13 | when: custom_repo 14 | -------------------------------------------------------------------------------- /deploy/deploy_neo4j_server.yml: -------------------------------------------------------------------------------- 1 | # deploy_neo4j_server 2 | --- 3 | - hosts: neo4j_server 4 | become: yes 5 | become_method: sudo 6 | roles: 7 | - {role: iptables, when: "iptables_config"} 8 | - {role: puppet_disable, when: "disable_puppet"} 9 | - common 10 | - {role: epel, when: "use_epel and not 'production' in group_names"} 11 | - jdk 12 | - neo4j 13 | 14 | -------------------------------------------------------------------------------- /deploy/roles/elasticsearch-config/templates/role_mapping.yml.j2: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | 3 | # Role mapping configuration file which has elasticsearch 
roles as keys 4 | # that map to one or more user or group distinguished names 5 | 6 | {% for role in es_role_mappings %} 7 | {{ role.name }}: 8 | {% for mapping in role.mappings %} 9 | - "{{ mapping }}" 10 | {% endfor %} 11 | {% endfor %} -------------------------------------------------------------------------------- /deploy/roles/postgresql-dbs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Account for automated DB management 3 | pp_dba_username: vagrant 4 | pp_dba_password: vagrant 5 | 6 | # Business DB's to create 7 | pp_dbs: 8 | - 9 | name: example_db 10 | template: master_template 11 | owner: vagrant 12 | extensions: 13 | - postgis 14 | - postgis_topology 15 | - address_standardizer 16 | -------------------------------------------------------------------------------- /test/files/mesos/simple_app_aci/manifest: -------------------------------------------------------------------------------- 1 | { 2 | "acKind": "ImageManifest", 3 | "acVersion": "0.7.4", 4 | "name": "simple_app", 5 | "labels": [ 6 | {"name": "os", "value": "linux"}, 7 | {"name": "arch", "value": "amd64"} 8 | ], 9 | "app": { 10 | "exec": [ 11 | "/bin/simple_app 2" 12 | ], 13 | "user": "vagrant", 14 | "group": "vagrant" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /deploy/roles/logstash-config/templates/30-elasticsearch-output.1.5.conf.j2: -------------------------------------------------------------------------------- 1 | output { 2 | elasticsearch { 3 | host => "{{ ansible_ssh_host }}:9200" 4 | cluster => "{{ es_cluster }}" 5 | index => "logstash-%{+YYYY.MM.dd}" 6 | protocol => "http" 7 | user => "{{ logstash_user }}" 8 | password => "{{ logstash_password }}" 9 | } 10 | stdout { codec => rubydebug } 11 | } 12 | -------------------------------------------------------------------------------- /test/files/mesos/simple_app.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "id": "simple_app", 3 | "cpus": 0.5, 4 | "mem": 32.0, 5 | "disk": 10.0, 6 | "instances": 1, 7 | "cmd": "./simple_app", 8 | "container": { 9 | "type": "MESOS", 10 | "mesos": { 11 | "image": { 12 | "type": "APPC", 13 | "appc": { 14 | "name": "test/simple_app" 15 | } 16 | } 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rstudio_server_package_name: http://download2.rstudio.org/rstudio-server-rhel-1.1.383-x86_64.rpm 3 | rstudio_server_package_state: present 4 | 5 | rstudio_pro_drivers_url: http://drivers.rstudio.org/7C152C12/odbc-install.sh 6 | rstudio_pro_drivers_folder: /tmp 7 | rstudio_pro_driver_install_name: odbc-install.sh 8 | rstudio_pro_driver_install_folder: /opt/rstudio-server/drivers -------------------------------------------------------------------------------- /.devcontainer/docker-compose-host-template.yml: -------------------------------------------------------------------------------- 1 | #please note: the spacing needs to line up w/ the other yaml templates 2 | HOSTNAME: 3 | image: centos7-sshd:latest 4 | restart: always 5 | privileged: true 6 | volumes: 7 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 8 | - ../.cache/yum:/var/cache/yum:cached 9 | networks: 10 | ansible_nw: 11 | ipv4_address: IPV4 12 | 13 | -------------------------------------------------------------------------------- /deploy/deploy_elastic_search.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: elasticsearch 3 | become: yes 4 | become_method: sudo 5 | roles: 6 | - common 7 | - {role: puppet_disable, when: "disable_puppet"} 8 | - {role: iptables, when: "iptables_config"} 9 | - {role: epel, when: "use_epel and not 'production' in group_names"} 10 | 
- elasticsearch 11 | - {role: log-courier, when: "install_logstash"} 12 | - elasticsearch-config 13 | -------------------------------------------------------------------------------- /deploy/roles/eod/tasks/upgrade_eod.yaml: -------------------------------------------------------------------------------- 1 | - name: Extract EOD upgrader to install directory 2 | unarchive: 3 | src: "{{ eod_installer_directory }}/{{ eod_upgrader }}" 4 | dest: "{{ eod_install_directory }}/" 5 | owner: root 6 | group: root 7 | tags: 8 | - eod 9 | 10 | - name: Run the patch 11 | shell: "{{ eod_install_directory }}/bin/patch -s" 12 | tags: 13 | - eod 14 | notify: 15 | - restart eod 16 | -------------------------------------------------------------------------------- /deploy/group_vars/citus_coordinator/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # In versions 9.6 and later, archive and hot_standby are still accepted, BUT ARE MAPPED TO replica. 3 | pp_wal_level: hot_standby 4 | pp_max_wal_senders: 5 5 | pp_wal_keep_segments: 32 6 | 7 | pp_archive_mode: on 8 | # for now, assume the archiving script is in the PGDATA directory: 9 | pp_archive_command: '{{ pp_datadir }}/archive-wal-file %p %f' 10 | 11 | pp_max_replication_slots: 5 12 | -------------------------------------------------------------------------------- /deploy/roles/citus_coordinator_standby/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # copied this from aurora/deploy/roles/shared/handlers/rhel7.yml 3 | - name: restart postgres 4 | become: True 5 | systemd: 6 | name: "{{ pp_servicename }}" 7 | state: restarted 8 | daemon_reload: yes 9 | #- name: restart postgres 10 | # become: yes 11 | # become_method: sudo 12 | # service: 13 | # name: "{{ pp_servicename }}" 14 | # state: restarted 15 | -------------------------------------------------------------------------------- /deploy/roles/citus/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | pp_serverport: 5432 2 | pp_servicename: postgresql-10 3 | 4 | rpm_url: https://packagecloud.io/install/repositories/citusdata/community/script.rpm.sh 5 | 6 | citus_pkgs: 7 | - citus72_10 8 | 9 | pp_shared_preload_libraries: citus 10 | citus_memory_limit_service: MemoryMax 11 | citus_nofile_limit: "infinity" 12 | 13 | # Replication user 14 | replication_user: replication 15 | replication_password: replication 16 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/tasks/post_deploy_mesos_master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Give Marathon time to start its restart process 4 | wait_for: 5 | timeout: 30 6 | delegate_to: localhost 7 | become: false 8 | 9 | - name: Delete Marathon ZK Migration in progress key if present and we didn't ask for an upgrade 10 | znode: 11 | hosts: "{{ ansible_ssh_host }}:2181" 12 | name: /marathon/state/migration-in-progress 13 | state: absent 14 | -------------------------------------------------------------------------------- /deploy/roles/emacs/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install emacs 3 | yum: 4 | name: "{{ item }}" 5 | state: present 6 | disable_gpg_check: yes 7 | with_items: 8 | - emacs 9 | - libotf 10 | 11 | - name: Install Emacs Speaks Statisitcs 12 | yum: 13 | name: "{{ item }}" 14 | state: present 15 | disable_gpg_check: yes 16 | with_items: 17 | - emacs-common-ess.noarch 18 | - emacs-ess.noarch 19 | when: custom_repo 20 | -------------------------------------------------------------------------------- /deploy/roles/scala/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | scala_version: 2.11.8 3 | scala_url: "http://downloads.lightbend.com/scala/{{ scala_version 
}}/scala-{{ scala_version }}.tgz" 4 | scala_sbt_version: 0.13.11 5 | scala_sbt_url: "https://dl.bintray.com/sbt/native-packages/sbt/{{ scala_sbt_version }}/sbt-{{ scala_sbt_version }}.tgz" 6 | scala_dir: "/usr/local/scala" 7 | sbt_dir: "/usr/local/sbt" 8 | -------------------------------------------------------------------------------- /deploy/roles/talend/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | talend_zip_path: . 3 | talend_zip_name: TOS_DI-20151029_1337-V6.1.0 4 | talend_icon_path: plugins/org.talend.rcp_6.1.0.20151029_1337/icons/designer.ico 5 | talend_executable: TOS_DI-linux-gtk-x86_64 6 | talend_version: 6.1 7 | talend_heap_min: 256m 8 | talend_heap_max: 768m 9 | talend_max_perm_size: 256m 10 | talend_java_version: java-1.7.0-openjdk 11 | talend_java_path: /usr/lib/jvm/jre-1.7.0/bin 12 | install_talend: true 13 | -------------------------------------------------------------------------------- /deploy/roles/eod/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # TODO: fill this in 3 | - name: ensure video driver 4 | shell: echo foo 5 | 6 | - name: restart eod 7 | command: "{{ eod_install_directory }}/bin/otecs restart" 8 | when: install_eod and eod_restart 9 | 10 | - name: restart gdm 11 | shell: killall gdm-binary 12 | ignore_errors: yes 13 | when: eod_restart 14 | 15 | - name: restart prefdm 16 | shell: restart prefdm 17 | ignore_errors: yes 18 | when: eod_restart 19 | -------------------------------------------------------------------------------- /deploy/roles/intellij/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | intellij_version: "2016.1.4" 3 | intellij_url: "http://download.jetbrains.com/idea/ideaIC-{{ intellij_version }}.tar.gz" 4 | intellij_install_dir: "/opt/idea" 5 | intellij_main_dir: "/usr/local" 6 | intellij_desktop_dir: "/usr/share/applications" 7 
| intellij_desktop_file: "/usr/share/applications/idea.desktop" 8 | intellij_idea_dir: "idea-IC-145.2070.6" 9 | 10 | # https://download.jetbrains.com/idea/ideaIU-2016.1.4.tar.gz 11 | -------------------------------------------------------------------------------- /deploy/roles/stattransfer/templates/license1282.txt: -------------------------------------------------------------------------------- 1 | [Registration] 2 | PRODUCT=Stat/Transfer 3 | NAME={{stattransfer_name}} 4 | ORGANIZATION={{stattransfer_organization}} 5 | LICENSE-TYPE={{stattransfer_license_type}} 6 | EXPIRATION-DATE={{stattransfer_expiration_date}} 7 | REG-CODE={{stattransfer_reg_code}} 8 | AUTH-CODE={{stattransfer_auth_code}} 9 | SET-CODE={{stattransfer_set_code}} 10 | SIGN-CODE={{stattransfer_sign_code}} 11 | ENCODING=UTF-8 12 | GEN_DATE={{stattransfer_gen_date}} -------------------------------------------------------------------------------- /deploy/roles/ultra-edit/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Uninstall Ultra Edit 2 | yum: 3 | name: "{{ item }}" #UltraEdit.x86_64 4 | state: removed 5 | disable_gpg_check: yes 6 | with_items: "{{ ue_uninstall_list }}" 7 | when: custom_repo 8 | tags: 9 | - UltraEdit 10 | 11 | - name: Install Ultra Edit 12 | yum: 13 | name: "{{ ue_install_version }}" #UltraEdit.x86_64 14 | state: present 15 | disable_gpg_check: yes 16 | when: custom_repo 17 | tags: 18 | - UltraEdit -------------------------------------------------------------------------------- /deploy/group_vars/citus_coordinator_standby/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # In versions 9.6 and later, archive and hot_standby are still accepted, BUT ARE MAPPED TO replica. 3 | pp_wal_level: hot_standby 4 | 5 | # the standby is hot (i.e. 
can service read-only queries), so override default: 6 | pp_hot_standby: on 7 | 8 | # the standby does not archive: 9 | pp_archive_mode: off 10 | pp_archive_command: '' 11 | 12 | # force a logfile segment switch after this number of seconds; 0 disables: 13 | pp_archive_timeout: 0 -------------------------------------------------------------------------------- /deploy/roles/kibana-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kibana_log_file_locations: /var/log/kibana 3 | kibana_config_path: /opt/kibana/config 4 | kibana_config_file: kibana.yml.j2 5 | kibana_user: kibana4-user 6 | kibana_password: password 7 | kibana_plugins: 8 | - kibana/shield/2.3.4 9 | - elasticsearch/marvel/2.3.4 10 | kibana_encryption_key: "secret_key" 11 | kibana_generate_certs: True 12 | kibana_ssl_key_file: /opt/kibana/ssl/kibana.key 13 | kibana_ssl_cert_file: /opt/kibana/ssl/kibana.crt 14 | -------------------------------------------------------------------------------- /test/test_mesos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: admin_terminal 3 | tasks: 4 | - name: Copy needed files 5 | copy: 6 | src: "{{ item }}" 7 | dest: /tmp/ 8 | with_items: 9 | - "files/mesos/simple_app.c" 10 | - "files/mesos/test.sh" 11 | 12 | - name: Ensure gcc is installed 13 | yum: 14 | name: gcc 15 | state: present 16 | 17 | - name: Build simple_app and deploy to mesos 18 | shell: "sh test.sh" 19 | args: 20 | chdir: /tmp 21 | -------------------------------------------------------------------------------- /deploy/roles/evince/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install rpms 3 | yum: 4 | name: "{{ item }}" 5 | state: present 6 | disable_gpg_check: yes 7 | with_items: 8 | - libgcc 9 | - glibc 10 | - atk 11 | - gdk-pixbuf2 12 | - fontconfig 13 | - mesa-libGL 14 | - mesa-libGLU 15 | - pango 16 | - 
libxml2 17 | - libidn 18 | - libXt 19 | - gtk2 20 | # - AdobeReader_enu 21 | - libcanberra-gtk2 22 | - PackageKit-gtk-module 23 | - gtk2-engines 24 | - evince -------------------------------------------------------------------------------- /deploy/roles/mesos_master/handlers/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml 2 | --- 3 | - name: restart zookeeper 4 | service: 5 | name: zookeeper-server 6 | state: restarted 7 | 8 | - name: restart mesos-master 9 | command: restart mesos-master 10 | 11 | - name: restart marathon agent 12 | command: restart marathon 13 | listen: restart marathon 14 | 15 | - name: Give Marathon time to start its restart process 16 | include_tasks: post_deploy_mesos_master.yml 17 | listen: restart marathon 18 | when: not marathon_upgrade 19 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rstudio_domain_name: "http://rstudio.test.dev" 3 | rstudio_port: 8787 4 | rstudio_address: 0.0.0.0 5 | rstudio_pro: False 6 | rstudio_session_timeout: 15 7 | 8 | # Pro settings 9 | rstudio_enable_auth_proxy: 0 10 | rstudio_admin_group: admins 11 | rstudio_manager_group: managers 12 | ip_whitelist: 13 | - "{{ hostvars[groups['proxy_server'][0]]['ansible_ssh_host'] }}" 14 | update_rstudio: False 15 | install_pro_drivers: False 16 | rstudio_secret_header: X-RStudio-Username -------------------------------------------------------------------------------- /deploy/roles/mesos_agent/templates/default_container_dns.j2: -------------------------------------------------------------------------------- 1 | { 2 | "mesos": [ 3 | {% for network in mesos_cni_networks %} 4 | { 5 | "network_mode": "CNI", 6 | "network_name": "{{ network.network_name }}", 7 | "dns": { 8 | "nameservers": [ 9 | {% for nameserver in network.nameservers %} 10 
| "{{ nameserver }}"{% if not loop.last %},{% endif %} 11 | {% endfor %} 12 | ] 13 | } 14 | }{% if not loop.last %},{% endif %} 15 | {% endfor %} 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /deploy/roles/openldap/templates/ldap.conf.j2: -------------------------------------------------------------------------------- 1 | # ldap.conf.j2 2 | 3 | # 4 | # LDAP Defaults 5 | # 6 | 7 | # See ldap.conf(5) for details 8 | # This file should be world readable but not world writable. 9 | 10 | BASE dc={{ kdc_domain_name.split('.')[-2] }},dc={{ kdc_domain_name.split('.')[-1] }} 11 | URI ldap://localhost 12 | 13 | #SIZELIMIT 12 14 | #TIMELIMIT 15 15 | #DEREF never 16 | 17 | TLS_REQCRT allow 18 | 19 | TLS_CACERTDIR /etc/openldap/cacerts 20 | TLS_CACERT /etc/openldap/certs/cert.crt 21 | -------------------------------------------------------------------------------- /deploy/roles/sas/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | install_sas: false 3 | sas_version: 9.4 4 | sas_install_home: /opt/sas/SASInstall 5 | sas_response_file_path: /opt/sas/SASInstall/deploy_plan.txt 6 | sas_sid_file: /opt/sas/SASInstall/sid_files/SAS94_9BQWQL_70104781_LINUX_X86-64.txt 7 | sas_home: /opt/sas/SASHome 8 | sas_config_home: /opt/sas/studioconfig 9 | java_heap_size: '-Xmx4096M -Xms1024M' 10 | 11 | sas_log_file_locations: 12 | - /opt/sas/studioconfig/appserver/studio 13 | - /opt/sas/studioconfig/spawner/log 14 | sas_work_dir: /sastmp 15 | -------------------------------------------------------------------------------- /deploy/roles/ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Install and configure NTP 2 | # Based on: https://github.com/geerlingguy/ansible-role-ntp 3 | --- 4 | - name: Set correct timezone 5 | file: 6 | src: "/usr/share/zoneinfo/{{ ntp_timezone }}" 7 | dest: /etc/localtime 8 | state: link 9 | 
force: yes 10 | 11 | - name: Install NTP 12 | yum: 13 | name: ntp 14 | state: present 15 | 16 | - name: Ensure NTP is running and enabled at system start 17 | service: 18 | name: ntpd 19 | state: started 20 | enabled: yes 21 | -------------------------------------------------------------------------------- /deploy/roles/python-libs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | python_dep_packages: 2 | - libxml2 3 | 4 | python_packages: 5 | - requests 6 | - cookiecutter 7 | 8 | python3_packages: 9 | - requests 10 | - cookiecutter 11 | 12 | #python_pip_bin: /opt/rh/python27/root/usr/bin/pip2.7 13 | #python3_pip_bin: /opt/rh/rh-python36/root/usr/bin/pip3.6 14 | 15 | python_package_mode: present 16 | update_pycurl: False 17 | 18 | # Used to store the "python*_pip_bin"'s for 19 | # speced tags to avoid errors 20 | python_pip_binaries: [] 21 | python_pip_ld_path: [] 22 | -------------------------------------------------------------------------------- /deploy/roles/logstash-config/templates/10-syslog.conf: -------------------------------------------------------------------------------- 1 | filter { 2 | if [type] == "syslog" { 3 | grok { 4 | match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" } 5 | add_field => [ "received_at", "%{@timestamp}" ] 6 | add_field => [ "received_from", "%{host}" ] 7 | } 8 | syslog_pri { } 9 | date { 10 | match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /deploy/roles/psql-authnz/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install git 2 | yum: 3 | name: git 4 | state: present 5 | 6 | - name: Install dependencies 7 | yum: 8 | name: [ 9 | 'python-devel', 10 | 'libevent-devel', 11 | 
'gcc', 12 | 'gcc-c++', 13 | 'kernel-devel', 14 | 'libxslt-devel', 15 | 'libffi-devel', 16 | 'openssl-devel', 17 | 'openldap-devel' 18 | ] 19 | state: present 20 | 21 | ##ToDO: Add https://github.com/cfpb/psql-authnz deployment steps and run on citus hosts and postgres hosts 22 | -------------------------------------------------------------------------------- /deploy/roles/gocd_server/templates/go-server.j2: -------------------------------------------------------------------------------- 1 | export GO_SERVER_PORT={{ gocd_server_port }} 2 | export GO_SERVER_SSL_PORT={{ gocd_server_ssl_port }} 3 | export GO_SERVER_SYSTEM_PROPERTIES="$GO_SERVER_SYSTEM_PROPERTIES -Dh2.trace.level=3" 4 | export SERVER_WORK_DIR={{ gocd_server_work_dir }} 5 | export SERVER_MEM={{ gocd_server_mem }} 6 | export SERVER_MAX_MEM={{ gocd_server_max_mem }} 7 | export SERVER_MIN_PERM_GEN={{ gocd_server_min_perm_gen }} 8 | export SERVER_MAX_PERM_GEN={{ gocd_server_max_perm_gen }} 9 | export JAVA_HOME={{ java_home_path }} 10 | DAEMON=Y 11 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart postgres 3 | become: yes 4 | become_method: sudo 5 | service: 6 | name: "{{ pp_servicename }}" 7 | state: restarted 8 | when: pp_allow_restart or pp_install 9 | 10 | - name: reload-daemon 11 | service: 12 | name: "{{ pp_servicename }}" 13 | daemon_reload: yes 14 | 15 | - name: reload postgres 16 | become: yes 17 | become_method: sudo 18 | service: 19 | name: "{{ pp_servicename }}" 20 | state: reloaded 21 | when: not pp_allow_restart and not pp_install 22 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/install_eod_server.exp: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/expect 2 | 3 | # run the actual installer bin 4 | # TODO: figure out where to put this 5 | spawn /tmp/{{ eod_installer }} 6 | 7 | # set a 2 minute timeout 8 | set timeout 120 9 | 10 | # set up your expects 11 | expect_background { 12 | "Enter directory to install Exceed Connection Server to" { 13 | send "{{ eod_install_directory }}\r" 14 | exp_continue 15 | } 16 | "Please press to view License Agreement" { 17 | exit 0 18 | } 19 | } 20 | # interact return 21 | expect eod 22 | -------------------------------------------------------------------------------- /deploy/roles/monitor-research/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set variable for dated text file 3 | set_fact: 4 | dated_file_name: "{{ research_monitor_output_file_locaiton }}/{{ date_stamp }}.txt" 5 | tags: nightly 6 | 7 | - name: create empty logfile 8 | file: 9 | dest: "{{ dated_file_name }}" 10 | state: touch 11 | tags: nightly 12 | 13 | - name: folder sizes for nightly monitor script for research environment 14 | shell: "du -h --max-depth=1 {{ item }} >> {{ dated_file_name }}" 15 | with_items: "{{ research_nightly_monitor_locations }}" 16 | tags: nightly 17 | -------------------------------------------------------------------------------- /test/files/mesos/simple_app.c: -------------------------------------------------------------------------------- 1 | #include <stdio.h> 2 | #include <stdlib.h> 3 | #include <unistd.h> 4 | 5 | int main(int argc, char *argv[]) { 6 | if (argc == 2) { 7 | int seconds = atoi(argv[1]); 8 | if (seconds != 0) { 9 | while (1) { 10 | printf("Hello world!\n"); 11 | sleep(seconds); 12 | } 13 | } else { 14 | printf("Seconds must be an integer greater than zero.\n"); 15 | return 1; 16 | } 17 | } else { 18 | printf("Must supply the number of seconds to sleep, and nothing else.\n"); 19 | return 1; 20 | } 21 | return 0; 22 | } 23 | --------------------------------------------------------------------------------
/deploy/roles/citus/templates/pgpass.j2: -------------------------------------------------------------------------------- 1 | # hostname:port:database:username:password 2 | {% for worker in groups['citus'] %} 3 | {{ hostvars[worker]['ansible_ssh_host'] }}:{{ pp_serverport }}:*:{{ pp_superaccount }}:{{ pp_superpassword }} 4 | {% endfor %} 5 | localhost:{{ pp_serverport }}:*:{{ pp_superaccount }}:{{ pp_superpassword }} 6 | {% for coordinator in groups['citus_coordinator'] %} 7 | {{ hostvars[coordinator]['ansible_ssh_host'] }}:5432:replication:{{ replication_user }}:{{ replication_password }} 8 | {% endfor %} 9 | localhost:{{ pp_serverport }}:*:{{ pp_superaccount }}:{{ pp_superpassword }} 10 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/node.cfg.j2: -------------------------------------------------------------------------------- 1 | [COMMON] 2 | EoDHomeDir={{ eod_install_directory }} 3 | EoDSSLAvailable=1 4 | EoDDataStoreDir={{ eod_datastore_directory }} 5 | EoDDataStorePassword={{ eod_datastore_password }} 6 | 7 | [SERVER] 8 | EoDSSHCommand=/usr/bin/ssh 9 | ECSSSLLibPath={{ eod_install_directory }}/bin/sys/libssl.so.1.0.0 10 | ECSCryptoLibPath={{ eod_install_directory }}/bin/sys/libcrypto.so.1.0.0 11 | EoDRootCAKey= 12 | EoDLdapLibrary= 13 | EoDGSSLibrary= 14 | EoDKRB5Library= 15 | EoDFirstDisplay=1 16 | EoDStartupBannerOn=0 17 | ECSNodeEnabled=1 18 | 19 | [CLUSTER_MANAGER] 20 | EoDCMPort=5500 21 | -------------------------------------------------------------------------------- /deploy/roles/mesos_master/templates/zoo.cfg: -------------------------------------------------------------------------------- 1 | maxClientCnxns=50 2 | # The number of milliseconds each click 3 | tickTime=2000 4 | # The number of ticks that the initialization phase can take 5 | initLimit=10 6 | # The number of ticks that can pass between sending a request and getting an acknowledgement 7 | syncLimit=5 8 | # the directory where 
the snapshot is stored 9 | dataDir=/var/lib/zookeeper 10 | # the port at which the clients will connect 11 | clientPort=2181 12 | {% for server in groups['mesos_master'] %} 13 | server.{{ loop.index }}={{ hostvars[server]['ansible_ssh_host'] }}:2888:3888 14 | {% endfor %} -------------------------------------------------------------------------------- /deploy/roles/log-courier/templates/log-courier.conf.j2: -------------------------------------------------------------------------------- 1 | { 2 | "general": { 3 | "log file": "/var/log/log-courier/log-courier.log" 4 | }, 5 | "network": { 6 | "servers": [ 7 | {% for host in groups['elk'] %} 8 | "{{ hostvars[host]['ansible_ssh_host'] }}:5043"{% if not loop.last %},{% endif %} 9 | {% endfor %} 10 | ], 11 | "transport": "tcp" 12 | }, 13 | "files": [ 14 | { 15 | "paths": [ 16 | "/var/log/messages", 17 | "/var/log/secure", 18 | "/var/log/*.log" 19 | ], 20 | "fields": { "type": "syslog" } 21 | } 22 | ] 23 | } 24 | -------------------------------------------------------------------------------- /deploy/roles/pem-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get the PEM Client installer 3 | copy: 4 | src: "{{ artifacts_dir }}/{{ pem_installer_bin }}" 5 | dest: /tmp/ 6 | mode: a+x 7 | when: install_pem 8 | 9 | - name: Ensure umask is correct 10 | become: no 11 | lineinfile: 12 | dest: ~/.bashrc 13 | line: umask 0022 14 | regexp: ^(.*)umask(.*)$ 15 | state: present 16 | 17 | - name: Install PEM Client 18 | shell: "/tmp/{{ pem_installer_bin }} --prefix {{ pem_install_dir }} --mode unattended" 19 | args: 20 | creates: "{{ pem_install_dir }}/client-v5" 21 | when: install_pem 22 | -------------------------------------------------------------------------------- /deploy/roles/iptables/tasks/rhel_7.yml: -------------------------------------------------------------------------------- 1 | # rhel_7.yml - Sets up iptables for rhel7 2 | 3 | --- 4 | - name: 
Install iptables-services 5 | yum: 6 | name: iptables-services 7 | state: present 8 | 9 | - name: Make sure firewalld is masked 10 | systemd: 11 | name: firewalld 12 | masked: yes 13 | 14 | - name: Enable iptables services 15 | systemd: 16 | name: "{{ item }}" 17 | state: started 18 | enabled: yes 19 | with_items: 20 | - iptables 21 | - ip6tables 22 | ignore_errors: yes 23 | 24 | - name: DEBUG -- show error log 25 | shell: systemctl status ip6tables.service; journalctl -xe 26 | -------------------------------------------------------------------------------- /deploy/roles/kerberos_server/templates/kdc.conf: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | 3 | [kdcdefaults] 4 | kdc_ports = 88 5 | 6 | [realms] 7 | {% for realm in realms %} 8 | {{ realm.name }} = { 9 | max_life = 12h 0m 0s 10 | max_renewable_life = 7d 0h 0m 0s 11 | master_key_type = des3-hmac-sha1 12 | supported_enctypes = des3-hmac-sha1:normal des-cbc-crc:normal des-cbc-crc:v4 13 | } 14 | {% endfor %} 15 | 16 | [logging] 17 | default = FILE:/var/kerberos/krb5kdc/kdc.log 18 | kdc = FILE:/var/kerberos/krb5kdc/kdc.log 19 | admin_server = FILE:/var/kerberos/krb5kdc/kadmin.log 20 | -------------------------------------------------------------------------------- /deploy/roles/logstash-config/templates/02-tcp-input.conf: -------------------------------------------------------------------------------- 1 | input { 2 | tcp { 3 | port => 5043 4 | codec => multiline { 5 | pattern => "^[\s|java\.|com\.|Caused by:]" 6 | what => "previous" 7 | } 8 | } 9 | tcp { 10 | port => 5044 11 | codec => json 12 | } 13 | {% if "elasticsearch" in group_names or "elk" in group_names %} 14 | file { 15 | path => "/var/log/elasticsearch/*.log" 16 | type => "elasticsearch" 17 | start_position => "beginning" 18 | codec => multiline { 19 | pattern => "^\[" 20 | negate => true 21 | what => "previous" 22 | } 23 | } 24 | {% endif %} 25 | } 26 | 
-------------------------------------------------------------------------------- /.devcontainer/Makefile: -------------------------------------------------------------------------------- 1 | this_container=$(shell docker ps --filter "name=${CONTAINERNAME}" -aq) 2 | devcontainers=$(shell docker ps --filter "name=devcontainer" -aq | grep -v ${this_container}) 3 | 4 | buildbase: 5 | cd ${PROJECT_ROOT}/.devcontainer && docker build -f Dockerfile-base --rm --no-cache -t centos7-sshd . 6 | deploy: 7 | ansible-playbook ${PROJECT_ROOT}/deploy/deploy_citus_server.yml -i ${PROJECT_ROOT}/deploy/vagrant_hosts \ 8 | --ask-pass --user=super --ask-sudo-pass 9 | 10 | container: 11 | @echo ${this_container} 12 | rebuild: 13 | docker rm -f $(devcontainers) $(this_container) 14 | removeall: 15 | docker rm -f $(devcontainers) 16 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/templates/rserver.conf.j2: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | 3 | # R Studio Server Configuration File 4 | www-port={{ rstudio_port }} 5 | www-address={{ rstudio_address }} 6 | 7 | {% if rstudio_pro %} 8 | auth-proxy={{ rstudio_enable_auth_proxy }} 9 | auth-proxy-sign-in-url={{ rstudio_domain_name }} 10 | monitor-data-path=/opt/rstudio-server/monitoring-data 11 | server-health-check-enabled=1 12 | admin-enabled=1 13 | admin-group={{ rstudio_manager_group }} 14 | admin-superuser-group={{ rstudio_admin_group }} 15 | 16 | auth-stay-signed-in=0 17 | # do not enable google accounts 18 | auth-google-accounts=0 19 | {% endif %} 20 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server-config/tasks/create_functions.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - set_fact: 3 | database: "{{ item }}" 4 | 5 | - debug: 6 | var: database 7 | 8 | - name: "Load custom 
functions for defining privileged Operational DBA actions in {{ database }}" 9 | postgresql_exec: 10 | login_host: localhost 11 | login_user: "{{ pp_superaccount }}" 12 | login_password: "{{ pp_superpassword }}" 13 | port: "{{ pp_serverport }}" 14 | db: "{{ database }}" 15 | script: "{{ dba_script.script }}" 16 | with_items: "{{ pp_dba_scripts }}" 17 | loop_control: 18 | loop_var: dba_script 19 | check_mode: no 20 | changed_when: False 21 | -------------------------------------------------------------------------------- /deploy/roles/tesseract/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | # URL's 2 | autoconf_url: http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz 3 | autoconf_archive_url: http://gnu.mirror.constant.com/autoconf-archive/autoconf-archive-2019.01.06.tar.xz 4 | leptonica_url: http://leptonica.org/source/leptonica-1.77.0.tar.gz 5 | tesseract_url: https://github.com/tesseract-ocr/tesseract/archive/4.0.0.tar.gz 6 | tesseract_eng_lang: https://github.com/tesseract-ocr/tessdata/raw/master/eng.traineddata 7 | 8 | # Directories 9 | tessdata_dir: /usr/local/share/tessdata 10 | 11 | # Compile options 12 | CC: /opt/rh/devtoolset-7/root/usr/bin/gcc 13 | CXX: /opt/rh/devtoolset-7/root/usr/bin/c++ -------------------------------------------------------------------------------- /deploy/roles/stattransfer/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | stattransfer_install_directory: /usr/local/StatTransfer14 3 | stattransfer_local_bin_directory: /artifacts 4 | stattransfer_license_type: "STAT-TRANSFER-LICENSE-TYPE" 5 | stattransfer_expiration_date: "STAT-TRANSFER-EXP-DATE" 6 | stattransfer_reg_code: STAT-TRANSFER-REG 7 | stattransfer_auth_code: STAT-TRANSFER-AUTH 8 | stattransfer_sign_code: STAT-TRANSFER-SIGN 9 | stattransfer_major_version: "14" 10 | stattransfer_name: "STAT-TRANSFER-NAME" 11 | stattransfer_organization: 
"STAT-TRANSFER-ORGANIZATION" 12 | stattransfer_set_code: "STAT-TRANSFER-SET-CODE" 13 | stattransfer_gen_date: "STAT-TRANSFER-GEN-DATE" 14 | -------------------------------------------------------------------------------- /deploy/upgrade_gocd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: ci_server 3 | become: yes 4 | become_method: sudo 5 | pre_tasks: 6 | - name: Download GoCD Server RPM 7 | shell: "curl -o {{ gocd_server_package_path }}/{{ gocd_server_package_name }} {{ gocd_server_package_url }}" 8 | args: 9 | creates: "{{ gocd_server_package_path }}/{{ gocd_server_package_name }}" 10 | - name: Verify RPM checksum 11 | stat: 12 | path: "{{ gocd_server_package_path }}/{{ gocd_server_package_name }}" 13 | get_md5: False 14 | register: gocd_rpm 15 | - name: Upgrade GoCD 16 | command: "rpm -U {{ gocd_server_package_path }}/{{ gocd_server_package_name }}" 17 | -------------------------------------------------------------------------------- /deploy/roles/revolution-r/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | revolution_r_package: /tmp/RRO-3.2.0/RRO-3.2.0-el6.x86_64.rpm 3 | revolution_r_path: /usr/lib64/RRO-3.2.0/R-3.2.0 4 | install_mkl: false 5 | 6 | revolution_r_env_vars: 7 | R_ARCH: "" 8 | 9 | revolution_r_dependencies: 10 | - libcurl-devel 11 | - libxml2-devel 12 | - hunspell-devel 13 | 14 | revolution_r_packages: 15 | - "devtools" 16 | 17 | revolution_r_packages_url: 18 | - 19 | package: devtools 20 | url: "https://cran.r-project.org/src/contrib/Archive/devtools/devtools_1.11.1.tar.gz" 21 | 22 | revolution_r_packages_github: [] 23 | 24 | revolution_r_commands: 25 | - 'print("Hello world!")' 26 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:6 2 | 3 | # Install Ansible 4 | RUN yum -y install 
epel-release 5 | RUN yum -y install git ansible sudo wget openssh-server 6 | RUN yum -y install acl 7 | RUN yum clean all 8 | 9 | # Disable requiretty 10 | RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers 11 | 12 | VOLUME [ "/sys/fs/cgroup" ] 13 | CMD ["/usr/sbin/init"] 14 | 15 | # Add new user called vagrant 16 | RUN useradd -ms /bin/bash vagrant 17 | 18 | #Create test cert for docker 19 | RUN openssl req -x509 -nodes -days 365 -newkey rsa:2048 -subj "/C=US/ST=CFPB/L=Washington/O=Dis/CN=www.cf.gov" -keyout /etc/pki/tls/private/localhost.key -out /etc/pki/tls/certs/localhost.crt 20 | -------------------------------------------------------------------------------- /Dockerfile7: -------------------------------------------------------------------------------- 1 | FROM centos:7 2 | 3 | # Install Ansible 4 | RUN yum -y install epel-release 5 | RUN yum -y install git ansible sudo wget openssh-server 6 | RUN yum -y install acl 7 | RUN yum clean all 8 | 9 | # Disable requiretty 10 | RUN sed -i -e 's/^\(Defaults\s*requiretty\)/#--- \1/' /etc/sudoers 11 | 12 | VOLUME [ "/sys/fs/cgroup" ] 13 | CMD ["/usr/sbin/init"] 14 | 15 | # Add new user called vagrant 16 | RUN useradd -ms /bin/bash vagrant 17 | 18 | #Create test cert for docker 19 | RUN openssl req -x509 -nodes -days 365 -newkey rsa:2048 -subj "/C=US/ST=CFPB/L=Washington/O=Dis/CN=www.cf.gov" -keyout /etc/pki/tls/private/localhost.key -out /etc/pki/tls/certs/localhost.crt 20 | -------------------------------------------------------------------------------- /deploy/roles/r-shiny-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create app directory 3 | file: 4 | dest: /opt/shiny-server/apps 5 | state: directory 6 | mode: 0755 7 | tags: 8 | - shiny 9 | - config 10 | 11 | - name: Load config file 12 | template: 13 | src: shiny-server.conf.j2 14 | dest: /etc/shiny-server/shiny-server.conf 15 | tags: 16 | - shiny 17 | - config 18 | 
notify: 19 | - restart shiny-server 20 | 21 | - name: Ensure Shiny Server is started 22 | shell: "start shiny-server" 23 | register: shiny_result 24 | failed_when: shiny_result.rc != 0 and "already running" not in shiny_result.stderr 25 | tags: 26 | - shiny 27 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-client/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | postgres_repository_url: "https://download.postgresql.org/pub/repos/yum/10/redhat/rhel-6-x86_64/pgdg-redhat-repo-latest.noarch.rpm" 3 | postgres_repository_key_url: "https://download.postgresql.org/pub/repos/yum/RPM-GPG-KEY-PGDG-10" 4 | postgres_repository_name: pgdg10 5 | postgres_install_dir: /usr/pgsql-10 6 | postgres_install_repository: True 7 | 8 | postgres_packages: 9 | - postgresql10 10 | - postgresql10-devel 11 | 12 | postgres_gis_packages: 13 | - postgis24_10 14 | 15 | postgres_old_packages: 16 | - postgresql96 17 | - postgresql96-libs 18 | - postgis22 19 | - postgis24_96 20 | 21 | clean_postgresql: False 22 | -------------------------------------------------------------------------------- /deploy/roles/r-shiny/templates/shiny-server.conf.j2: -------------------------------------------------------------------------------- 1 | # shiny-server.conf 2 | 3 | description "Shiny application server" 4 | 5 | start on runlevel [2345] 6 | stop on runlevel [016] 7 | 8 | limit nofile 1000000 1000000 9 | 10 | post-stop exec sleep 3 11 | 12 | pre-start script 13 | exec sleep 100 14 | end script 15 | 16 | post-start script 17 | i=0 18 | while [ $i -lt 5 ] 19 | do 20 | pgrep "shiny-server" || exit 1 21 | sleep 10 22 | i=$((i+1)) 23 | done 24 | end script 25 | 26 | script 27 | exec shiny-server --pidfile=/var/run/shiny-server.pid >> /var/log/shiny-server.log 2>&1 28 | end script 29 | 30 | respawn limit 3 30 31 | 32 | respawn 33 | 
-------------------------------------------------------------------------------- /deploy/roles/eod/templates/inittab: -------------------------------------------------------------------------------- 1 | id:5:initdefault: 2 | 3 | # System initialization. 4 | si::sysinit:/etc/rc.d/rc.sysinit 5 | 6 | l0:0:wait:/etc/rc.d/rc 0 7 | l1:1:wait:/etc/rc.d/rc 1 8 | l2:2:wait:/etc/rc.d/rc 2 9 | l3:3:wait:/etc/rc.d/rc 3 10 | l4:4:wait:/etc/rc.d/rc 4 11 | l5:5:wait:/etc/rc.d/rc 5 12 | l6:6:wait:/etc/rc.d/rc 6 13 | 14 | 15 | pf::powerfail:/sbin/shutdown -f -h +2 "Power Failure; System Shutting Down" 16 | 17 | # If power was restored before the shutdown kicked in, cancel it. 18 | pr:12345:powerokwait:/sbin/shutdown -c "Power Restored; Shutdown Cancelled" 19 | 20 | 21 | x:5:respawn:/etc/X11/prefdm -nodaemon 22 | 23 | # What to do in single-user mode. 24 | #~~:S:wait:/sbin/sulogin -------------------------------------------------------------------------------- /.devcontainer/Dockerfile-base: -------------------------------------------------------------------------------- 1 | FROM centos/systemd 2 | 3 | MAINTAINER "Hung Nguyen" 4 | #https://hub.docker.com/r/centos/systemd/dockerfile 5 | RUN yum -y install sudo openssh-server openssh-clients initscripts; systemctl enable sshd.service 6 | 7 | # Create user 8 | RUN adduser super && \ 9 | usermod -a -G wheel super && \ 10 | echo "super:password" | chpasswd 11 | RUN useradd -ms /bin/bash vagrant 12 | RUN rm /usr/lib/tmpfiles.d/systemd-nologin.conf 13 | RUN yum update -y 14 | 15 | 16 | EXPOSE 22 17 | 18 | CMD ["/usr/sbin/init"] 19 | 20 | #docker run --privileged --name sshserver -v /sys/fs/cgroup:/sys/fs/cgroup:ro -d centos7-sshd 21 | #docker build -f Dockerfile-base --rm --no-cache -t centos7-sshd . 
22 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/exceed-connection-server: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | #auth [user_unknown=ignore success=ok ignore=ignore default=bad] pam_securetty.so 3 | auth include system-auth 4 | account required pam_nologin.so 5 | account include system-auth 6 | password include system-auth 7 | # pam_selinux.so close should be the first session rule 8 | session required pam_selinux.so close 9 | session optional pam_keyinit.so force revoke 10 | session required pam_loginuid.so 11 | session include system-auth 12 | session optional pam_console.so 13 | # pam_selinux.so open should only be followed by sessions to be executed in the user context 14 | session required pam_selinux.so open -------------------------------------------------------------------------------- /deploy/roles/gocd/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | go_server_address: 10.0.1.26 3 | go_server_port: 8153 4 | go_server_url: https://10.0.1.26:8153/go 5 | gocd_agent_package_url: https://download.gocd.io/binaries/17.10.0-5380/rpm/go-agent-17.10.0-5380.noarch.rpm 6 | gocd_agent_package_name: go-agent-17.10.0-5380.noarch.rpm 7 | gocd_agent_package_path: /tmp 8 | gocd_agent_checksum: ce65027c2e3f3ec4768071758dd3ac8b2a846b90 9 | gocd_agent_work_dir: /var/lib/go-agent 10 | gocd_agent_mem: "128m" 11 | gocd_agent_max_mem: "256m" 12 | gocd_upgrade: False 13 | restart_gocd_server: True 14 | 15 | gocd_rpms: 16 | - screen 17 | - git 18 | - vim 19 | - emacs 20 | - "@Development tools" 21 | 22 | gocd_log_file_locations: 23 | - /var/log/go-agent 24 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-dbs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create 
postgres databases 3 | postgresql_db: 4 | login_user: "{{ pp_dba_username }}" 5 | login_password: "{{ pp_dba_password }}" 6 | port: "{{ pp_serverport }}" 7 | name: "{{ item.name }}" 8 | owner: "{{ item.owner }}" 9 | template: "{{ item.template }}" 10 | state: present 11 | with_items: pp_dbs 12 | 13 | - name: Enable extensions for databases 14 | postgresql_ext: 15 | login_user: "{{ pp_dba_username }}" 16 | login_password: "{{ pp_dba_password }}" 17 | port: "{{ pp_serverport }}" 18 | db: "{{ item.0.name }}" 19 | name: "{{ item.1 }}" 20 | state: present 21 | with_subelements: 22 | - pp_dbs 23 | - extensions 24 | -------------------------------------------------------------------------------- /deploy/roles/gocd_server/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | gocd_server_package_url: https://download.gocd.io/binaries/17.10.0-5380/rpm/go-server-17.10.0-5380.noarch.rpm 3 | gocd_server_package_path: /tmp 4 | gocd_server_package_name: go-server-17.10.0-5380.noarch.rpm 5 | gocd_server_checksum: a0f2805698868dd338031bfbf73b391dd29ad34f 6 | gocd_server_work_dir: /var/lib/go-server 7 | gocd_server_port: 8153 8 | gocd_server_ssl_port: 8154 9 | gocd_server_mem: "256m" 10 | gocd_server_max_mem: "512m" 11 | gocd_server_min_perm_gen: "128m" 12 | gocd_server_max_perm_gen: "256m" 13 | gocd_admin_password: W6ph5Mm5Pz8GgiULbPgzG37mj9g= # "password" 14 | gocd_upgrade: False 15 | restart_gocd_server: True 16 | 17 | gocd_server_log_file_locations: 18 | - /var/log/go-server 19 | -------------------------------------------------------------------------------- /deploy/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | force_color=1 3 | forks=50 4 | gathering=smart 5 | host_key_checking=False 6 | vault_password_file=.vault_password 7 | library=/usr/share/ansible:../deploy/library 8 | stdout_callback = skippy 9 | default_become=sudo 10 | default_become_flags=-H -S 
11 | # SSH connect timeout: 12 | timeout=330 13 | gather_timeout=60 14 | callback_whitelist = profile_tasks 15 | 16 | # This is a short-term hack so backup doesn't time out; 17 | # the real solution will involve an async task. GP 02/15/20 14:50:40 EST 18 | connect_timeout = 300 19 | command_timeout = 290 20 | 21 | [ssh_connection] 22 | control_path=%(directory)s/%%h-%%r 23 | ssh_args = -o ServerAliveInterval=50 24 | 25 | [privilege_escalation] 26 | become_ask_pass=False 27 | 28 | -------------------------------------------------------------------------------- /deploy/roles/gauss/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify install directory 3 | file: 4 | path: "{{gauss_bin_directory}}/" 5 | state: directory 6 | 7 | - name: Install expect 8 | yum: 9 | name: expect 10 | state: present 11 | 12 | - name: Untar the tarball 13 | unarchive: 14 | src: "{{guass_local_bin_directory}}/GAUSS_15_Linux_RHEL_64.tar.gz" 15 | dest: "{{gauss_bin_directory}}/" 16 | when: custom_repo 17 | 18 | - name: copy the expect script 19 | template: 20 | src: install_gauss.exp 21 | dest: "{{gauss_bin_directory}}/install_gauss.exp" 22 | mode: a+rx 23 | 24 | - name: Run gauss expect script 25 | shell: "./install_gauss.exp" 26 | args: 27 | chdir: "{{gauss_bin_directory}}" 28 | when: custom_repo 29 | -------------------------------------------------------------------------------- /deploy/roles/devtools6/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install devtools repo 3 | template: 4 | src: devtools-6.repo.j2 5 | dest: /etc/yum.repos.d/devtools-6.repo 6 | mode: 0644 7 | when: not custom_repo 8 | tags: 9 | - devtools 10 | 11 | - name: Copy devtools shell script 12 | template: 13 | src: devtools-6.sh.j2 14 | dest: /opt/devtools-6.sh 15 | mode: 0755 16 | tags: 17 | - devtools 18 | 19 | - name: Install updated devtools 20 | yum: 21 | name: "{{ item }}" 
22 | state: present 23 | with_items: 24 | - devtoolset-6-binutils 25 | - devtoolset-6-build 26 | - devtoolset-6-gcc 27 | - devtoolset-6-gcc-c++ 28 | - devtoolset-6-gcc-gfortran 29 | - devtoolset-6-gdb 30 | tags: 31 | - devtools 32 | -------------------------------------------------------------------------------- /deploy/roles/devtools7/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install devtools repo 3 | template: 4 | src: devtools-7.repo.j2 5 | dest: /etc/yum.repos.d/devtools-7.repo 6 | mode: 0644 7 | when: not custom_repo 8 | tags: 9 | - devtools 10 | 11 | - name: Copy devtools shell script 12 | template: 13 | src: devtools-7.sh.j2 14 | dest: /opt/devtools-7.sh 15 | mode: 0755 16 | tags: 17 | - devtools 18 | 19 | - name: Install updated devtools 20 | yum: 21 | name: "{{ item }}" 22 | state: present 23 | with_items: 24 | - devtoolset-7-binutils 25 | - devtoolset-7-build 26 | - devtoolset-7-gcc 27 | - devtoolset-7-gcc-c++ 28 | - devtoolset-7-gcc-gfortran 29 | - devtoolset-7-gdb 30 | tags: 31 | - devtools 32 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/cluster.cfg.j2: -------------------------------------------------------------------------------- 1 | [CLUSTER] 2 | EoDCMPassword={{ eod_cm_password }} 3 | EoDCMSSL=0 4 | EoDCMLog=text 5 | EoDCMDefXconfig=XDMCP_Query.cfg 6 | EoDCMAuth=pam 7 | EoDCMAuthPAMPwdPrompt=Password: 8 | EoDCMPreferredPCM={{ eod_cm_preferred_pcm }} 9 | EoDLoadBalancing=cpu 10 | EoDCMLogLevel=1 11 | EoDCMLogRollSize=0 12 | EoDCMLogRollTime=0 13 | EodShowShareConfirmDialog=0 14 | ECSUseKerberos=0 15 | ECSUseUserCredentials=1 16 | ECSldapHost= 17 | ECSldapSSL=0 18 | ECSldapProxyUserName= 19 | ECSldapProxyPassWord= 20 | ECSldapBase= 21 | ECSldapUserLoginPrefix= 22 | ECSldapAuthOnly=0 23 | UseLicenseServer={{ eod_use_license_server }} 24 | LicenseServerHost={{ eod_license_server_host }} 25 | LicenseServerAuthKey= 
26 | SuspendTimeout=0 27 | ECSEnableFIPS=0 28 | ClusterName={{ eod_cluster_name }} 29 | -------------------------------------------------------------------------------- /deploy/roles/jdbc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #Required for computer to make an ssl connection for postgres (since they sign their cert with letsencrypt) 3 | - name: Install cert utilities 4 | yum: 5 | name: ca-certificates 6 | state: latest 7 | become: yes 8 | become_method: sudo 9 | 10 | - name: Get JDBC driver JAR file for postgresql 11 | get_url: 12 | url: "{{ jdbc_postgres_url }}" 13 | dest: "{{ jre_lib_path }}/" 14 | mode: 0644 15 | 16 | - name: Get the JDBC driver for SQLServer 17 | unarchive: 18 | src: "{{ jdbc_sql_url }}" 19 | dest: "/tmp/" 20 | copy: no 21 | 22 | - name: Move the JAR file to the right location 23 | copy: 24 | src: "/tmp/sqljdbc_6.0/enu/jre8/sqljdbc42.jar" 25 | dest: "{{ jre_lib_path }}" 26 | remote_src: yes 27 | mode: 0644 28 | -------------------------------------------------------------------------------- /deploy/roles/logstash-config/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | logstash_log_file_locations: /var/log/logstash 3 | logstash_es_output_file: 30-elasticsearch-output.conf.j2 4 | logstash_user: logstash 5 | logstash_password: password 6 | logstash_filters: 7 | - 8 | name: 01-input.conf 9 | content: > 10 | input { 11 | tcp { 12 | port => 5043 13 | codec => multiline { 14 | pattern => "^[\s|java\.|com\.|Caused by:]" 15 | what => "previous" 16 | } 17 | } 18 | tcp { 19 | port => 5044 20 | codec => json 21 | } 22 | } 23 | - 24 | name: 12-example-filter.conf 25 | content: > 26 | filter { 27 | date { 28 | match => [ "timestamp", "ISO8601" ] 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /deploy/roles/maven/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Download maven 3 | get_url: 4 | url: "{{ maven_url }}" 5 | dest: "/tmp/maven-{{ maven_version }}" 6 | tags: 7 | - maven 8 | 9 | - name: Extract Maven from /tmp 10 | unarchive: 11 | src: "/tmp/maven-{{ maven_version }}" 12 | dest: "{{ maven_install_dir }}" 13 | copy: no 14 | tags: 15 | - maven 16 | 17 | - name: Create symlink to Maven 18 | file: 19 | path: "{{ maven_install_dir }}/maven" 20 | src: "{{ maven_install_dir }}/apache-maven-{{ maven_version }}" 21 | state: link 22 | tags: 23 | - maven 24 | 25 | 26 | - name: Add Maven path to system path 27 | template: 28 | src: maven.sh.j2 29 | dest: "/etc/profile.d/maven.sh" 30 | mode: 0644 31 | tags: 32 | - maven 33 | 34 | -------------------------------------------------------------------------------- /deploy/roles/nginx/templates/default.conf.j2: -------------------------------------------------------------------------------- 1 | server { 2 | listen {{ nginx_listen }} default_server; 3 | 4 | root /usr/share/nginx/html; 5 | index index.html index.htm; 6 | 7 | server_name localhost; 8 | 9 | ### BLOCKING DOWNLOAD AGENTS: To prevent malicious traffic 10 | if ({{ nginx_block_download_agents }}) { 11 | return 403; 12 | } 13 | 14 | ### BLOCKING ROBOTS: To prevent enumeration and other attacks 15 | if ({{ nginx_block_robots }}) { 16 | return 403; 17 | } 18 | 19 | ### ALLOWING CONFIGURED DOMAINS OR REVERSE PROXIED REQUESTS: To provide layered security 20 | if ($host !~ ^({{ nginx_webcrawler_protect }}|{{ ansible_ssh_host }})) { 21 | return 444; 22 | } 23 | 24 | location / { 25 | try_files $uri $uri/ =404; 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /deploy/roles/epel/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Activate EPEL 2 | --- 3 | - name: Get EPEL repo 4 | get_url: 5 | url: "{{ epel_path }}/{{ 
epel_rpm }}" 6 | dest: /tmp/ 7 | register: get_url_result 8 | until: '"OK" in get_url_result.msg' 9 | retries: 3 10 | delay: 15 11 | check_mode: no 12 | tags: 13 | - epel 14 | 15 | # TODO: use yum module with equivalent of "yum --nogpgcheck localinstall packagename.arch.rpm" to stop warnings 16 | - name: Install EPEL repo 17 | command: "rpm -ivh /tmp/{{ epel_rpm }}" 18 | args: 19 | creates: /etc/yum.repos.d/epel.repo 20 | check_mode: no 21 | tags: 22 | - epel 23 | 24 | - name: Import the EPEL GPG key 25 | rpm_key: 26 | key: "/etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}" 27 | state: present 28 | check_mode: no 29 | tags: 30 | - epel 31 | -------------------------------------------------------------------------------- /deploy/roles/devtools/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install devtools repo 3 | template: 4 | src: devtools-2.repo.j2 5 | dest: /etc/yum.repos.d/devtools-2.repo 6 | mode: 0644 7 | when: not custom_repo 8 | tags: 9 | - devtools 10 | 11 | - name: Install updated devtools 12 | yum: 13 | name: "{{ item }}" 14 | state: present 15 | with_items: 16 | - devtoolset-2-binutils 17 | - devtoolset-2-build 18 | - devtoolset-2-gcc 19 | - devtoolset-2-gcc-c++ 20 | - devtoolset-2-gcc-gfortran 21 | - devtoolset-2-gdb 22 | - devtoolset-2-git 23 | tags: 24 | - devtools 25 | 26 | # Breaks some system tools 27 | #- name: Setup environment variables to use new versions 28 | # template: 29 | # src: devtools-2.sh.j2 30 | # dest: /etc/profile.d/devtools-2.sh 31 | # mode: 0644 32 | # tags: 33 | # - devtools 34 | -------------------------------------------------------------------------------- /deploy/templates/proxy.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Proxy Server 6 | 7 | 28 | 29 | 30 | 31 | 35 | 36 |
37 |

Proxy server is up!

38 |
39 | 40 | 41 | -------------------------------------------------------------------------------- /deploy/roles/glusterfs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Sets up a glusterfs file server node 2 | --- 3 | 4 | - name: Install required packages 5 | yum: 6 | name: centos-release-gluster6 7 | state: latest 8 | when: not custom_repo 9 | 10 | - name: Install required packages 11 | yum: 12 | name: glusterfs-server 13 | state: latest 14 | 15 | - name: Enable gluster server 16 | service: 17 | name: glusterd 18 | enabled: yes 19 | state: started 20 | 21 | - name: Open ports 22 | command: "iptables -I {{ iptables_chain }} 3 -m state --state NEW -p tcp --dport {{ item }} -j ACCEPT" 23 | with_items: 24 | - 111 25 | - "24007:24008" 26 | - "49152:49154" 27 | when: iptables_config 28 | 29 | - name: Save iptables config 30 | command: "/sbin/service iptables save" 31 | notify: 32 | - restart iptables 33 | when: iptables_config 34 | -------------------------------------------------------------------------------- /deploy/roles/r-libs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | install_r_packages: true 3 | 4 | r_dependencies: 5 | - libcurl-devel 6 | - libxml2-devel 7 | - openssl-devel 8 | - hunspell-devel 9 | - udunits2 10 | - udunits2-devel 11 | - openmpi 12 | - openmpi-devel 13 | - openmpi-1.8-devel 14 | 15 | r_dependencies_ignore: 16 | 17 | r_packages: 18 | #- "devtools" 19 | - pdbBASE 20 | - pmclust 21 | 22 | r_packages_ignore: 23 | 24 | r_packages_github: 25 | - name: shinydashboard 26 | user: rstudio 27 | 28 | r_packages_with_opts: 29 | - name: udunits2 30 | include: /usr/include/udunits2 31 | lib: /usr/lib64 32 | 33 | r_system_package_dir: 34 | - /usr/lib64/R/library 35 | - /usr/share/doc 36 | 37 | r_packages_versioned: 38 | - name: rlecuyer 39 | version: 0.3-4 40 | - name: pbdMPI 41 | version: 0.3-8 42 | - name: pbdSLAP 43 | version: 
0.2-4 44 | -------------------------------------------------------------------------------- /deploy/roles/openldap/templates/slapd.access.j2: -------------------------------------------------------------------------------- 1 | # slapd.access 2 | 3 | # Everyone can read everything 4 | access to dn.base="" by * read 5 | 6 | # The admin dn has full write access 7 | access to * 8 | by self write 9 | by dn.base="cn=admin,dc={{ kdc_domain_name.split('.')[-2] }},dc={{ kdc_domain_name.split('.')[-1] }}" write 10 | by * read 11 | 12 | access to attrs=userPassword 13 | by self write 14 | by anonymous auth 15 | by dn.base="cn=admin,dc={{ kdc_domain_name.split('.')[-2] }},dc={{ kdc_domain_name.split('.')[-1] }}" write 16 | by * none 17 | 18 | access to attrs=shadowLastChange 19 | by self write 20 | by * read 21 | 22 | rootdn "cn=admin,dc={{ kdc_domain_name.split('.')[-2] }},dc={{ kdc_domain_name.split('.')[-1] }}" 23 | rootpw {{ root_password.stdout }} 24 | -------------------------------------------------------------------------------- /deploy/roles/r-studio-server-config/templates/login.html.j2: -------------------------------------------------------------------------------- 1 |

This is a Consumer Financial Protection Bureau (CFPB) information system. The CFPB is an independent agency of the United States Government. CFPB information systems are provided for the processing of official information only. Unauthorized or improper use of this system may result in administrative action, as well as civil and criminal penalties. Because this is a CFPB information system, you have no reasonable expectation of privacy regarding any communication or data transiting or stored on this information system. All data contained on CFPB information systems is owned by CFPB and your use of the CFPB information system serves as your consent to your usage being monitored, intercepted, recorded, read, copied, captured or otherwise audited in any manner, by authorized personnel, including but not limited to employees, contractors, and/or agents of the United States Government.

2 | -------------------------------------------------------------------------------- /deploy/deploy_file_server.yml: -------------------------------------------------------------------------------- 1 | # deploy_file_server.yml 2 | --- 3 | - hosts: file_server 4 | become: yes 5 | become_method: sudo 6 | pre_tasks: 7 | - name: Install libselinux-python 8 | yum: 9 | name: libselinux-python 10 | state: latest 11 | 12 | roles: 13 | - {role: puppet_disable, when: "disable_puppet"} 14 | - iptables 15 | - epel 16 | - ntp 17 | - glusterfs 18 | - {role: log-courier, when: "install_logstash"} 19 | - odbc 20 | 21 | tasks: 22 | - name: Create the Filesystem 23 | filesystem: 24 | fstype: ext4 25 | dev: /dev/sdb 26 | 27 | - name: Mount volume 28 | mount: 29 | name: /data 30 | src: /dev/sdb 31 | fstype: ext4 32 | state: mounted 33 | 34 | - name: Ensure directories exist 35 | file: 36 | state: directory 37 | dest: /data/{{ item }} 38 | with_items: shared_folders 39 | -------------------------------------------------------------------------------- /deploy/roles/sas/templates/sas_init.d_template.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # chkconfig: - 20 80 4 | # Start up script for SAS studio application 5 | # 6 | 7 | 8 | # Source function library. 9 | . /etc/init.d/functions 10 | 11 | prog="{{ sas_config_home }}/sasstudio.sh" 12 | 13 | RETVAL=0 14 | 15 | status() { 16 | $prog status 17 | } 18 | 19 | start() { 20 | echo -n "Starting sas studio: " 21 | $prog start 22 | RETVAL=$? 23 | return $RETVAL 24 | } 25 | 26 | stop() { 27 | echo -n "Shutting down sas: " 28 | $prog stop 29 | RETVAL=$? 30 | return $RETVAL 31 | } 32 | 33 | case "$1" in 34 | status) 35 | status 36 | ;; 37 | start) 38 | start 39 | ;; 40 | stop) 41 | stop 42 | ;; 43 | restart) 44 | stop 45 | start 46 | ;; 47 | *) 48 | echo "Usage: {status|start|stop|restart}" 49 | exit 1 50 | ;; 51 | esac 52 | exit $? 
53 | -------------------------------------------------------------------------------- /deploy/roles/julia/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Activate Julia repo 2 | get_url: 3 | url: https://copr.fedorainfracloud.org/coprs/nalimilan/julia/repo/epel-6/nalimilan-julia-epel-6.repo 4 | dest: /etc/yum.repos.d/nalimilan-julia-epel-6.repo 5 | when: not 'production' in group_names 6 | tags: 7 | - julia 8 | 9 | - name: Install Julia GPG key 10 | rpm_key: 11 | state: present 12 | key: "https://copr-be.cloud.fedoraproject.org/results/nalimilan/julia/pubkey.gpg" 13 | 14 | - name: Install dependency 15 | yum: 16 | name: utf8proc-devel 17 | state: present 18 | tags: 19 | - julia 20 | 21 | - name: Create needed symlink 22 | file: 23 | path: /usr/lib64/libutf8proc.so.0.1 24 | src: /usr/lib64/libutf8proc.so.1.3.0 25 | state: link 26 | mode: 0755 27 | tags: 28 | - julia 29 | 30 | - name: Install Julia 31 | yum: 32 | name: julia 33 | state: latest 34 | disable_gpg_check: yes 35 | tags: 36 | - julia 37 | -------------------------------------------------------------------------------- /deploy/roles/kerberos/templates/krb5.conf: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | 3 | [logging] 4 | default = FILE:/var/log/krb5libs.log 5 | kdc = FILE:/var/log/krb5kdc.log 6 | admin_server = FILE:/var/log/kadmind.log 7 | 8 | [libdefaults] 9 | default_realm = {{ default_realm_name }} 10 | # dns_lookup_kdc = true 11 | # dns_lookup_realm = true 12 | forwardable = true 13 | proxiable = true 14 | fcc-mit-ticketflags = true 15 | default_keytab_name = FILE:/etc/krb5.keytab 16 | 17 | [realms] 18 | {% for realm in realms %} 19 | {{ realm.name }} = { 20 | kdc = {{ realm.kdc_domain_name }} 21 | admin_server = {{ realm.admin_domain_name }} 22 | default_domain = {{ realm.domain_name }} 23 | } 24 | {% endfor %} 25 | 26 | [domain_realm] 27 | {% for realm in realms %} 28 | 
.{{ realm.domain_name }} = {{ realm.name }} 29 | {{ realm.domain_name }} = {{ realm.name }} 30 | {% endfor %} 31 | 32 | [appdefaults] -------------------------------------------------------------------------------- /deploy/roles/kerberos_server/templates/krb5.conf: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | 3 | [logging] 4 | default = FILE:/var/log/krb5libs.log 5 | kdc = FILE:/var/log/krb5kdc.log 6 | admin_server = FILE:/var/log/kadmind.log 7 | 8 | [libdefaults] 9 | default_realm = {{ default_realm_name }} 10 | # dns_lookup_kdc = true 11 | # dns_lookup_realm = true 12 | forwardable = true 13 | proxiable = true 14 | fcc-mit-ticketflags = true 15 | default_keytab_name = FILE:/etc/krb5.keytab 16 | 17 | [realms] 18 | {% for realm in realms %} 19 | {{ realm.name }} = { 20 | kdc = {{ realm.kdc_domain_name }} 21 | admin_server = {{ realm.admin_domain_name }} 22 | default_domain = {{ realm.domain_name }} 23 | } 24 | {% endfor %} 25 | 26 | [domain_realm] 27 | {% for realm in realms %} 28 | .{{ realm.domain_name }} = {{ realm.name }} 29 | {{ realm.domain_name }} = {{ realm.name }} 30 | {% endfor %} 31 | 32 | [appdefaults] -------------------------------------------------------------------------------- /deploy/roles/python36-scl/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for python36-scl 3 | 4 | - name: Install CentOS Software Collection Library (SCL) when on CentOS 5 | yum: 6 | name: centos-release-scl 7 | state: present 8 | when: ansible_distribution == "CentOS" 9 | tags: 10 | - python3 11 | 12 | - name: Install python selinux bindings 13 | yum: 14 | name: libselinux-python 15 | state: present 16 | tags: 17 | - python3 18 | 19 | - name: Install python v3.6 from SCL 20 | yum: 21 | name: "{{ item }}" 22 | state: present 23 | with_items: 24 | - rh-python36 25 | - rh-python36-python-setuptools 26 | - rh-python36-scldevel 27 | tags: 
28 | - python3 29 | 30 | - name: Make python 3.6 the default python for this machine 31 | template: 32 | src: scl_enable-python36.sh.j2 33 | dest: /etc/profile.d/scl_enable-python36.sh 34 | when: set_python36_default_python 35 | tags: 36 | - python3 37 | -------------------------------------------------------------------------------- /deploy/deploy_analytics_terminal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: analytics_terminal 3 | become: yes 4 | become_method: sudo 5 | 6 | roles: 7 | - {role: puppet_disable, when: "disable_puppet"} 8 | - {role: iptables, when: "iptables_config"} 9 | - common 10 | 11 | - import_playbook: deploy_development_terminal.yml 12 | - import_playbook: deploy_r_terminal.yml 13 | 14 | - hosts: analytics_terminal 15 | become: yes 16 | become_method: sudo 17 | roles: 18 | - {role: eod, when: "install_desktop"} 19 | - {role: log-courier, when: "install_logstash"} 20 | 21 | post_tasks: 22 | - name: Open port range for app prototyping 23 | command: "iptables -I {{ iptables_chain }} 3 -m state --state NEW -p tcp --dport 8000:8100 -s {{ hostvars[item]['ansible_ssh_host'] }} -j ACCEPT" 24 | when: iptables_config 25 | with_items: "{{ groups['proxy_server'] }}" 26 | 27 | - name: Save rules 28 | command: "/sbin/service iptables save" 29 | when: iptables_config 30 | -------------------------------------------------------------------------------- /deploy/roles/odbc/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install rpms 3 | yum: 4 | name: "{{ item }}" 5 | state: "{{ odbc_package_mode }}" 6 | disable_gpg_check: yes 7 | with_items: 8 | - "{{ odbc_rpms }}" 9 | tags: 10 | - odbc 11 | 12 | - name: Install freetds 13 | yum: 14 | name: freetds.x86_64 15 | state: present 16 | when: custom_repo 17 | tags: 18 | - odbc 19 | 20 | - name: Configure odbc.ini 21 | template: 22 | src: odbc.ini 23 | dest: /etc/odbc.ini 24 | tags: 25 | 
- odbc 26 | 27 | - name: Configure odbcinst.ini 28 | template: 29 | src: odbcinst.ini 30 | dest: /etc/odbcinst.ini 31 | tags: 32 | - odbc 33 | 34 | - name: Configure freetds.conf 35 | template: 36 | src: freetds.conf 37 | dest: /usr/local/etc/freetds.conf 38 | when: custom_repo 39 | tags: 40 | - odbc 41 | 42 | - name: Set odbc.ini file permissions 43 | file: 44 | path: /etc/odbc.ini 45 | mode: "u+rw,g+r,o+r" 46 | tags: 47 | - odbc -------------------------------------------------------------------------------- /deploy/roles/eod/templates/custom.conf: -------------------------------------------------------------------------------- 1 | # GDM Configuration Customization file. 2 | # 3 | # [LOTS OF COMMENTS] 4 | # NOTE: Lines that begin with "#" are considered comments. 5 | # 6 | # Have fun! 7 | 8 | [daemon] 9 | Greeter=/usr/libexec/gdmlogin 10 | 11 | [security] 12 | DisallowTCP=false 13 | AllowRemoteRoot=true 14 | 15 | [xdmcp] 16 | Enable=true 17 | MaxSessions=60 18 | DisplaysPerHost=100 19 | [gui] 20 | 21 | [greeter] 22 | DefaultWelcome=false 23 | 24 | [chooser] 25 | 26 | [debug] 27 | 28 | # Note that to disable servers defined in the defaults.conf file (such as 29 | # 0=Standard, you must put a line in this file that says 0=inactive, as 30 | # described in the Configuration section of the GDM documentation. 31 | # 32 | [servers] 33 | 34 | # Also note, that if you redefine a [server-foo] section, then GDM will 35 | # use the definition in this file, not the defaults.conf file. It is 36 | # currently not possible to disable a [server-foo] section defined 37 | # in the defaults.conf file. 
38 | # 39 | -------------------------------------------------------------------------------- /test/files/gocd/remove_test_pipelines.py: -------------------------------------------------------------------------------- 1 | #!/bin/env python 2 | import os 3 | from bs4 import BeautifulSoup 4 | 5 | if __name__ == '__main__': 6 | 7 | # Determine file paths 8 | script_path = os.path.dirname(os.path.realpath(__file__)) 9 | config_xml_path = os.path.join(os.getenv("GOCD_CONFIG_PATH", "/tmp"), "cruise-config.xml") 10 | 11 | # Open destination and source files, respectively 12 | config_xml = open(config_xml_path, "r") 13 | 14 | # Parse the config and pipeline files, then create a new pipeline group and insert the pipeline 15 | config_xml.seek(0) 16 | config_soup = BeautifulSoup(config_xml.read(), "xml") 17 | config_soup.select_one('pipelines[group="automated-test"]').extract() 18 | config_xml.close() 19 | 20 | # Output the modified contents 21 | config_xml = open(config_xml_path, "w") 22 | config_xml.seek(0) 23 | config_xml.write(config_soup.prettify()) 24 | config_xml.truncate() 25 | config_xml.close() 26 | 27 | exit(0) 28 | -------------------------------------------------------------------------------- /.devcontainer/bashrc: -------------------------------------------------------------------------------- 1 | export WORKSPACE=${WORKSPACE:-/workspace} 2 | cat $WORKSPACE/.devcontainer/welcome.txt 3 | 4 | alias deploy='ansible-playbook $WORKSPACE/deploy/deploy_nginx_proxy_server.yml -i $WORKSPACE/deploy/vagrant_hosts --ask-pass --user=super --ask-sudo-pass' 5 | alias rbp='make -f $WORKSPACE/.devcontainer/Makefile rebuild' 6 | alias rebuild='make -f $WORKSPACE/.devcontainer/Makefile rebuild' 7 | alias inventory='cat $WORKSPACE'"/deploy/vagrant_hosts | grep ssh_host | sed 's/ ansible_ssh_host=/,/' | sed 's/ .*\$//'" 8 | alias refreshcomposefile='$WORKSPACE/.devcontainer/scripts/rebuild-docker-compose.sh' 9 | alias runansible='ansible-playbook --ask-pass --ask-sudo-pass 
--user=super -i $WORKSPACE/deploy/vagrant_hosts' 10 | 11 | alias lf='ls -altr' 12 | export PROMPT_COMMAND= 13 | export VERBOSITY=-vv 14 | export STANDBY=10.0.1.44 15 | export PAGER=/bin/cat 16 | pip3 install --upgrade ansible # if it wasn't; takes about 34 seconds 17 | 18 | cd $WORKSPACE/deploy 19 | 20 | export PS1='[\t \u@dind \W]\$ ' 21 | -------------------------------------------------------------------------------- /deploy/roles/r/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | install_r_updates: true 3 | install_r_packages: true 4 | 5 | r_core_state: present 6 | 7 | r_dependencies: 8 | - QuantLib 9 | - QuantLib-devel 10 | - openssl-devel 11 | - libcurl-devel 12 | - libxml2-devel 13 | - hunspell-devel 14 | - udunits2 15 | - udunits2-devel 16 | 17 | # Don't install these r dependencies... meant to be set in host vars 18 | r_dependencies_ignore: 19 | 20 | r_packages: 21 | - devtools 22 | # - "http://cran.r-project.org/src/contrib/Archive/RQuantLib/RQuantLib_0.3.12.tar.gz" # The latest version requires QuantLib 1.4.0, which is not supported by Centos 6.5 23 | 24 | # Don't install these r packages... 
meant to be set in host vars 25 | r_packages_ignore: 26 | 27 | r_packages_github: 28 | - name: shinydashboard 29 | user: rstudio 30 | 31 | r_packages_with_opts: 32 | - name: udunits2 33 | include: /usr/include/udunits2 34 | lib: /usr/lib64 35 | 36 | r_system_package_dir: 37 | - /usr/lib64/R/library 38 | - /usr/share/doc 39 | -------------------------------------------------------------------------------- /.devcontainer/welcome.txt: -------------------------------------------------------------------------------- 1 | 2 | Run alias: 3 | inventory - to see all the inventory from vagrant_host 4 | refreshcomposefile - will build docker-compose file w/ tmp_inventory 5 | rebuild - drop all containers 6 | 7 | Generate working CSV: 8 | inventory > /workspace/.devcontainer/scripts/tmp_inventory.csv 9 | 10 | OR Comment out the ones you don't want or filter with grep: 11 | inventory | grep citus > /workspace/.devcontainer/scripts/tmp_inventory.csv 12 | deploy: 13 | ansible-playbook -i /workspace/deploy/vagrant_hosts --ask-pass --ask-sudo-pass --user=super path_to_playbook.yml 14 | 15 | cat /workspace/.devcontainer/scripts/tmp_inventory.csv 16 | 17 | Rebuild steps: 18 | cd into .devcontainer-->make buildbase (only once) 19 | Generate working CSV (see above) 20 | refreshcomposefile 21 | rebuild 22 | wait 10 sec and click "RELOAD WINDOW" 23 | runansible /path/to/play/book/playbook.yml 24 | 25 | Fish is your friend!!!" 
26 | TYPE fish 27 | 28 | -------------------------------------------------------------------------------- /deploy/roles/jdk/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install specified JDK version and dependencies 3 | yum: 4 | name: "{{ jdk_packages }}" 5 | state: present 6 | disable_gpg_check: yes 7 | 8 | - name: Ensure Java environment variables are set 9 | template: 10 | src: jdk.sh.j2 11 | dest: /etc/profile.d/jdk.sh 12 | mode: 0644 13 | 14 | - name: Add additional deployment settings 15 | lineinfile: 16 | dest: "{{ jre_lib_path }}/deployment.properties" 17 | line: "{{ item }}" 18 | regexp: "{{ item }}" 19 | state: present 20 | create: yes 21 | with_items: "{{ java_deploy_options }}" 22 | 23 | - name: Add deployment config 24 | template: 25 | src: deployment.config 26 | dest: "{{ jre_lib_path }}/deployment.config" 27 | 28 | - name: Ensure this version is the default 29 | alternatives: 30 | name: java 31 | path: "{{ java_home_path }}/bin/java" 32 | link: /usr/bin/java 33 | 34 | - name: Ensure nss package is the latest 35 | yum: 36 | name: nss 37 | state: latest 38 | -------------------------------------------------------------------------------- /deploy/deploy_admin_terminal.yml: -------------------------------------------------------------------------------- 1 | # deploy_admin_terminal.yml 2 | --- 3 | - hosts: admin_terminal 4 | become: yes 5 | become_method: sudo 6 | roles: 7 | - {role: puppet_disable, when: "disable_puppet"} 8 | - {role: iptables, when: "iptables_config"} 9 | - common 10 | - {role: epel, when: "use_epel and not 'production' in group_names"} 11 | - {role: python, when: "custom_repo"} 12 | - {role: python-build, when: "install_python and not custom_repo" } 13 | - {role: log-courier, when: "install_logstash"} 14 | - eod 15 | - odbc 16 | 17 | tasks: 18 | - name: Install development tools 19 | yum: 20 | name: "@development" 21 | state: present 22 | disable_gpg_check: yes 23 
| 24 | - name: Install Ansible in Python 2.7 25 | become: no 26 | pip: 27 | virtualenv_command: /usr/local/bin/virtualenv 28 | virtualenv: ~/.virtualenvs/data-platform 29 | name: ansible 30 | state: present 31 | umask: "0022" 32 | when: "install_python" 33 | -------------------------------------------------------------------------------- /deploy/roles/spark/templates/log4j.properties: -------------------------------------------------------------------------------- 1 | # Set everything to be logged to the console 2 | log4j.rootCategory={{ spark_log_level }}, console 3 | log4j.appender.console=org.apache.log4j.ConsoleAppender 4 | log4j.appender.console.target=System.err 5 | log4j.appender.console.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n 7 | 8 | # Settings to quiet third party logs that are too verbose 9 | log4j.logger.org.spark-project.jetty=WARN 10 | log4j.logger.org.spark-project.jetty.util.component.AbstractLifeCycle=ERROR 11 | log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=INFO 12 | log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=INFO 13 | log4j.logger.org.apache.parquet=ERROR 14 | log4j.logger.parquet=ERROR 15 | 16 | # SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support 17 | log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL 18 | log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR 19 | -------------------------------------------------------------------------------- /deploy/roles/talend/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Java version for Pentaho 3 | yum: 4 | name: "{{ talend_java_version }}" 5 | state: present 6 | 7 | - name: Ensure opt folder exists 8 | file: 9 | dest: /opt 10 | state: directory 11 | 12 | - name: Extract Talend 13 | unarchive: 14 | src: "{{ 
talend_zip_path }}/{{ talend_zip_name }}.zip" 15 | dest: /opt/ 16 | creates: /opt/{{ talend_zip_name }} 17 | when: install_talend 18 | 19 | - name: Configure Talend 20 | template: 21 | src: TOS_DI-linux-gtk-x86.ini.j2 22 | dest: /opt/{{ talend_zip_name }}/TOS_DI-linux-gtk-x86.ini 23 | when: install_talend 24 | 25 | - name: Ensure talend is executable 26 | file: 27 | dest: /opt/{{ talend_zip_name }}/TOS_DI-linux-gtk-x86.sh 28 | mode: 0755 29 | when: install_talend 30 | 31 | - name: Create talend shortcut 32 | template: 33 | src: talend.desktop.j2 34 | dest: /usr/share/applications/talend.desktop 35 | mode: 0644 36 | when: install_talend 37 | -------------------------------------------------------------------------------- /deploy/templates/enclave_proxy.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFPB Data Enclave 6 | 7 | 28 | 29 | 30 | 31 | 35 | 36 |
37 | "; 39 | foreach($_SERVER as $key_name => $key_value) { 40 | print $key_name . " = " . $key_value . "
"; 41 | } 42 | ?> 43 |
44 | 45 | 46 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/templates/config_param.j2: -------------------------------------------------------------------------------- 1 | mode=unattended 2 | unattendedmodeui=none 3 | create_samples={{ pp_create_samples }} 4 | databasemode={{ pp_databasemode }} 5 | extract-only={{ pp_extract_only }} 6 | installer-language={{ pp_installer_language }} 7 | 8 | autostart_pgagent={{ pp_autostart_pgagent }} 9 | autostart_pgbouncer={{ pp_autostart_pgbouncer }} 10 | pgbouncerport={{ pp_pgbouncerport }} 11 | install_server_monitor={{ pp_install_server_monitor }} 12 | 13 | datadir={{ pp_datadir }} 14 | prefix={{ pp_prefix }} 15 | debuglevel={{ pp_debuglevel }} 16 | serverport={{ pp_serverport }} 17 | server_utilization={{ pp_server_utilization }} 18 | serviceaccount={{ pp_serviceaccount }} 19 | servicename={{ pp_servicename }} 20 | workload_profile={{ pp_workload_profile }} 21 | xlogdir={{ pp_xlogdir }} 22 | 23 | superaccount={{ pp_superaccount }} 24 | superpassword={{ pp_superpassword }} 25 | webusername={{ pp_web_username }} 26 | webpassword={{ pp_web_password }} 27 | 28 | {% if pp_disable_components is defined %}disable-components={{ pp_disable_components }}{% endif %} 29 | -------------------------------------------------------------------------------- /deploy/roles/eod/tasks/install_eod.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy over the EOD bin file 3 | copy: 4 | src: "{{ eod_installer_directory }}/{{ eod_installer }}" 5 | dest: "/tmp/" 6 | when: custom_repo 7 | tags: 8 | - eod 9 | 10 | - name: Chmod EOD binary 11 | file: 12 | path: "/tmp/{{ eod_installer }}" 13 | mode: a+x 14 | when: custom_repo 15 | tags: 16 | - eod 17 | 18 | - name: Ensure that eod directory exists 19 | file: 20 | path: "{{ eod_install_directory }}" 21 | state: directory 22 | tags: 23 | - eod 24 | 25 | - name: Copy the eod expect 
script 26 | template: 27 | src: install_eod_server.exp 28 | dest: /tmp/install_eod_server.exp 29 | mode: a+x 30 | tags: 31 | - eod 32 | 33 | - name: run EOD expect script 34 | shell: /tmp/install_eod_server.exp 35 | when: custom_repo 36 | tags: 37 | - eod 38 | 39 | - name: run eod silent install 40 | shell: "{{ eod_install_directory }}/bin/install -s" 41 | when: custom_repo 42 | args: 43 | creates: "{{ eod_install_directory }}/bin/otecs" 44 | tags: 45 | - eod 46 | -------------------------------------------------------------------------------- /deploy/templates/enclave_proxy_restricted.php: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFPB Data Enclave - Restricted Area 6 | 7 | 28 | 29 | 30 | 31 | 35 | 36 |
37 | Restricted content found here! 38 | 39 | $value) { 41 | echo "$name: $value\n"; 42 | } 43 | ?> 44 |
45 | 46 | -------------------------------------------------------------------------------- /deploy/roles/pdfgrep/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Verify install directory 2 | file: 3 | path: "{{pdfgrep_install_directory}}/" 4 | state: directory 5 | mode: og+rx 6 | recurse: yes 7 | 8 | 9 | - name: Copy PDFGrep zip from artifacts to install directory 10 | copy: 11 | src: "{{pdfgrep_local_artifact_directory}}/{{pdfgrep_zip_file_name}}" 12 | dest: "{{pdfgrep_install_directory}}/{{pdfgrep_zip_file_name}}" 13 | when: custom_repo 14 | 15 | 16 | - name: Unzip lisence manager 17 | unarchive: 18 | src: "{{pdfgrep_install_directory}}/{{pdfgrep_zip_file_name}}" 19 | dest: "{{pdfgrep_install_directory}}/" 20 | 21 | 22 | - name: Set the install bin to be executable 23 | file: 24 | path: "{{pdfgrep_install_directory}}/{{pdfgrep_install_package_name}}" 25 | mode: a+x 26 | when: custom_repo 27 | 28 | 29 | - name: Run install bin 30 | shell: "./{{pdfgrep_install_package_name}} --mode silent" 31 | args: 32 | chdir: "{{ pdfgrep_install_directory }}" 33 | creates: "{{ _install_directory }}/" 34 | when: custom_repo 35 | -------------------------------------------------------------------------------- /.devcontainer/scripts/rebuild-docker-compose.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export WORKSPACE=${WORKSPACE:-/workspace} 3 | 4 | #inventory="$(cat $WORKSPACE/.devcontainer/scripts/tmp_inventory.csv | grep ssh_host | sed 's/ ansible_ssh_host=/,/' | sed 's/ .*$//')" 5 | inventory="$(cat $WORKSPACE/.devcontainer/scripts/tmp_inventory.csv | grep -v '^#' | sed 's/ ansible_ssh_host=/,/' | sed 's/ .*$//')" 6 | tmp_file="$WORKSPACE/.devcontainer/tmp_docker_compose.yml" 7 | 8 | IFS=$'\n' 9 | #echo $inventory 10 | cat $WORKSPACE/.devcontainer/docker-compose-base.yml >$tmp_file 11 | hosttemplate="$WORKSPACE/.devcontainer/docker-compose-host-template.yml" 12 
| networks="$WORKSPACE/.devcontainer/docker-compose-networks.yml" 13 | while IFS= read -r line; do 14 | echo "... $line ..." 15 | IFS=', ' read -r -a array <<< "$line" 16 | 17 | hostname="${array[0]}" 18 | ipv4="${array[1]}" 19 | #appending network to bottom 20 | cat $hosttemplate | sed "s/HOSTNAME/$hostname/"|sed "s/IPV4/$ipv4/">>$tmp_file 21 | done <<< "$inventory" 22 | 23 | #appending network to bottom 24 | cat $networks >>$tmp_file 25 | cat $tmp_file >$WORKSPACE/.devcontainer/docker-compose.yml 26 | -------------------------------------------------------------------------------- /deploy/roles/stata/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | stata_install_directory: /usr/local/stata16 3 | stata_tarball: Stata16Linux64-Legacy.tar.gz 4 | stata_update_tarball: stata16update_linux64legacy.tar 5 | stata_update_directory: /usr/local/stata16/stata16update_linux64legacy 6 | stata_directory: /usr/local/stata 7 | stata_local_bin_directory: /artifacts 8 | stata_remote_bin_directory: /tmp 9 | stata_serial_number: STATA_SERIAL_NUMBER 10 | stata_code: STATE_CODE 11 | stata_authorization: STATA_AUTHORIZATION 12 | stata_line_one: Consumer Financial Protection Bureau 13 | stata_line_two: Washington, DC 14 | 15 | stata_odbc_mgr: unixodbc 16 | stata_processor: 4 17 | stata_site: "/home/work/shared_ado" 18 | stata_profile_dir: /usr/local/stata 19 | 20 | stata_patch_update: False 21 | 22 | stata_printers: 23 | - 1001_Konica ps "lpr -P 1001_Konica_1234 @" 24 | - 1002_Konica ps "lpr -P 1002_Konica_1234 @" 25 | -------------------------------------------------------------------------------- /deploy/roles/shared/handlers/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Handlers that are used by other roles should be placed here 2 | --- 3 | - include_tasks: rhel6.yml 4 | when: ansible_distribution_major_version == "6" 5 | 6 | - include_tasks: rhel7.yml 7 | 
when: ansible_distribution_major_version == "7" 8 | 9 | - name: restart iptables 10 | service: 11 | name: iptables 12 | state: restarted 13 | 14 | - name: restart rsyslog 15 | service: 16 | name: rsyslog 17 | state: restarted 18 | 19 | - name: restart rstudio-server 20 | command: restart rstudio-server 21 | 22 | - name: restart elasticsearch 23 | service: 24 | name: elasticsearch 25 | state: restarted 26 | 27 | - name: restart kibana 28 | service: 29 | name: kibana 30 | state: restarted 31 | 32 | - name: restart logstash 33 | service: 34 | name: logstash 35 | state: restarted 36 | 37 | - name: restart shiny-server 38 | command: "restart shiny-server" 39 | 40 | - name: restart apache 41 | shell: "service httpd24-httpd restart" 42 | 43 | - name: restart sas-server 44 | service: 45 | name: sas-studio 46 | state: restarted 47 | when: sas_server_installed | default(False) | bool 48 | -------------------------------------------------------------------------------- /deploy/roles/postgresql-server/templates/pgplus_env.sh.j2: -------------------------------------------------------------------------------- 1 | # EnterpriseDB shell environment loader 2 | # 3 | # Instructions: 4 | # This file contains additions to the user environment 5 | # that make accessing Postgres Plus Advanced Server 6 | # executables easier. 7 | # 8 | # To load the environment for a single user: 9 | # cp pgplus_env.sh /home/ 10 | # chown /home//pgplus_env.sh 11 | # vi /home//.bash_profile 12 | # At the bottom, add the line: 13 | # . /home//pgplus_env.sh 14 | # ( Note the '.' followed by a space ) 15 | 16 | # To load the environment for all users: 17 | # cp pgplus_env.sh /etc 18 | # vi /etc/profile 19 | # At the bottom, add the line: 20 | # . /etc/pgplus_env.sh 21 | # ( Note the '.' 
followed by a space ) 22 | 23 | # Environment 24 | 25 | export PATH={{ pp_prefix }}/{{ pp_install_path }}/bin:$PATH 26 | export PGHOME={{ pp_prefix }}/{{ pp_install_path }} 27 | export PGDATA={{ pp_datadir }} 28 | export PGDATABASE={{ pp_serviceaccount }} 29 | # export PGUSER=postgres 30 | export PGPORT={{ pp_serverport }} 31 | export PGLOCALEDIR={{ pp_prefix }}/{{ pp_install_path }}/share/locale 32 | -------------------------------------------------------------------------------- /deploy/deploy_mesos_master.yml: -------------------------------------------------------------------------------- 1 | # deploy_mesos_master.yml 2 | --- 3 | - hosts: mesos_master 4 | become: yes 5 | become_method: sudo 6 | tasks: 7 | - name: Shutdown all but one of the Mesos Master instances 8 | service: 9 | name: mesos-master 10 | state: stopped 11 | when: mesos_upgrade and 'mesos_master' in groups and inventory_hostname != groups['mesos_master'][0] 12 | 13 | - name: Shutdown all but one of the Marathon instances 14 | service: 15 | name: marathon 16 | state: stopped 17 | when: marathon_upgrade and 'mesos_master' in groups and inventory_hostname != groups['mesos_master'][0] 18 | 19 | - hosts: mesos_master 20 | become: yes 21 | become_method: sudo 22 | serial: 1 23 | max_fail_percentage: 30 24 | roles: 25 | - {role: iptables, when: "iptables_config"} 26 | - epel # This is for rhel6 pip 27 | - common 28 | - mesos_master 29 | tasks: 30 | # used to install kazoo 31 | - name: Install pip 32 | yum: 33 | name: python-pip 34 | state: present 35 | 36 | - name: Install Kazoo (required by Znode ansible module) 37 | pip: 38 | name: kazoo 39 | umask: "0022" 40 | -------------------------------------------------------------------------------- /deploy/roles/python36-scl/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Install Python3.6 environment from Centos / Redhat Software Collections 5 | 6 | Requirements 7 | ------------ 
8 | 9 | libselinux-python may be required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ansible_distribution 15 | 16 | If "CentOS" will install the CentOS SCL. SCL assumed to already be installed for RHEL 17 | 18 | set_python36_default_python: [False] 19 | 20 | If True will set /etc/profile.d to make python36 the default python for users 21 | 22 | python3 tag will run all tasks in this role 23 | 24 | 25 | Dependencies 26 | ------------ 27 | 28 | python-libs role python3_pip_binary should point to pip binary installed by this role and depends on python being installed 29 | 30 | Example Playbook 31 | ---------------- 32 | 33 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 34 | 35 | - hosts: servers 36 | roles: 37 | - python36-scl 38 | 39 | - hosts: servers 40 | roles: 41 | - { role: python36-scl, set_python36_default_python: True } 42 | 43 | 44 | Author Information 45 | ------------------ 46 | 47 | CFPB 48 | -------------------------------------------------------------------------------- /deploy/deploy_db_server.yml: -------------------------------------------------------------------------------- 1 | # deploy_db_server.yml 2 | --- 3 | - hosts: db_server 4 | become: yes 5 | become_method: sudo 6 | roles: 7 | - {role: iptables, when: "iptables_config"} 8 | - common 9 | - {role: epel, when: "use_epel and not 'production' in group_names"} 10 | - {role: log-courier, when: "install_logstash"} 11 | - role: jdk 12 | - role: postgresql-server 13 | post_tasks: 14 | - name: Ensure file transfer folder exists 15 | file: 16 | dest: "{{ pp_file_transfer_dir }}" 17 | state: directory 18 | mode: 0755 19 | owner: postgres 20 | group: postgres 21 | when: pp_file_transfer_dir is defined 22 | 23 | - name: Create symlink from PostgreSQL data directory 24 | file: 25 | dest: "{{ pp_datadir }}/file_transfers" 26 | src: "{{ pp_file_transfer_dir }}" 27 | state: link 28 | when: pp_file_transfer_dir is 
defined 29 | 30 | - name: Ensure PG is started 31 | service: 32 | name: "{{ pp_servicename }}" 33 | state: started 34 | enabled: yes 35 | tags: 36 | - postgresql-server 37 | 38 | - hosts: db_server 39 | roles: 40 | - role: postgresql-server-config 41 | 42 | -------------------------------------------------------------------------------- /test/test_postgres.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: db_terminal 3 | tasks: 4 | - name: Install requirements for postgresql Ansible tasks 5 | sudo: yes 6 | yum: 7 | name: "{{ item }}" 8 | state: present 9 | with_items: 10 | - python-psycopg2 11 | 12 | - name: Ensure that a connection can be made 13 | postgresql_exec: 14 | login_user: "{{ pp_dba_username }}" 15 | login_password: "{{ pp_dba_password }}" 16 | login_host: "{{ hostvars[groups['db_server'][0]]['ansible_ssh_host'] }}" 17 | port: "{{ pp_serverport }}" 18 | db: "postgres" 19 | script: "SELECT 1;" 20 | 21 | - name: Run custom test scripts 22 | postgresql_exec: 23 | login_user: "{{ pp_dba_username }}" 24 | login_password: "{{ pp_dba_password }}" 25 | login_host: "{{ hostvars[groups['db_server'][0]]['ansible_ssh_host'] }}" 26 | port: "{{ pp_serverport }}" 27 | db: "postgres" 28 | script: "{{ item.script }}" 29 | register: test_output 30 | with_items: pp_custom_test_scripts 31 | 32 | - debug: 33 | msg: "{{ test_output }}" 34 | 35 | - name: Assert results are correct 36 | assert: 37 | that: "{{ item.assertion }}" 38 | with_items: pp_custom_test_scripts 39 | -------------------------------------------------------------------------------- /deploy/roles/logstash-config/templates/20-elasticsearch-input.conf.j2: -------------------------------------------------------------------------------- 1 | filter { 2 | if [type] == "elasticsearch" { 3 | grok { 4 | match => [ "message", "\[%{TIMESTAMP_ISO8601:timestamp}\]\[%{DATA:level}%{SPACE}\]\[%{DATA:source}%{SPACE}\]%{SPACE}(?(.|\r|\n)*)" ]"] 5 | overwrite => [ 
"message" ] 6 | } 7 | if "_grokparsefailure" not in [tags] { 8 | grok { # regular logs 9 | match => [ 10 | "message", "^\[%{DATA:node}\] %{SPACE}\[%{DATA:index}\]%{SPACE}(?(.|\r|\n)*)", 11 | "message", "^\[%{DATA:node}\]%{SPACE}(?(.|\r|\n)*)" 12 | ] 13 | tag_on_failure => [] 14 | } 15 | } 16 | grok { # slow logs 17 | match => [ "message", "took\[%{DATA:took}\], took_millis\[%{NUMBER:took_millis}\], types\[%{DATA:types}\], stats\[%{DATA:stats}\], search_type\[%{DATA:search_type}\], total_shards\[%{NUMBER:total_shards}\], source\[%{DATA:source_query}\], extra_source\[%{DATA:extra_source}\]," ] 18 | tag_on_failure => [] 19 | add_tag => [ "elasticsearch-slowlog" ] 20 | } 21 | } 22 | date { 23 | "match" => ["timestamp", "YYYY-MM-dd HH:mm:ss,SSS"] 24 | target => "@timestamp" 25 | } 26 | mutate { 27 | remove_field => ["timestamp"] 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /deploy/roles/python27-scl/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for python27-scl 3 | 4 | - name: Install CentOS Software Collection Library (SCL) when on CentOS 5 | yum: 6 | name: centos-release-scl 7 | state: present 8 | when: ansible_distribution == "CentOS" 9 | tags: 10 | - python2 11 | 12 | - name: Install python selinux bindings 13 | yum: 14 | name: libselinux-python 15 | state: present 16 | tags: 17 | - python2 18 | 19 | - name: Install python v2.7 from SCL 20 | yum: 21 | name: "{{ item }}" 22 | state: present 23 | with_items: 24 | - python27 25 | - python27-python-setuptools 26 | # - python27-scldevel 27 | tags: 28 | - python2 29 | 30 | # Nedded if /usr/local/bin python2.7 removed from env 31 | #- name: Add ld config for python2 scl library 32 | # copy: 33 | # content: "/opt/rh/python27/root/usr/lib64" 34 | # dest: /etc/ld.so.conf.d/python27-scl.conf 35 | # tags: 36 | # - python2 37 | # 38 | #- name: Run ldconfig for SCL python2 lib 39 | # command: 
ldconfig 40 | # tags: 41 | # - python2 42 | 43 | - name: Make python 2.7 the default python for this machine 44 | template: 45 | src: scl_enable-python27.sh.j2 46 | dest: /etc/profile.d/scl_enable-python27.sh 47 | when: set_python27_default_python 48 | tags: 49 | - python2 50 | -------------------------------------------------------------------------------- /deploy/config_nginx_proxy_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: nginx_server 3 | become: yes 4 | become_method: sudo 5 | handlers: 6 | - import_tasks: roles/nginx/handlers/main.yml 7 | tasks: 8 | - name: Install git 9 | yum: 10 | name: git 11 | state: present 12 | 13 | - name: Ensure configuration directory exists 14 | file: 15 | dest: /srv/www/proxy/repo 16 | owner: nginx 17 | group: nginx 18 | mode: 0600 19 | state: directory 20 | 21 | - name: Pull down custom proxy configs 22 | git: 23 | repo: "{{ nginx_proxy_repo }}" 24 | dest: "/srv/www/proxy/repo/" 25 | force: yes 26 | version: "{{ git_environment }}" 27 | when: nginx_proxy_repo is defined 28 | notify: 29 | - restart nginx 30 | 31 | - name: Copy over configuration files if not using repo 32 | copy: 33 | src: "{{ nginx_proxy_conf_src_dir }}/" 34 | dest: "/srv/www/proxy/repo/" 35 | when: nginx_proxy_conf_src_dir is defined and not nginx_proxy_repo is defined 36 | notify: 37 | - restart nginx 38 | 39 | - name: Ensure permissions are correct on all config files 40 | file: 41 | dest: /srv/www/proxy/repo 42 | owner: nginx 43 | group: nginx 44 | mode: 0600 45 | recurse: yes 46 | state: directory 47 | -------------------------------------------------------------------------------- /deploy/roles/gauss/templates/install_gauss.exp: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/expect 2 | 3 | # run the actual installer bin 4 | spawn {{gauss_bin_directory}}/ginstall 5 | 6 | # set a 2 minute timeout 7 | set timeout 120 8 | 9 | set force_conservative 0 ;# set to 1 to force conservative mode even if 10 | ;# script wasn’t run conservatively originally 11 | if {$force_conservative} { 12 | set send_slow {1 .1} 13 | proc send {ignore arg} { 14 | sleep .1 15 | exp_send -s -- $arg 16 | } 17 | } 18 | 19 | 20 | # set up your expects 21 | expect_background { 22 | "Choose installation type" { 23 | send "a\r" 24 | exp_continue 25 | } 26 | "Press Enter to continue..." { 27 | send "\r" 28 | exp_continue 29 | } 30 | "Read Carefully Before Installing the Software Product" { 31 | send "q" 32 | exp_continue 33 | } 34 | "Do you understand and accept the license agreement" { 35 | send "yes\r" 36 | exp_continue 37 | } 38 | "Install for \\\[S\\\]ingle-user or \\\[M\\\]ulti-user" { 39 | send "m\r" 40 | exp_continue 41 | } 42 | "Leave shared libraries in install dir or relocate" { 43 | send "l\r" 44 | exp_continue 45 | } 46 | "*** PLEASE READ THE FOLLOWING MESSAGES! 
***" { 47 | exit 0 48 | } 49 | } 50 | # interact return 51 | expect eof -------------------------------------------------------------------------------- /deploy/roles/elasticsearch-config/templates/shield_logging.yml.j2: -------------------------------------------------------------------------------- 1 | # default configuration for the audit trail logs 2 | # 3 | # Error Levels: 4 | # 5 | # ERROR authentication_failed, access_denied, tampered_request, connection_denied 6 | # WARN authentication_failed, access_denied, tampered_request, connection_denied, anonymous_access 7 | # INFO authentication_failed, access_denied, tampered_request, connection_denied, anonymous_access, access_granted 8 | # DEBUG doesn't output additional entry types beyond INFO, but extends the information emmitted for each entry 9 | # TRACE authentication_failed, access_denied, tampered_request, connection_denied, anonymous_access, access_granted, connection_granted, authentication_failed []. In addition, internal system requests (self-management requests triggered by elasticsearch itself) will also be logged for "access_granted" entry type. 
10 | # 11 | 12 | logger: 13 | shield.audit.logfile: {{ es_shield_log_level }}, access_log 14 | 15 | additivity: 16 | shield.audit.logfile: false 17 | 18 | appender: 19 | 20 | access_log: 21 | type: {{ es_shield_log_type }} 22 | file: ${path.logs}/${cluster.name}-access.log 23 | {% for config_item in es_shield_log_options %} 24 | {{ config_item.option }}: {{ config_item.value }} 25 | {% endfor %} 26 | layout: 27 | type: pattern 28 | conversionPattern: "[%d{ISO8601}] %m%n" 29 | -------------------------------------------------------------------------------- /deploy/roles/odbc/templates/odbcinst.ini: -------------------------------------------------------------------------------- 1 | [PostgreSQL] 2 | Description = ODBC for PostgreSQL 3 | Driver = /usr/lib/pgsql/psqlodbc.so 4 | Setup = /usr/lib/libodbcpsqlS.so 5 | Driver64 = /usr/lib/pgsql/psqlodbc.so 6 | Setup64 = /usr/lib64/libodbcpsqlS.so 7 | FileUsage = 1 8 | 9 | [MySQL] 10 | Description = ODBC for MySQL 11 | Driver = /usr/lib/libmyodbc5.so 12 | Setup = /usr/lib/libodbcmyS.so 13 | Driver64 = /usr/lib64/libmyodbc5.so 14 | Setup64 = /usr/lib64/libodbcmyS.so 15 | FileUsage = 1 16 | 17 | [ODBC Driver 11 for SQL Server] 18 | Description = Microsoft ODBC Driver 11 for SQL Server 19 | Driver = /opt/microsoft/msodbcsql/lib64/libmsodbcsql-11.0.so.2270.0 20 | Threading = 1 21 | FileUsage = 1 22 | 23 | [FreeTDS] 24 | Description = ODBC for Microsoft SQL 25 | Driver = /usr/lib64/libtdsodbc.so 26 | Setup = /usr/lib64/libtdsS.so 27 | FileUsage = 2 28 | Threading = 2 29 | useNTLMv2 = True 30 | 31 | {% for driver in odbcinst_entries %} 32 | [{{ driver.drivername }}] 33 | {% for key, value in driver.attributes.items() %} 34 | {{ key }} = {{ value }} 35 | {% endfor %} 36 | {% endfor %} -------------------------------------------------------------------------------- /deploy/roles/clouseau/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get clouseau code 3 | git: 4 | 
repo: https://github.com/cfpb/clouseau 5 | dest: /opt/clouseau 6 | force: yes 7 | tags: 8 | - python2 9 | 10 | #- name: Create links 11 | # file: 12 | # path: "/usr/local/bin/{{ item }}" 13 | # src: "/opt/clouseau/bin/{{ item }}" 14 | # state: link 15 | # with_items: 16 | # - clouseau 17 | # - clouseau_thin 18 | 19 | - name: Check for python2 installation 20 | set_fact: 21 | python_pip_binaries: "{{ python_pip_binaries }} + [ '{{ python_pip_bin }}' ]" 22 | python2_pip_bin: "{{ python_pip_bin }}" 23 | when: "install_python and custom_repo" 24 | tags: 25 | - python2 26 | 27 | - name: Check for python2 scl installation 28 | set_fact: 29 | python_pip_binaries: "{{ python_pip_binaries }} + [ '{{ python_scl_pip_bin }}' ]" 30 | python2_pip_bin: "{{ python_scl_pip_bin }}" 31 | python_pip_ld_path: "{{ python_pip_ld_path }} + [ '{{ python_scl_pip_ld_path }}' ]" 32 | when: "install_python and not custom_repo" 33 | tags: 34 | - python2 35 | 36 | - name: Install clouseau 37 | pip: 38 | executable: "{{ python2_pip_bin }}" # /opt/rh/python27/root/usr/bin/pip2.7 39 | name: /opt/clouseau 40 | umask: "0022" 41 | environment: 42 | LD_LIBRARY_PATH: "{{ python_pip_ld_path | join(':') }}" 43 | when: install_python 44 | tags: 45 | - python2 46 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // If you want to run as a non-root user in the container, see .devcontainer/docker-compose.yml. 2 | { 3 | "name": "Docker in Docker Compose", 4 | "dockerComposeFile": "docker-compose.yml", 5 | "service": "docker-in-docker", 6 | "workspaceFolder": "/workspace", 7 | 8 | // Use 'settings' to set *default* container specific settings.json values on container create. 9 | // You can edit these settings after create using File > Preferences > Settings > Remote.
10 | "settings": { 11 | "terminal.integrated.shell.linux": "/bin/bash" 12 | }, 13 | 14 | // Uncomment the next line if you want start specific services in your Docker Compose config. 15 | // "runServices": [], 16 | 17 | // Uncomment the next line if you want to keep your containers running after VS Code shuts down. 18 | // "shutdownAction": "none", 19 | 20 | // Uncomment the next line to run commands after the container is created. 21 | // "postCreateCommand": "docker --version" 22 | 23 | // Uncomment the next line to have VS Code connect as an existing non-root user in the container. See 24 | // https://aka.ms/vscode-remote/containers/non-root for details on adding a non-root user if none exist. 25 | // "remoteUser": "vscode", 26 | 27 | // Add the IDs of extensions you want installed when the container is created in the array below. 28 | "extensions": [ 29 | "ms-azuretools.vscode-docker" 30 | ] 31 | } -------------------------------------------------------------------------------- /deploy/roles/eod/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #==================# 3 | ### EoD SETTINGS ### 4 | #==================# 5 | install_desktop: False 6 | eod_restart: False 7 | eod_install: False 8 | eod_upgrade: False 9 | eod_installer: ecs-13.8.5-linux-x64.bin 10 | eod_upgrader: 11 | eod_installer_directory: '/artifacts' 12 | 13 | eod_client_ports: 14 | - 5500 15 | - 8406 16 | - 8407 17 | - 6000:6025 18 | 19 | eod_client_ports_udp: 20 | - 177 21 | 22 | eod_admins: 23 | - vagrant 24 | 25 | eod_install_directory: '/usr/local/eod' 26 | eod_cm_password: PASSWORD 27 | 28 | # Define this variable to setup a cluster with a common datastore 29 | # The datastore must be a shared volume accessible by all EOD nodes. 
30 | # eod_datastore_directory: 31 | 32 | eod_datastore_password: PASSWORD 33 | eod_cm_preferred_pcm: "{{ ansible_ssh_host }}" 34 | eod_cluster_name: "{{ ansible_ssh_host }}" 35 | xdmc_query_host: "{{ ansible_ssh_host }}" 36 | 37 | eod_use_license_server: 0 38 | eod_license_server_host: 39 | eod_license_server_cache: 40 | 41 | # TODO: you can put all the keys you want to here 42 | eod_license_keys: 43 | - AAAA-BBBBBB-CCCCCC-DDDD-EEEEEE-FFF -0123456789 123 44 | 45 | # GDM Settings 46 | # This example disables the login user list 47 | gconf_settings: 48 | - 49 | setting: "/apps/gdm/simple-greeter/disable_user_list" 50 | type: "bool" 51 | value: "true" 52 | 53 | eod_greeter_include: false 54 | -------------------------------------------------------------------------------- /deploy/roles/nodejs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove system installs of node/npm 3 | yum: 4 | name: "{{ item }}" 5 | state: absent 6 | with_items: 7 | - nodejs 8 | - npm 9 | tags: 10 | - nodejs 11 | 12 | - name: Install dependencies 13 | yum: 14 | name: "{{ item }}" 15 | state: present 16 | with_items: 17 | - git 18 | - gcc 19 | - gcc-c++ 20 | - make 21 | - openssl-devel 22 | - libselinux-python 23 | tags: 24 | - nodejs 25 | 26 | - name: Install NVM 27 | git: 28 | repo: https://github.com/creationix/nvm.git 29 | version: v0.33.2 30 | dest: "{{ nvm_install_path }}" 31 | tags: 32 | - nodejs 33 | 34 | - name: Activate NVM 35 | template: 36 | src: nvm.sh.j2 37 | dest: /etc/profile.d/nvm.sh 38 | owner: root 39 | group: root 40 | mode: 0644 41 | tags: 42 | - nodejs 43 | 44 | - name: Install latest version of Node 45 | shell: "source {{ nvm_install_path }}/nvm.sh && nvm install {{ node_version }}" 46 | register: nvm_install_status 47 | changed_when: not 'is already installed' in nvm_install_status.stderr 48 | tags: 49 | - nodejs 50 | 51 | - name: Create symlinks 52 | file: 53 | state: link 54 | src: "{{ 
nvm_install_path }}/versions/node/v{{ node_version }}/bin/{{ item }}" 55 | dest: "/usr/local/bin/{{ item }}" 56 | with_items: 57 | - node 58 | - npm 59 | tags: 60 | - nodejs 61 | -------------------------------------------------------------------------------- /deploy/config_proxy_server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: proxy_server 3 | become: yes 4 | become_method: sudo 5 | handlers: 6 | - import_tasks: roles/shared/handlers/main.yml 7 | tasks: 8 | - name: Install git 9 | yum: 10 | name: git 11 | state: present 12 | 13 | - name: Pull down custom proxy configs 14 | git: 15 | repo: "{{ enclave_proxy_repo }}" 16 | dest: "/srv/www/enclave_proxy/repo/" 17 | force: yes 18 | version: "{{ git_environment }}" 19 | when: enclave_proxy_repo is defined 20 | register: gitresult 21 | 22 | - name: Ensure permissions are correct 23 | file: 24 | dest: /srv/www/enclave_proxy/repo 25 | owner: apache 26 | group: apache 27 | mode: 0600 28 | recurse: yes 29 | state: directory 30 | 31 | - name: Test configuration 32 | command: "service httpd24-httpd configtest" 33 | register: apache_result 34 | ignore_errors: yes 35 | 36 | - name: Reset git commit if the test failed 37 | git: 38 | repo: "{{ enclave_proxy_repo }}" 39 | dest: "/srv/www/enclave_proxy/repo/" 40 | force: yes 41 | version: "{{ gitresult.before }}" 42 | when: enclave_proxy_repo is defined and apache_result.rc != 0 43 | 44 | - name: Gracefully restart apache 45 | command: "service httpd24-httpd graceful" 46 | when: apache_result.rc == 0 47 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/xorg.conf: -------------------------------------------------------------------------------- 1 | #org configuration created by system-config-display 2 | 3 | Section "ServerLayout" 4 | Identifier "single head configuration" 5 | Screen 0 "Screen0" 0 0 6 | InputDevice "Keyboard0" "CoreKeyboard" 7 | EndSection 8 | 9 
| Section "Module" 10 | Load "dri" 11 | Load "glx" 12 | EndSection 13 | 14 | Section "InputDevice" 15 | Identifier "Keyboard0" 16 | Driver "kbd" 17 | Option "XkbModel" "pc105" 18 | Option "XkbLayout" "us" 19 | EndSection 20 | 21 | Section "Monitor" 22 | Identifier "Monitor0" 23 | ModelName "Monitor 1024x768" 24 | ### Comment all HorizSync and VertSync values to use DDC: 25 | HorizSync 31.5 - 61.0 26 | VertRefresh 50.0 - 75.0 27 | Option "dpms" 28 | EndSection 29 | 30 | Section "Device" 31 | Identifier "card0" 32 | Driver "vesa" 33 | Option "NoAccel" "false" 34 | EndSection 35 | 36 | Section "Screen" 37 | Identifier "Screen0" 38 | Device "Videocard0" 39 | Monitor "Monitor0" 40 | DefaultDepth 24 41 | SubSection "Display" 42 | Viewport 0 0 43 | Depth 24 44 | Modes "1024x768" "800x600" "640x480" 45 | EndSubSection 46 | EndSection 47 | 48 | Section "DRI" 49 | Mode 0666 50 | EndSection -------------------------------------------------------------------------------- /deploy/roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | 2 | # file: aurora/deploy/roles/nginx/defaults/main.yml 3 | --- 4 | 5 | deploy_user: vagrant 6 | 7 | nginx_listen: 80 8 | 9 | nginx_user: nginx 10 | nginx_group: nginx 11 | 12 | nginx_dir: "/etc/nginx" 13 | nginx_log_dir: "/var/log/nginx" 14 | nginx_error_log: "/var/log/nginx/error.log" 15 | nginx_error_log_format: warn 16 | nginx_access_log: "/var/log/nginx/access.log" 17 | nginx_access_log_format: main 18 | nginx_pid: "/var/run/nginx.pid" 19 | nginx_conf_dir: "/etc/nginx/conf.d" 20 | nginx_certificate_files: {} 21 | 22 | # nginx.conf 23 | nginx_sendfile: "on" 24 | nginx_tcp_nopush: "on" 25 | nginx_tcp_nodelay: "on" 26 | nginx_keepalive_timeout: "0" 27 | nginx_worker_processes: 1 28 | nginx_worker_connections: 1024 29 | nginx_client_max_body_size: "1k" 30 | nginx_client_header_buffer_size: "1k" 31 | nginx_client_body_buffer_size: "1k" 32 | nginx_large_client_buffers: "2 1k" 33 | 34 | # 
disabling version number display 35 | nginx_server_tokens: 'off' 36 | 37 | # Blocking download agents and configuring nginx to return error message 38 | nginx_block_download_agents: "$http_user_agent ~* msnbot|scrapbot" 39 | nginx_block_robots: "$http_user_agent ~* LWP::Simple|BBBike|wget" 40 | nginx_webcrawler_protect: "(.*).test.dev" 41 | 42 | # Nginx initial state 43 | nginx_state: "stopped" 44 | nginx_enable: "no" 45 | 46 | # Access restrictions 47 | nginx_access_list: 48 | - 49 | type: allow 50 | address: all 51 | -------------------------------------------------------------------------------- /deploy/roles/nginx/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | user {{ nginx_user }}; 2 | 3 | error_log {{ nginx_error_log }} {{ nginx_error_log_format }}; 4 | pid {{ nginx_pid }}; 5 | 6 | worker_processes {{ nginx_worker_processes }}; 7 | 8 | events { 9 | worker_connections {{ nginx_worker_connections }}; 10 | } 11 | 12 | http { 13 | {% for restriction in nginx_access_list %} 14 | {{ restriction.type }} {{ restriction.address }}; 15 | {% endfor %} 16 | 17 | include /etc/nginx/mime.types; 18 | default_type application/octet-stream; 19 | 20 | log_format main '$remote_addr - $remote_user [$time_local] "$status" ' 21 | '"$request" $body_bytes_sent "$http_referer" ' 22 | '"$http_user_agent" $server_port $host '; 23 | 24 | access_log {{ nginx_access_log }} {{ nginx_access_log_format }}; 25 | 26 | sendfile {{ nginx_sendfile }}; 27 | tcp_nopush {{ nginx_tcp_nopush }}; 28 | tcp_nodelay {{ nginx_tcp_nodelay }}; 29 | 30 | keepalive_timeout {{ nginx_keepalive_timeout }}; 31 | 32 | server_tokens {{ nginx_server_tokens }}; 33 | 34 | client_max_body_size {{ nginx_client_max_body_size }}; 35 | client_header_buffer_size {{ nginx_client_header_buffer_size }}; 36 | client_body_buffer_size {{ nginx_client_body_buffer_size }}; 37 | large_client_header_buffers {{ nginx_large_client_buffers }}; 38 | 39 | include {{ 
nginx_conf_dir }}/*.conf; 40 | } 41 | -------------------------------------------------------------------------------- /deploy/roles/pycharm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download pycharm to remote host 3 | shell: "mkdir -p /var/cache/downloads; if [[ ! -f /var/cache/downloads/{{ pycharm_download_basename }} ]]; then curl -L --silent --show-error {{ pycharm_download_url }} -o /var/cache/downloads/{{ pycharm_download_basename }} ; fi" 4 | tags: 5 | - pycharm 6 | 7 | - name: Extract pycharm to destination 8 | unarchive: 9 | copy: no 10 | src: /var/cache/downloads/{{ pycharm_download_basename }} 11 | dest: /opt/ 12 | mode: 0644 13 | creates: /opt/{{ pycharm_path }} 14 | tags: 15 | - pycharm 16 | 17 | - name: Ensure read permissions are set 18 | shell: "chmod a+r /opt/{{ pycharm_path }}/ -R" 19 | tags: 20 | - pycharm 21 | 22 | - name: Ensure directories can be displayed 23 | shell: "find /opt/{{ pycharm_path }}/ -type d -exec chmod a+x {} +" 24 | tags: 25 | - pycharm 26 | 27 | - name: Ensure executables are executable 28 | shell: "chmod a+x /opt/{{ pycharm_path }}/bin/{{ item }}" 29 | with_items: 30 | - "pycharm.sh" 31 | - "inspect.sh" 32 | - "fsnotifier" 33 | - "fsnotifier64" 34 | tags: 35 | - pycharm 36 | 37 | - name: Create syslinks for executables 38 | file: 39 | src: "/opt/{{ pycharm_path }}/bin/{{ item }}.sh" 40 | dest: "/usr/local/bin/{{ item }}" 41 | state: link 42 | force: yes 43 | with_items: 44 | - "pycharm" 45 | - "inspect" 46 | tags: 47 | - pycharm 48 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Guidance on how to contribute 2 | 3 | > All contributions to this project will be released under the CC0 public domain 4 | > dedication. 
By submitting a pull request or filing a bug, issue, or 5 | > feature request, you are agreeing to comply with this waiver of copyright interest. 6 | > Details can be found in our [TERMS](TERMS.md) and [LICENSE](LICENSE). 7 | 8 | 9 | There are two primary ways to help: 10 | - Using the issue tracker, and 11 | - Changing the code-base. 12 | 13 | 14 | ## Using the issue tracker 15 | 16 | Use the issue tracker to suggest feature requests, report bugs, and ask questions. 17 | This is also a great way to connect with the developers of the project as well 18 | as others who are interested in this solution. 19 | 20 | Use the issue tracker to find ways to contribute. Find a bug or a feature, mention in 21 | the issue that you will take on that effort, then follow the _Changing the code-base_ 22 | guidance below. 23 | 24 | 25 | ## Changing the code-base 26 | 27 | Generally speaking, you should fork this repository, make changes in your 28 | own fork, and then submit a pull-request. All new code should have associated unit 29 | tests that validate implemented features and the presence or lack of defects. 30 | Additionally, the code should follow any stylistic and architectural guidelines 31 | prescribed by the project. In the absence of such guidelines, mimic the styles 32 | and patterns in the existing code-base.
33 | -------------------------------------------------------------------------------- /deploy/deploy_kerberos_server.yml: -------------------------------------------------------------------------------- 1 | # kerberos_server.yml - Mock server to test Kerberos authentication 2 | --- 3 | - hosts: all 4 | become: yes 5 | become_method: sudo 6 | roles: 7 | - {role: puppet_disable, when: "disable_puppet"} 8 | - common 9 | - iptables 10 | - epel 11 | - ntp 12 | - openldap 13 | - kerberos_server 14 | - {role: log-courier, when: "install_logstash"} 15 | 16 | tasks: 17 | - name: Add LDAP to Kerberos 18 | command: kadmin -p {{ krb_service_user }}/admin -w {{ krb_service_user_password }} -q "addprinc -randkey ldap/{{ ansible_fqdn }}" 19 | args: 20 | creates: /etc/openldap/ldap.keytab 21 | 22 | - name: Generate keytab for LDAP 23 | command: kadmin -p {{ krb_service_user }}/admin -w {{ krb_service_user_password }} -q "ktadd -k /etc/openldap/ldap.keytab ldap/{{ ansible_fqdn }}" 24 | args: 25 | creates: /etc/openldap/ldap.keytab 26 | 27 | - name: Update keytab file settings 28 | file: 29 | dest: /etc/openldap/ldap.keytab 30 | owner: ldap 31 | group: ldap 32 | 33 | - name: Add keytab file to slapd's settings 34 | lineinfile: 35 | dest: /etc/default/slapd 36 | line: "export KRB5_KTNAME=/etc/ldap/ldap.keytab" 37 | regexp: "^export KRB5_KTNAME=" 38 | state: present 39 | create: yes 40 | 41 | - name: Restart slapd 42 | service: 43 | name: slapd 44 | state: restarted 45 | -------------------------------------------------------------------------------- /deploy/group_vars/citus_worker/main.yml: -------------------------------------------------------------------------------- 1 | #--- 2 | #pg_hba_settings: 3 | # - 4 | # comment: '"local" is for Unix domain socket connections only' 5 | # context: local 6 | # db: all 7 | # user: all 8 | # address: '' 9 | # ip_mask: '' 10 | # auth_method: md5 11 | # auth_options: '' 12 | # - 13 | # comment: 'Postgres user connection' 14 | # context: host 15 | 
# db: all 16 | # user: postgres 17 | # address: '10.0.1.0/24' 18 | # ip_mask: '' 19 | # auth_method: md5 20 | # auth_options: '' 21 | # - 22 | # comment: 'replication user connection:' 23 | # context: host 24 | # db: replication 25 | # user: replication 26 | # address: '10.0.1.0/24' 27 | # ip_mask: '' 28 | # auth_method: md5 29 | # auth_options: '' 30 | # - 31 | # comment: 'Ansible connection (must be last of the host/postgres entries)' 32 | # context: host 33 | # db: all 34 | # user: postgres 35 | # address: 'localhost' 36 | # ip_mask: '' 37 | # auth_method: md5 38 | # auth_options: '' 39 | # - 40 | # comment: 'IPv4 local connections:' 41 | # context: host 42 | # db: all 43 | # user: all 44 | # address: '10.0.1.0/24' 45 | # ip_mask: '' 46 | # auth_method: trust 47 | # auth_options: '' 48 | # - 49 | # comment: 'IPv6 local connections:' 50 | # context: host 51 | # db: all 52 | # user: all 53 | # address: '::1/128' 54 | # ip_mask: '' 55 | # auth_method: trust 56 | # auth_options: '' 57 | -------------------------------------------------------------------------------- /deploy/roles/spark/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | spark_mirror: "http://archive.apache.org/dist/spark/spark-1.5.2/spark-1.5.2-bin-hadoop2.6.tgz" 3 | spark_conf_dir: "/etc/spark" 4 | # Spark extraction folder 5 | spark_usr_parent_dir: "/usr/lib" 6 | spark_main_dir: "/usr/lib/spark-1.5.2-bin-hadoop2.6" 7 | 8 | spark_lib_dir: "/var/lib/spark" 9 | spark_log_dir: "/var/log/spark" 10 | spark_run_dir: "/var/run/spark" 11 | spark_user: "spark" 12 | spark_user_groups: [] 13 | spark_user_shell: "/bin/false" 14 | spark_source_file: "/etc/profile.d/spark.sh" 15 | 16 | # Spark uses log4j for logging. 
17 | # The valid log levels are "ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN" 18 | spark_log_level: WARN 19 | spark_bin_dir: "/usr/bin" 20 | spark_secret: "test" 21 | spark_auth: "TRUE" 22 | spark_num_cores: 4 23 | spark_executor_uri: "http://archive.apache.org/dist/spark/spark-1.5.2/spark-1.5.2-bin-hadoop2.6.tgz" 24 | spark_execute_user: "mesagent" 25 | spark_libprocess_port: 8100 26 | spark_driver_port: 8099 27 | spark_blockManager_port: 32000 28 | 29 | spark_master: mesos://zk://10.0.1.31:2181,10.0.1.32:2181,10.0.1.33:2181/mesos 30 | spark_mesos_principal: "username" 31 | spark_mesos_secret: "password" 32 | mesos_native_lib_path: /usr/lib/mesos/libmesos.so 33 | -------------------------------------------------------------------------------- /deploy/roles/citus_coordinator/templates/archive-wal-file.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Generated at {{ ansible_date_time.iso8601 }} for host {{ inventory_hostname }} 3 | # Use like this (in /var/lib/pgsql/10/data/postgresql.conf): 4 | # archive_command = '/var/lib/pgsql/10/data/archive-wal-file %p %f' 5 | 6 | set -eux # what else should we set? pipefail? 7 | 8 | if [[ -z "$1" ]]; then 9 | echo "$0: must supply full path and base filename as command-line parameters" 10 | exit 1; 11 | fi; 12 | if [[ -z "$2" ]]; then 13 | echo "$0: missing base filename (second command-line parameter)" 14 | exit 2; 15 | fi; 16 | 17 | DFMT="%Y-%m-%d %T %Z" 18 | echo $(date +"$DFMT") "archive-wal-file started..." 19 | 20 | fullpath="$1" 21 | filename="$2" 22 | # Note: this is the same directory we pull the archives FROM on the standby cooridnator 23 | ARCHIVE_DIR="{{ pp_datadir }}/archives" 24 | # Set this as a fact before using this template: 25 | STANDBY_ADDRESS="{{ citus_coordinator_standby_address }}" 26 | 27 | # check that we can connect first.... 
28 | ssh $STANDBY_ADDRESS hostname 29 | 30 | scp -q $fullpath $STANDBY_ADDRESS:$ARCHIVE_DIR/$filename 31 | 32 | # TODO: Remove this code once functionality is confirmed 33 | # cp $fullpath $ARCHIVE_DIR/$filename 34 | # cd $ARCHIVE_DIR 35 | # rsync -ac . $STANDBY_ADDRESS:$ARCHIVE_DIR/ 36 | 37 | echo $(date +"$DFMT") "archive-wal-file succeeded." 38 | 39 | # TODO: 40 | # make the destination host (or host+dir?) a parameter 41 | # make the archive dir a parameter? optional parameter? 42 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | All notable changes to this project will be documented in this file. 2 | We follow the [Semantic Versioning 2.0.0](http://semver.org/) format. 3 | 4 | ## 0.0.1 - 2015-04-09 5 | 6 | ### Added 7 | - Ansible roles and playbooks for deploying the admin terminal and file servers. 8 | 9 | ### Deprecated 10 | - Nothing. 11 | 12 | ### Removed 13 | - Nothing. 14 | 15 | ### Fixed 16 | - Nothing. 17 | 18 | ## 1.0 - 2016/17 19 | 20 | ### Added 21 | - Proxy Server 22 | - Analytics Terminals 23 | - Elasticsearch and ELK 24 | - Research Environment (partial) 25 | - GoCD 26 | - Data Warehouse 27 | - Mesos and Marathon 28 | - DMZ Proxy 29 | 30 | ### Deprecated 31 | - Nothing 32 | 33 | ### Fixed 34 | - Many things 35 | 36 | ## 1.1.0 - 2017-12-01 37 | 38 | ### Added 39 | - Citus 40 | 41 | ### Deprecated 42 | - Nothing 43 | 44 | ### Fixed 45 | - Many things 46 | 47 | ## 1.1.1 - 2018-01-05 48 | It's a new year, so let's start actually tracking things in the Changelog! 
49 | 50 | ### Added 51 | - Ability to pull in TLS certificates to the DMZ Proxy 52 | - Ability to allow a list of groups access to Postgres config and log files 53 | - Support for new GoCD URL syntax 54 | 55 | ### Deprecated 56 | - Nothing 57 | 58 | ### Fixed 59 | - Fixed bugs with DMZ Proxy permissions 60 | 61 | ## 1.1.2 - 2018-01-26 62 | 63 | ### Added 64 | - Support for using symlinked postgres directory 65 | - R Studio 1.1 by default. 66 | 67 | ### Deprecated 68 | - Nothing 69 | 70 | ### Fixed 71 | - Permissions issues in Postgres/Citus roles. 72 | -------------------------------------------------------------------------------- /deploy/roles/stattransfer/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install compat-libstdc 3 | yum: 4 | name: compat-libstdc++-33 5 | state: present 6 | tags: 7 | - stattransfer 8 | 9 | - name: Verify install directory 10 | file: 11 | path: "{{stattransfer_install_directory}}/" 12 | state: directory 13 | mode: og+rx 14 | recurse: yes 15 | tags: 16 | - stattransfer 17 | 18 | - name: Copy over the install bin 19 | copy: 20 | src: "{{stattransfer_local_bin_directory}}/stlinux64_install" 21 | dest: "{{stattransfer_install_directory}}/stlinux64_install" 22 | when: custom_repo 23 | tags: 24 | - stattransfer 25 | 26 | - name: Set the install bin to be executable 27 | file: 28 | path: "{{stattransfer_install_directory}}/stlinux64_install" 29 | mode: a+x 30 | when: custom_repo 31 | tags: 32 | - stattransfer 33 | 34 | - name: Run install bin 35 | shell: ./stlinux64_install --mode silent 36 | args: 37 | chdir: "{{ stattransfer_install_directory }}" 38 | creates: "{{ stattransfer_install_directory }}/stattransfer" 39 | when: custom_repo 40 | tags: 41 | - stattransfer 42 | 43 | - name: Copy license file over 44 | template: 45 | src: license1282.txt 46 | dest: "{{stattransfer_install_directory}}/st14.lic" 47 | tags: 48 | - stattransfer 49 | 50 | - name: Copy 
configure_stattransfer to /etc/profile.d 51 | template: 52 | src: configure_stattransfer.sh 53 | dest: /etc/profile.d/configure_stattransfer.sh 54 | tags: 55 | - stattransfer -------------------------------------------------------------------------------- /deploy/roles/mesos_agent/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mesos_agent_log_file_locations: 3 | - /var/log/mesos 4 | 5 | mesos_agent_package: mesos-0:1.4.1-2.0.1.x86_64 6 | mesos_agent_version: 1.4.1 7 | mesos_agent_port: 5051 8 | mesos_agent_logging_level: "INFO" 9 | 10 | mesos_agent_isolation: "cgroups/cpu,cgroups/mem,cgroups/net_cls,filesystem/linux,disk/du,docker/runtime" 11 | mesos_agent_cgroups_path: "/cgroups" 12 | mesos_agent_attributes: "" 13 | 14 | mesos_agent_work_dir: "/opt/mesos" 15 | 16 | mesos_agent_credential: "/etc/mesos_agent_credential" 17 | mesos_agent_principal: "username" 18 | mesos_agent_secret: "password" 19 | 20 | mesos_agent_fix_link: True 21 | 22 | # User to run processes in Mesos as 23 | marathon_mesos_user: "mesagent" 24 | 25 | # Container images 26 | mesos_containerizers: "mesos" 27 | mesos_launcher: "linux" 28 | mesos_image_providers: "appc,docker" 29 | mesos_agent_image_backend: "copy" 30 | 31 | mesos_executor_registration_timeout: 5mins 32 | 33 | mesos_appc_enable: yes 34 | mesos_appc_simple_discovery_uri_prefix: "http://10.0.1.31:8080/v2/artifacts/appc/" 35 | # File uri is not working, perhaps a bug in Mesos 36 | #mesos_appc_simple_discovery_uri_prefix: file:///home/work/appc/ 37 | mesos_appc_store_dir: "/tmp/mesos/store/appc" 38 | 39 | mesos_docker_enable: yes 40 | mesos_docker_store_dir: "/tmp/mesos/store/docker" 41 | mesos_docker_registry: "/home/work/mesos/docker/images" 42 | 43 | mesos_cni_networks: 44 | - network_name: "default" 45 | nameservers: 46 | - "8.8.8.8" 47 | - "8.8.4.4" 48 | -------------------------------------------------------------------------------- /deploy/deploy_elk.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: elk 3 | become: yes 4 | become_method: sudo 5 | roles: 6 | - common 7 | - {role: puppet_disable, when: "disable_puppet"} 8 | - {role: iptables, when: "iptables_config"} 9 | - {role: epel, when: "use_epel and not 'production' in group_names"} 10 | - jdk 11 | - elasticsearch 12 | 13 | - hosts: elk 14 | become: yes 15 | become_method: sudo 16 | handlers: 17 | - import_tasks: roles/shared/handlers/main.yml 18 | pre_tasks: 19 | - name: Create key for message authentication 20 | shell: "/usr/share/elasticsearch/bin/shield/syskeygen" 21 | args: 22 | creates: /etc/elasticsearch/shield/system_key 23 | register: create_key 24 | when: es_message_auth_enabled 25 | environment: 26 | ES_JAVA_OPTS: "-Des.path.conf=/etc/elasticsearch" 27 | 28 | - name: Ensure permissions are correct 29 | file: 30 | dest: /etc/elasticsearch/shield/system_key 31 | owner: "{{ es_user }}" 32 | group: "{{ es_user }}" 33 | mode: 0600 34 | when: es_message_auth_enabled 35 | 36 | - name: Fetch the key 37 | fetch: 38 | src: /etc/elasticsearch/shield/system_key 39 | dest: ./ 40 | fail_on_missing: yes 41 | flat: yes 42 | when: es_message_auth_enabled and create_key.changed 43 | 44 | roles: 45 | - elasticsearch-config 46 | - logstash 47 | - logstash-config 48 | - kibana 49 | - kibana-config 50 | - {role: log-courier, when: "install_logstash"} 51 | -------------------------------------------------------------------------------- /deploy/roles/eod/templates/system-auth-ac: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authconfig is run. 
4 | auth required pam_env.so 5 | auth sufficient pam_winbind.so use_first_pass 6 | auth sufficient pam_unix.so nullok try_first_pass 7 | auth sufficient pam_krb5.so use_first_pass 8 | auth requisite pam_succeed_if.so uid >= 500 quiet 9 | auth required pam_deny.so 10 | 11 | account required pam_access.so 12 | account required pam_unix.so broken_shadow 13 | account sufficient pam_localuser.so 14 | account sufficient pam_succeed_if.so uid < 500 quiet 15 | account [default=bad success=ok user_unknown=ignore] pam_krb5.so 16 | account [default=bad success=ok user_unknown=ignore] pam_winbind.so 17 | account required pam_permit.so 18 | 19 | password requisite pam_cracklib.so try_first_pass retry=3 20 | password sufficient pam_unix.so md5 shadow nullok try_first_pass use_authtok 21 | password sufficient pam_krb5.so use_authtok 22 | password sufficient pam_winbind.so use_authtok 23 | password required pam_deny.so 24 | 25 | session optional pam_keyinit.so revoke 26 | session required pam_limits.so 27 | session optional pam_oddjob_mkhomedir.so skel=/etc/skel umask=0077 28 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 29 | session required pam_unix.so 30 | session optional pam_krb5.so -------------------------------------------------------------------------------- /deploy/roles/open_office/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | open_office_rpms: 3 | - openoffice.x86_64 4 | - openoffice-base.x86_64 5 | - openoffice-brand-base.x86_64 6 | - openoffice-brand-calc.x86_64 7 | - openoffice-brand-draw.x86_64 8 | - openoffice-brand-en-US.x86_64 9 | - openoffice-brand-impress.x86_64 10 | - openoffice-brand-math.x86_64 11 | - openoffice-brand-writer.x86_64 12 | - openoffice-calc.x86_64 13 | - openoffice-core01.x86_64 14 | - openoffice-core02.x86_64 15 | - openoffice-core03.x86_64 16 | - openoffice-core04.x86_64 17 | - openoffice-core05.x86_64 18 | - openoffice-core06.x86_64 19 
| - openoffice-core07.x86_64 20 | - openoffice-draw.x86_64 21 | - openoffice-en-US.x86_64 22 | - openoffice-en-US-base.x86_64 23 | - openoffice-en-US-calc.x86_64 24 | - openoffice-en-US-draw.x86_64 25 | - openoffice-en-US-help.x86_64 26 | - openoffice-en-US-impress.x86_64 27 | - openoffice-en-US-math.x86_64 28 | - openoffice-en-US-res.x86_64 29 | - openoffice-en-US-writer.x86_64 30 | - openoffice-gnome-integration.x86_64 31 | - openoffice-graphicfilter.x86_64 32 | - openoffice-images.x86_64 33 | - openoffice-impress.x86_64 34 | - openoffice-javafilter.x86_64 35 | - openoffice-math.x86_64 36 | - openoffice-ogltrans.x86_64 37 | - openoffice-onlineupdate.x86_64 38 | - openoffice-ooofonts.x86_64 39 | - openoffice-ooolinguistic.x86_64 40 | - openoffice-pyuno.x86_64 41 | - openoffice-ure.x86_64 42 | - openoffice-writer.x86_64 43 | - openoffice-xsltfilter.x86_64 44 | - openoffice4.0-redhat-menus.noarch -------------------------------------------------------------------------------- /deploy/roles/python27-scl/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | Install Python2.7 environment from Centos / Redhat Software Collections 5 | 6 | Requirements 7 | ------------ 8 | 9 | libselinux-python may be required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | ansible_distribution 15 | 16 | If "CentOS" will install the CentOS SCL. SCL assumed to already be installed for RHEL 17 | 18 | python_pip_bin: [/opt/rh/python27/root/usr/bin/pip2.7] 19 | 20 | points to pip binary installed by this role useful for setting the executable parameter 21 | in "pip" module tasks. Used by python-libs and clouseau roles. 22 | 23 | python_pip_ld_path: [/opt/rh/python27/root/usr/lib64] 24 | 25 | For setting LD_LIBRARY_PATH when using SCL python. Needed by python27 where libpython path 26 | is not compiled in to the binary. 
27 | 28 | set_python27_default_python: [False] 29 | 30 | If True will set /etc/profile.d to make python27 the default python for users 31 | 32 | "python" tag will run all tasks in this role 33 | 34 | 35 | Dependencies 36 | ------------ 37 | 38 | Under RHEL this role expects the SCL to be availiable. 39 | 40 | Example Playbook 41 | ---------------- 42 | 43 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 44 | 45 | - hosts: servers 46 | roles: 47 | - python27-scl 48 | 49 | - hosts: servers 50 | roles: 51 | - { role: python27-scl, set_python27_default_python: True } 52 | 53 | 54 | Author Information 55 | ------------------ 56 | 57 | CFPB 58 | -------------------------------------------------------------------------------- /deploy/roles/kerberos_apache/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Role for configuring kerberos to work with apache 2 | --- 3 | - name: Create http principal 4 | command: kadmin -p {{ krb_service_user }}/admin -w {{ krb_service_user_password }} -q "addprinc -randkey HTTP/{{ proxy_subdomain }}.{{ domain_name }}" 5 | when: create_keytabs and kerberos_enabled 6 | run_once: True 7 | 8 | - name: Add keytab for http 9 | command: kadmin -p {{ krb_service_user }}/admin -w {{ krb_service_user_password }} -q "ktadd -k /etc/httpd/http.keytab HTTP/{{ proxy_subdomain }}.{{ domain_name }}" 10 | args: 11 | creates: /etc/httpd/http.keytab 12 | when: create_keytabs and kerberos_enabled 13 | register: created_http_keytab 14 | 15 | - name: Update permissions 16 | file: 17 | path: /etc/httpd/http.keytab 18 | state: touch 19 | owner: apache 20 | group: apache 21 | when: create_keytabs and created_http_keytab 22 | 23 | - name: Install mod_auth_kerb and mod_authz_ldap (6.x) 24 | yum: 25 | name: "{{ item }}" 26 | state: latest 27 | with_items: 28 | - mod_auth_kerb 29 | - mod_authz_ldap 30 | when: 
ansible_distribution_major_version == "6" 31 | 32 | - name: Install mod_auth_kerb and mod_ldap (7.x) 33 | yum: 34 | name: "{{ item }}" 35 | state: latest 36 | with_items: 37 | - mod_auth_kerb 38 | - mod_ldap 39 | when: ansible_distribution_major_version == "7" 40 | 41 | - name: Update neccessary LDAP setting 42 | lineinfile: 43 | dest: /etc/openldap/ldap.conf 44 | line: "REFERRALS off" 45 | state: present 46 | insertafter: EOF 47 | -------------------------------------------------------------------------------- /deploy/roles/kerberos/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # main.yml - Kerberos client setup 2 | --- 3 | - name: Install Kerberos 4 | yum: 5 | name: "{{ item }}" 6 | state: present 7 | with_items: 8 | - krb5-libs 9 | - krb5-workstation 10 | - krb5-devel 11 | - pam_krb5 12 | - sssd-krb5 13 | - sssd-krb5-common 14 | - libselinux-python 15 | 16 | - name: Create log directory 17 | file: 18 | dest: /var/krb5 19 | state: directory 20 | 21 | - name: Update krb5.conf configuration file 22 | template: 23 | src: krb5.conf 24 | dest: /etc/krb5.conf 25 | 26 | - name: Create host principal 27 | command: kadmin -p {{ krb_service_user }}/admin -w {{ krb_service_user_password }} -q "addprinc -randkey host/{{ proxy_subdomain }}.{{ domain_name }}" 28 | when: create_keytabs and kerberos_enabled 29 | run_once: True 30 | 31 | - name: Add keytab for host 32 | command: kadmin -p {{ krb_service_user }}/admin -w {{ krb_service_user_password }} -q "ktadd host/{{ proxy_subdomain }}.{{ domain_name }}" 33 | when: create_keytabs and kerberos_enabled 34 | run_once: True 35 | 36 | - name: Create empty log files if they don't exist 37 | file: 38 | path: "{{ item }}" 39 | state: touch 40 | owner: root 41 | group: root 42 | mode: 0640 43 | with_items: "{{ krb_log_file_locations }}" 44 | 45 | - name: Allow admins to read logs 46 | acl: 47 | path: "{{ item }}" 48 | entity: "{{ restricted_group_name }}" 49 | etype: group 50 | 
permissions: rX 51 | recursive: yes 52 | state: present 53 | with_items: "{{ krb_log_file_locations }}" 54 | 55 | -------------------------------------------------------------------------------- /deploy/roles/log-courier/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install libselinux-python 3 | yum: 4 | name: libselinux-python 5 | state: present 6 | 7 | - name: Enable log-courier repo 8 | template: 9 | src: log-courier.repo 10 | dest: /etc/yum.repos.d/log-courier.repo 11 | # when: not custom_repo 12 | 13 | - name: Install log-courier 14 | yum: 15 | name: [ 16 | 'zeromq3', 17 | 'zeromq3-devel', 18 | 'log-courier' 19 | ] 20 | state: present 21 | when: '"elk" in groups' 22 | 23 | - name: Install configuration template 24 | template: 25 | src: log-courier.conf.j2 26 | dest: /etc/log-courier/log-courier.conf 27 | when: '"elk" in groups' 28 | 29 | - name: Make log directory 30 | file: 31 | dest: /var/log/log-courier/ 32 | state: directory 33 | 34 | - name: Start and enable log-courier 35 | service: 36 | name: log-courier 37 | state: started 38 | enabled: yes 39 | when: '"elk" in groups' 40 | 41 | - name: Create admin group 42 | group: 43 | name: "{{ restricted_group_name }}" 44 | state: present 45 | 46 | - name: Create empty log files if they don't exist 47 | file: 48 | path: "{{ item }}" 49 | state: touch 50 | owner: root 51 | group: root 52 | mode: 0640 53 | with_items: "{{ logcourier_log_file_locations }}" 54 | 55 | - name: Allow admins to read logs 56 | acl: 57 | path: "{{ item }}" 58 | entity: "{{ restricted_group_name }}" 59 | etype: group 60 | permissions: rX 61 | recursive: yes 62 | state: present 63 | with_items: "{{ logcourier_log_file_locations }}" 64 | 65 | -------------------------------------------------------------------------------- /deploy/roles/apache/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apache_24_repo: 
"http://repos.fedorapeople.org/repos/jkaluza/httpd24/epel-httpd24.repo" 3 | apache_packages: 4 | - httpd24-httpd 5 | - httpd24-httpd-devel 6 | - httpd24-mod_ssl 7 | - httpd24-mod_ldap 8 | - http://cbs.centos.org/kojifiles/packages/httpd24-mod_auth_kerb/5.4/30.el6/x86_64/httpd24-mod_auth_kerb-5.4-30.el6.x86_64.rpm 9 | - openssh 10 | - php 11 | - php-common 12 | - php-ldap 13 | 14 | apache_disabled_services: 15 | - httpd 16 | 17 | apache_config_path: /opt/rh/httpd24/root/etc/httpd 18 | apache_config_dir: /opt/rh/httpd24/root/etc/httpd/conf.d 19 | 20 | # Apache service initial state 21 | apache_service_state: "started" 22 | apache_service_enable: "yes" 23 | 24 | # Apache configuration 25 | apache_server_tokens: "Full" 26 | apache_default_listen: 80 27 | apache_timeout: 60 28 | apache_server_root: "/opt/rh/httpd24/root/etc/httpd" 29 | apache_keepalive: On 30 | apache_max_keepalive_requests: 100 31 | apache_keepalive_timeout: 5 32 | apache_server_name: "{{ ansible_hostname }}:80" 33 | apache_document_root: "/opt/rh/httpd24/root/var/www/html" 34 | apache_document_parent: "/opt/rh/httpd24/root/var/www" 35 | apache_default_document_root: "/opt/rh/httpd24/root/var/html" 36 | apache_log_formats: 37 | - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined 38 | - LogFormat "%h %l %u %t \"%r\" %>s %b" common 39 | apache_custom_logs: 40 | - CustomLog "logs/access_log" combined 41 | apache_trace_enable: On 42 | apache_log_level: warn 43 | 44 | apache_error_log: logs/error_log 45 | 46 | apache_log_folder: /var/log/httpd24/ 47 | -------------------------------------------------------------------------------- /deploy/audit_resources.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: no 4 | 5 | tasks: 6 | - name: Make directory for stats 7 | file: 8 | state: directory 9 | path: /tmp/resource_allocation 10 | mode: 0755 11 | delegate_to: admin_terminal 12 | 13 | - name: Get number of 
processors 14 | shell: "nproc" 15 | register: num_proc 16 | 17 | - name: Get memory 18 | shell: "vmstat -s | awk -F' ' '{print $1}' | head -1" 19 | register: total_mem 20 | 21 | - name: Get disk allocation 22 | shell: "df -P | grep '^/dev' | awk -F' ' '{sum += $2} END { print sum }'" 23 | register: total_disk 24 | 25 | - name: Get disk usage 26 | shell: "df -P | grep '^/dev' | awk -F' ' '{sum += $3} END { print sum }'" 27 | register: used_disk 28 | 29 | - name: Output to file 30 | copy: 31 | dest: "/tmp/resource_allocation/IN_{{ ansible_fqdn }}.csv" 32 | content: > 33 | {{ansible_fqdn }},{{ num_proc.stdout }},{{ total_mem.stdout }},{{ total_disk.stdout }},{{ used_disk.stdout }}\n 34 | force: yes 35 | delegate_to: admin_terminal 36 | 37 | - hosts: admin_terminal 38 | tasks: 39 | - name: Rollup all 40 | shell: cat /tmp/resource_allocation/IN_* 41 | register: resources 42 | 43 | - name: Create file report 44 | copy: 45 | dest: /tmp/resource_allocation/resource_allocation.csv 46 | content: "{{ resources.stdout }}" 47 | force: yes 48 | 49 | - name: Fetch file 50 | fetch: 51 | src: /tmp/resource_allocation/resource_allocation.csv 52 | dest: ./ 53 | flat: yes 54 | -------------------------------------------------------------------------------- /deploy/roles/r-core/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure umask is correct 3 | lineinfile: 4 | dest: "~/.bashrc" 5 | line: umask 0022 6 | regexp: (.*)umask(.*) 7 | state: present 8 | tags: 9 | - r-core 10 | 11 | - name: Install R-core 12 | yum: 13 | name: R-core 14 | state: "{{ r_core_state }}" 15 | disable_gpg_check: yes 16 | register: r_core_updated 17 | tags: 18 | - r-core 19 | 20 | - name: Check if rstudio server is installed 21 | yum: 22 | list: rstudio-server 23 | when: r_core_updated.changed 24 | register: rstudio_installed 25 | tags: 26 | - r-core 27 | 28 | - name: Restart rstudio if R updates and rstudio server is installed 29 | command: 
/bin/true 30 | when: "{{ (rstudio_installed is defined and rstudio_installed.results|length > 0 and item['yumstate']=='installed') }}" 31 | with_items: "{{ rstudio_installed.results | default([]) }}" 32 | notify: restart rstudio-server 33 | tags: 34 | - r-core 35 | 36 | - name: Install base packages 37 | yum: 38 | name: "{{ item }}" 39 | state: "{{ r_core_state }}" 40 | disable_gpg_check: yes 41 | with_items: 42 | - R-devel 43 | - blas 44 | - blas-devel 45 | - lapack 46 | - lapack-devel 47 | - texinfo 48 | - texinfo-tex 49 | - libicu 50 | - libicu-devel 51 | tags: 52 | - r-core 53 | 54 | - name: Ensure Java is configured 55 | shell: "R CMD javareconf" 56 | tags: 57 | - r-core 58 | 59 | - name: Ensure that R is executable 60 | file: 61 | dest: /usr/lib64/R/etc/{{ item }} 62 | mode: 0644 63 | with_items: 64 | - ldpaths 65 | - Makeconf 66 | tags: 67 | - r-core 68 | -------------------------------------------------------------------------------- /deploy/deploy_development_terminal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: analytics_terminal 3 | become: yes 4 | become_method: sudo 5 | 6 | roles: 7 | - {role: epel, when: "use_epel and not 'production' in group_names"} 8 | - {role: python, when: "install_python and custom_repo" } 9 | #- {role: python-build, when: "not custom_repo"} 10 | - {role: python27-scl, when: "install_python and not custom_repo" } 11 | - {role: python36-scl, when: "install_python"} 12 | - {role: devtools, when: "ansible_distribution_major_version == 6"} 13 | - {role: devtools6, when: "ansible_distribution_major_version == 7"} 14 | - ruby 15 | - jdk 16 | - pycharm 17 | - postgresql-client 18 | - {role: nginx, vars: { nginx_state: "stopped", nginx_enable: "no" } } 19 | - spark 20 | - sublime 21 | - nodejs 22 | - redis 23 | - maven 24 | - intellij 25 | - clouseau 26 | - odbc 27 | - python-libs 28 | # - julia 29 | 30 | # Install development packages 31 | - hosts: analytics_terminal 32 | 
become: yes 33 | tasks: 34 | - name: Install development packages 35 | yum: 36 | name: "{{ item }}" 37 | state: present 38 | disable_gpg_check: yes 39 | with_items: 40 | - "@development" 41 | - gcc-gfortran 42 | - python-devel 43 | 44 | - name: Install custom RPMs 45 | yum: 46 | name: "{{ item }}" 47 | state: present 48 | disable_gpg_check: yes 49 | with_items: 50 | - xsv 51 | - gnucobol 52 | - htop 53 | - p7zip 54 | - rhobot 55 | when: custom_repo 56 | -------------------------------------------------------------------------------- /deploy/upgrade_citus_enterprise.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Example usage: 3 | # ansible-playbook --private-key=~/.vagrant.d/insecure_private_key -u vagrant -i vagrant_hosts upgrade_citus_enterprise.yml --limit citus_coordinator 4 | - hosts: citus 5 | become: yes 6 | become_method: sudo 7 | serial: 1 8 | max_fail_percentage: 1 9 | roles: 10 | - citus 11 | pre_tasks: 12 | - name: Uninstall community edition 13 | yum: 14 | name: "{{ item }}" 15 | state: absent 16 | with_items: "{{ citus_community_pkgs }}" 17 | 18 | - name: Install enterprise edition 19 | yum: 20 | name: "{{ item }}" 21 | state: present 22 | with_items: "{{ citus_pkgs }}" 23 | notify: restart postgres 24 | 25 | # Restart postgres 26 | - meta: flush_handlers 27 | 28 | - pause: 29 | seconds: 30 30 | 31 | - name: Update citus extension 32 | postgresql_exec: 33 | login_user: "{{ pp_superaccount }}" 34 | login_password: "{{ pp_superpassword }}" 35 | login_host: localhost 36 | port: "{{ pp_serverport }}" 37 | db: "{{ item }}" 38 | script: "ALTER EXTENSION citus UPDATE;" 39 | with_items: "{{ citus_databases }}" 40 | check_mode: false 41 | 42 | - name: Create shared_rebalancer extension 43 | postgresql_exec: 44 | login_user: "{{ pp_superaccount }}" 45 | login_password: "{{ pp_superpassword }}" 46 | login_host: localhost 47 | port: "{{ pp_serverport }}" 48 | db: "{{ item }}" 49 | script: "CREATE EXTENSION 
shard_rebalancer;" 50 | with_items: "{{ citus_databases }}" 51 | check_mode: false 52 | -------------------------------------------------------------------------------- /deploy/templates/proxy.conf: -------------------------------------------------------------------------------- 1 | {{ template_masthead }} 2 | 3 | # Place this file in /etc/nginx/conf.d and ensure the below path is proper for your setup. 4 | # The vhost folder contains a collection of conf files, each of which defines a single 5 | # vhost directive. 6 | 7 | {% if enable_ssl %} 8 | server { 9 | listen 80; 10 | # Redirect to HTTPS here 11 | } 12 | 13 | server { 14 | listen 443 ssl; 15 | 16 | ssl_certificate {{ nginx_ssl_cert_file }}; 17 | ssl_certificate_key {{ nginx_ssl_key_file }}; 18 | ssl_protocols {{ nginx_ssl_protocols }}; 19 | 20 | {% if nginx_client_certificate is defined %} 21 | ssl_client_certificate {{ nginx_client_certificate }}; 22 | ssl_verify_client on; 23 | {% endif %} 24 | 25 | {% else %} 26 | server { 27 | listen 80; 28 | {% endif %} 29 | 30 | root /srv/www/proxy/html; 31 | index index.html index.htm; 32 | 33 | server_name {{ ansible_hostname }}.{{ domain_name }}; 34 | 35 | ### BLOCKING DOWNLOAD AGENTS: To prevent malicious traffic 36 | if ({{ nginx_block_download_agents }}) { 37 | return 403; 38 | } 39 | 40 | ### BLOCKING ROBOTS: To prevent enumeration and other attacks 41 | if ({{ nginx_block_robots }}) { 42 | return 403; 43 | } 44 | 45 | ### ALLOWING CONFIGURED DOMAINS OR REVERSE PROXIED REQUESTS: To provide layered security 46 | if ($host !~ ^({{ nginx_webcrawler_protect }}|{{ ansible_ssh_host }})) { 47 | return 444; 48 | } 49 | 50 | location / { 51 | try_files $uri $uri/ =404; 52 | } 53 | 54 | # Include additional files containing other locations to proxy. 
55 | include /srv/www/proxy/repo/*.conf; 56 | } 57 | -------------------------------------------------------------------------------- /deploy/deploy_db_terminal.yml: -------------------------------------------------------------------------------- 1 | # deploy_admin_terminal.yml 2 | --- 3 | - hosts: db_terminal 4 | become: yes 5 | become_method: sudo 6 | 7 | roles: 8 | - {role: puppet_disable, when: "disable_puppet"} 9 | - {role: iptables, when: "iptables_config"} 10 | - common 11 | - {role: epel, when: "use_epel and not 'production' in group_names"} 12 | - umask 13 | - {role: log-courier, when: "install_logstash"} 14 | - jdk 15 | - postgresql-client 16 | - eod 17 | - devtools 18 | - {role: python, when: "custom_repo"} 19 | - {role: python-build, when: "not custom_repo"} 20 | - python36-scl 21 | - python-libs 22 | - sqitch 23 | - {role: pem-client, tags: pem-client} 24 | - sublime 25 | - odbc 26 | tasks: 27 | 28 | - name: Install needed packages 29 | yum: 30 | name: "{{ item }}" 31 | state: present 32 | with_items: 33 | - htop 34 | - screen 35 | - rsync 36 | - unzip 37 | - gpg 38 | - pigz 39 | - facter 40 | - sysstat # sar, iostat, vmstat 41 | - systemtap # dtrace equivalent 42 | - ant 43 | 44 | - name: Install custom packages 45 | yum: 46 | name: "{{ item }}" 47 | state: "{{ db_automation_package_state }}" 48 | disable_gpg_check: yes 49 | with_items: "{{ db_automation_custom_packages }}" 50 | 51 | # - https://github.com/Factual/drake 52 | # - https://github.com/audreyr/cookiecutter 53 | # sudo yum install perl-devel perl-CPAN && curl -L http://cpanmin.us | perl - --sudo App::cpanminus 54 | # http://hammerora.sourceforge.net/download.html 55 | # - ogr2ogr in GDAL 56 | -------------------------------------------------------------------------------- /deploy/roles/logstash/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add elasticsearch key 3 | rpm_key: 4 | key: 
https://packages.elasticsearch.org/GPG-KEY-elasticsearch 5 | validate_certs: no # TODO remove on rhel7 6 | state: present 7 | when: not "production" in group_names 8 | 9 | - name: Enable Logstash repo 10 | template: 11 | src: logstash.repo 12 | dest: /etc/yum.repos.d/logstash.repo 13 | when: not "production" in group_names 14 | 15 | - name: Install logstash and other needed packages 16 | yum: 17 | name: "{{ item }}" 18 | state: latest 19 | with_items: 20 | - logstash 21 | 22 | - name: Ensure that logstash user is able to login to a shell 23 | user: 24 | name: "{{ logstash_user }}" 25 | shell: /bin/bash 26 | 27 | - name: Install log-courier plugin 28 | shell: 'su logstash -c "/opt/logstash/bin/plugin install logstash-input-courier"' 29 | when: install_logstash 30 | notify: 31 | - restart logstash 32 | 33 | - name: Install logstash-output-elasticsearch plugin 34 | shell: 'su logstash -c "/opt/logstash/bin/logstash-plugin install logstash-output-elasticsearch"' 35 | notify: 36 | - restart logstash 37 | ignore_errors: yes 38 | 39 | - name: Remove sgroups because it doesn't work with Active Directory 40 | lineinfile: 41 | dest: /etc/init.d/logstash 42 | line: > 43 | #SGROUPS=$(id -Gn "$LS_USER") | tr " " "," | sed 's/,$//'; echo '')" 44 | regexp: (.*)SGROUPS=(.*) 45 | 46 | - name: Open port 5043:5044 in iptables 47 | command: "iptables -I {{ iptables_chain }} 3 -m state --state NEW -p tcp --dport 5043:5044 -j ACCEPT" 48 | when: iptables_config 49 | 50 | - name: Save rules 51 | command: "/sbin/service iptables save" 52 | when: iptables_config 53 | -------------------------------------------------------------------------------- /deploy/config_file_server.yml: -------------------------------------------------------------------------------- 1 | # configure_file_shares.yml - Setup shared volumes in GlusterFS 2 | # This configuration is specific to the default configuration 3 | # used by Vagrant, and so should not be used for a real deployment 4 | # without extensive 
modification. 5 | --- 6 | - hosts: file_server_2 7 | become: yes 8 | become_method: sudo 9 | 10 | tasks: 11 | - name: Probe for other file server 12 | command: "gluster peer probe 10.0.1.4" 13 | 14 | - name: Create volume 15 | command: "gluster volume create {{ item.name }} replica 2 transport tcp 10.0.1.4:/data/{{ item.name }} 10.0.1.5:/data/{{ item.name }}" 16 | with_items: "{{ shared_folders }}" 17 | register: gluster_create 18 | failed_when: gluster_create.rc != 0 and "already exists" not in gluster_create.stderr 19 | changed_when: gluster_create.rc == 0 20 | when: item.state == 'mounted' 21 | 22 | # - name: Create gluster volumes 23 | # gluster_volume: 24 | # name: "{{ item }}" 25 | # state: present 26 | # transport: tcp 27 | # cluster: "10.0.1.4" 28 | # brick: "/data/{{ item }}" 29 | # replicas: 2 30 | # with_items: shared_folders 31 | # run_once: True 32 | 33 | - name: Start gluster volumes 34 | gluster_volume: 35 | name: "{{ item.name }}" 36 | state: started 37 | with_items: "{{ shared_folders }}" 38 | when: item.state == "mounted" 39 | 40 | # - name: Start the volume 41 | # command: "gluster volume start {{ item }}" 42 | # with_items: shared_folders 43 | # register: gluster_start 44 | # failed_when: gluster_start.rc != 0 and "already started" not in gluster_start.stderr 45 | # changed_when: gluster_start.rc == 0 46 | -------------------------------------------------------------------------------- /deploy/roles/redis/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create Redis user 4 | user: 5 | name: "{{ redis_user }}" 6 | system: yes 7 | home: "{{ redis_db_dir }}" 8 | shell: "{{ redis_user_shell }}" 9 | state: present 10 | groups: "{{ redis_user_groups | join(',') }}" 11 | tags: 12 | - redis 13 | 14 | - name: Install the Redis package 15 | yum: 16 | name: redis 17 | state: present 18 | disable_gpg_check: yes 19 | tags: 20 | - redis 21 | 22 | - name: Make Configuration directory 23 | 
file: 24 | path: "{{ redis_config_directory }}" ## /etc/redis 25 | state: directory 26 | mode: 0755 27 | tags: 28 | - redis 29 | 30 | - name: Make Redis working directory 31 | file: 32 | path: "{{ redis_working_directory }}" ## /var/redis 33 | state: directory 34 | mode: 0755 35 | tags: 36 | - redis 37 | 38 | - name: Make Redis logfile 39 | file: 40 | path: "{{ redis_logfile }}" 41 | state: touch 42 | owner: "{{ redis_user }}" 43 | group: "{{ redis_user }}" 44 | mode: 1755 45 | tags: 46 | - redis 47 | 48 | - name: Copy the Redis configuration file 49 | template: 50 | src: redis.conf.j2 51 | dest: "{{ redis_config_file }}" ## /etc/redis/6379.conf 52 | tags: 53 | - redis 54 | 55 | - name: Open port 6379 in iptables 56 | command: "iptables -I {{ iptables_chain }} 3 -m state --state NEW -p tcp --dport {{ redis_port }} -j ACCEPT" 57 | when: iptables_config and deploy_iptables 58 | tags: 59 | - redis 60 | 61 | - name: Ensure redis is running 62 | service: 63 | name: "{{ redis_service }}" 64 | state: started 65 | enabled: yes 66 | when: not custom_repo # Redis doesn't have an init file when using our RPM 67 | tags: 68 | - redis 69 | -------------------------------------------------------------------------------- /deploy/roles/intellij/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # For now, we will install from a tarball, but we may move 3 | # to using a custom RPM, hence the commented out lines. 4 | 5 | # - name: Install intellij rpm 6 | # yum: 7 | # name: "intellij" 8 | # state: present 9 | # when: custom_repo 10 | # tags: 11 | # - intellij 12 | 13 | - name: Download intellij 14 | shell: "mkdir -p /var/cache/downloads; if [[ ! 
-f /var/cache/downloads/intellij-{{ intellij_version }} ]]; then curl -L --silent --show-error {{ intellij_url }} -o /var/cache/downloads/intellij-{{ intellij_version }} ; fi" 15 | # when: not custom_repo 16 | tags: 17 | - intellij 18 | 19 | - name: Create install directory 20 | file: 21 | path: "{{ intellij_install_dir }}" 22 | state: directory 23 | mode: 0755 24 | tags: 25 | - intellij 26 | 27 | - name: Extract intellij from downloads dir 28 | unarchive: 29 | src: "/var/cache/downloads/intellij-{{ intellij_version }}" 30 | dest: "{{ intellij_install_dir }}" 31 | copy: no 32 | # when: not custom_repo 33 | tags: 34 | - intellij 35 | 36 | - name: Create symlink for idea.sh 37 | file: 38 | src: "{{ intellij_install_dir }}/{{ intellij_idea_dir }}/bin/idea.sh" 39 | dest: "{{ intellij_main_dir }}/bin/idea.sh" 40 | state: link 41 | # when: not custom_repo 42 | tags: 43 | - intellij 44 | 45 | - name: Install Intellij IDEA desktop file 46 | template: 47 | src: idea.desktop.j2 48 | dest: "{{ intellij_desktop_dir }}/idea.desktop" 49 | mode: 0644 50 | # when: not custom_repo 51 | tags: 52 | - intellij 53 | 54 | - name: Set up idea.png 55 | copy: 56 | src: idea.png 57 | dest: "{{ intellij_main_dir }}/bin/" 58 | # when: not custom_repo 59 | tags: 60 | - intellij 61 | -------------------------------------------------------------------------------- /deploy/roles/matlab/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | matlab_install_directory: /usr/local/MATLAB 3 | # matlab_install_files: /usr/local/MATLAB_install 4 | matlab_version: R2018b 5 | matlab_iso1_mount: iso1 6 | matlab_iso2_mount: iso2 7 | matlab_license_folder: licenses 8 | matlab_combined_mount: installer 9 | matlab_output_location: output 10 | install_matlab: False 11 | 12 | matlab_iso1: R2018b_glnxa64_dvd1.iso 13 | matlab_iso2: R2018b_glnxa64_dvd2.iso 14 | 15 | matlab_installation_key: installkey 16 | matlab_outputfile: 
/usr/local/MATLAB/R2018b/output/mathworks.log 17 | matlab_license_location: /usr/local/MATLAB/R2018b/licenses/license.lic 18 | matlab_network_license_file: /usr/local/MATLAB/R2018b/licenses/network.lic 19 | matlab_license_file: proprietary_license_info 20 | matlab_license_server: server 21 | matlab_license_daemon: daemon 22 | matlab_license_boot_symlink_src: etc/lmboot 23 | matlab_license_down_symlink_src: etc/lmdown 24 | 25 | # Used in the GROUP NAMED_USERS line in the mlm.opt file. 26 | # A user's operating-system username MUST be listed here to have access to MATLAB. 27 | # Case does not matter: 'USERNAME', 'UserName', or 'username' will all match 'username'. 28 | # The number of users listed here must not exceed the number of available license keys. 29 | matlab_named_users: 30 | - dummyusername 31 | # - anotherusername 32 | 33 | matlab_symlink_boot_dest: /etc/lmboot_TMW 34 | matlab_symlink_down_dest: /etc/lmdown_TMW 35 | matlab_symlink_dest: /usr/local/bin/matlab 36 | matlab_symlink_src: bin/matlab 37 | matlab_nnu_assetinfo: "fake_nnu_assetinfo" 38 | matlab_concurrent_assetinfo: "fake_concurrent_assetinfo" 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled source # 2 | ################### 3 | *.com 4 | *.class 5 | *.dll 6 | *.exe 7 | *.o 8 | *.so 9 | _site/ 10 | 11 | # Packages # 12 | ############ 13 | # it's better to unpack these files and commit the raw source 14 | # git has its own built in compression methods 15 | *.7z 16 | *.dmg 17 | *.gz 18 | *.iso 19 | *.jar 20 | *.rar 21 | *.tar 22 | *.zip 23 | 24 | # Logs and databases # 25 | ###################### 26 | *.log 27 | *.sql 28 | *.sqlite 29 | 30 | # OS generated files # 31 | ###################### 32 | .DS_Store 33 | .DS_Store? 34 | .Spotlight-V100 35 | .Trashes 36 | Icon? 
37 | ehthumbs.db 38 | Thumbs.db 39 | 40 | # Vim swap files # 41 | ################## 42 | *.swp 43 | 44 | # Python # 45 | ################# 46 | *.pyc 47 | *.egg-info/ 48 | __pycache__/ 49 | *.py[cod] 50 | .env 51 | 52 | # Django # 53 | ################# 54 | *.egg-info 55 | .installed.cfg 56 | 57 | # Unit test / coverage reports 58 | ################# 59 | htmlcov/ 60 | .tox/ 61 | .coverage 62 | .cache 63 | nosetests.xml 64 | coverage.xml 65 | 66 | # Front-End # 67 | ############# 68 | node_modules/ 69 | bower_components/ 70 | .grunt/ 71 | src/vendor/ 72 | dist/ 73 | 74 | # Vagrant # 75 | ########### 76 | .vagrant/ 77 | *.vdi 78 | local_settings.yml 79 | 80 | # extra variable files 81 | .vagrant* 82 | 83 | 84 | # Ansible # 85 | ########### 86 | *.retry 87 | 88 | # Ansible Vault password file 89 | .vault_password 90 | 91 | # Local project files # 92 | ####################### 93 | local_settings.yml 94 | system_key 95 | 96 | *~ 97 | \#* 98 | pipcache 99 | yumcache 100 | 101 | # This is a generated file, but the initial version is useful and should not change: 102 | .devcontainer/docker-compose.yml 103 | -------------------------------------------------------------------------------- /deploy/roles/kibana-config/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup configuration 3 | template: 4 | src: "{{ kibana_config_file }}" 5 | dest: "{{ kibana_config_path }}/kibana.yml" 6 | notify: 7 | - restart kibana 8 | 9 | - name: Create SSL folder 10 | file: 11 | dest: /opt/kibana/ssl 12 | state: directory 13 | when: kibana_generate_certs 14 | 15 | - name: Generate SSL Certs if requested 16 | command: "openssl req -new -nodes -x509 -subj '/C={{ country }}/ST={{ state }}/L={{ city }}/O={{ organization_name }}/CN={{ ansible_hostname }}.{{ domain_name }}' -days 3650 -keyout {{ kibana_ssl_key_file }} -out {{ kibana_ssl_cert_file }} -extensions v3_ca" 17 | when: kibana_generate_certs 18 | args: 19 | creates: "{{ 
kibana_ssl_cert_file }}" 20 | 21 | - name: Install kibana plugins 22 | command: /opt/kibana/bin/kibana plugin --install {{ item }} 23 | with_items: "{{ kibana_plugins }}" 24 | register: kibana_result 25 | failed_when: kibana_result.rc != 0 and "already exists" not in kibana_result.stderr 26 | changed_when: '"already exists" not in kibana_result.stderr' 27 | notify: 28 | - restart kibana 29 | 30 | - name: Start Kibana 31 | service: 32 | name: kibana 33 | state: started 34 | 35 | - name: Set recursive admin permissions on log folders 36 | acl: 37 | path: "{{ item }}" 38 | entity: "{{ restricted_group_name }}" 39 | etype: group 40 | permissions: rX 41 | recursive: yes 42 | state: present 43 | with_items: "{{ kibana_log_file_locations }}" 44 | 45 | - name: Set default admin permissions on log folders 46 | acl: 47 | path: "{{ item }}" 48 | entity: "{{ restricted_group_name }}" 49 | etype: group 50 | permissions: rX 51 | default: yes 52 | state: present 53 | with_items: "{{ kibana_log_file_locations }}" -------------------------------------------------------------------------------- /test/files/gocd/wait_for_pipeline.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import time 3 | import sys 4 | 5 | if __name__ == "__main__": 6 | # Set exit code 7 | exit_code = 0 8 | 9 | # Extract the pipeline name from arguments 10 | assert len(sys.argv)==3, "Please provide the gocd server and pipeline slug (and no other arguments): %s" % sys.argv[1:] 11 | gocd_server = sys.argv[1] 12 | pipeline_slug = sys.argv[2] 13 | 14 | # First, get the pipeline's history 15 | pipeline_history_url = '{0}/go/api/pipelines/{1}/history/0'.format(gocd_server, pipeline_slug) 16 | pipeline_history = requests.get(pipeline_history_url).json() 17 | 18 | pipeline_instance_id = pipeline_history['pipelines'][0]["natural_order"] 19 | pipeline_instance_url = '{0}/go/api/pipelines/{1}/instance/{2}'.format( 20 | gocd_server, pipeline_slug, 
pipeline_instance_id 21 | ) 22 | 23 | time_elapsed = 0 24 | 25 | # Every 15 seconds, check if the pipeline's stages are all completed. 26 | while True: 27 | time.sleep(15) 28 | 29 | time_elapsed += 15 30 | if time_elapsed > 300: 31 | print "Script timed-out while waiting for pipeline to complete." 32 | exit_code = 1 33 | 34 | pipeline_instance = requests.get(pipeline_instance_url).json() 35 | 36 | for stage in pipeline_instance['stages']: 37 | if stage['result'] == "Failed": 38 | print "Pipeline execution failed at stage: {0}".format(stage['name']) 39 | exit_code = 1 40 | break 41 | 42 | if stage['result'] != "Passed": 43 | continue 44 | 45 | break 46 | 47 | if exit_code == 0: 48 | print "Pipeline instance {0} completed successfully!".format(pipeline_instance_id) 49 | 50 | exit(exit_code) 51 | --------------------------------------------------------------------------------