├── group_vars
├── .gitkeep
├── pubkeys
│ ├── bgeiger.pub
│ ├── cseto.pub
│ ├── michael.pub
│ ├── sloria.pub
│ ├── fabian.pub
│ ├── joshcarp.pub
│ ├── jspies.pub
│ └── lyndsysimon.pub
└── osf-benchmarking
├── roles
├── docker-spa
│ └── files
│ │ ├── cos
│ │ └── env
│ │ ├── lookit
│ │ └── env
│ │ ├── badges
│ │ └── env
│ │ ├── experimenter
│ │ └── env
│ │ └── nginx
│ │ ├── conf.d
│ │ ├── badges.conf
│ │ ├── isp.conf
│ │ ├── lookit.conf
│ │ ├── experimenter.conf
│ │ └── cos.conf
│ │ └── nginx.conf
├── docker-mongo
│ ├── files
│ │ ├── mongo-keyfile
│ │ └── mongo.conf
│ └── defaults
│ │ └── main.yml
├── docker-nginx
│ └── files
│ │ ├── ssl
│ │ └── .gitkeep
│ │ └── conf
│ │ ├── .htpasswd
│ │ ├── nginx.conf
│ │ └── conf.d
│ │ └── default.conf
├── docker-haproxy
│ ├── files
│ │ ├── ssl
│ │ │ └── .gitkeep
│ │ └── conf
│ │ │ └── errors
│ │ │ ├── 400.http
│ │ │ ├── 403.http
│ │ │ ├── 500.http
│ │ │ ├── 504.http
│ │ │ ├── 502.http
│ │ │ ├── 408.http
│ │ │ ├── 503.http
│ │ │ └── README
│ └── defaults
│ │ └── main.yml
├── docker-mfr
│ ├── files
│ │ └── settings.json
│ ├── tasks
│ │ └── main.yml
│ └── defaults
│ │ └── main.yml
├── docker-tokumx
│ ├── files
│ │ └── tokumx-keyfile
│ └── defaults
│ │ └── main.yml
├── docker-waterbutler
│ ├── files
│ │ ├── ssl
│ │ │ └── .gitkeep
│ │ └── settings.json
│ └── tasks
│ │ ├── main.yml
│ │ └── celery.yml
├── (legacy)
│ ├── tokumx
│ │ ├── tasks
│ │ │ ├── migrate.yml
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── tokumx.list
│ │ └── handlers
│ │ │ └── main.yml
│ ├── lxml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── env-vars
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── nginx
│ │ ├── templates
│ │ │ ├── modules
│ │ │ │ └── http_gzip_static.conf.j2
│ │ │ └── default.site.j2
│ │ ├── tasks
│ │ │ └── modules
│ │ │ │ ├── ipv6_module.yml
│ │ │ │ ├── http_perl_module.yml
│ │ │ │ ├── http_spdy_module.yml
│ │ │ │ ├── http_ssl_module.yml
│ │ │ │ ├── google_perftools_module.yml
│ │ │ │ ├── _authorized_ips.yml
│ │ │ │ ├── http_realip_module.yml
│ │ │ │ ├── http_gzip_static_module.yml
│ │ │ │ ├── http_echo_module.yml
│ │ │ │ ├── http_stub_status_module.yml
│ │ │ │ ├── http_auth_request_module.yml
│ │ │ │ ├── headers_more_module.yml
│ │ │ │ ├── naxsi_module.yml
│ │ │ │ └── upload_progress_module.yml
│ │ └── handlers
│ │ │ └── main.yml
│ ├── newrelic
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── docker-haproxy
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── 49-haproxy.conf.j2
│ │ │ └── logrotate.j2
│ │ └── defaults
│ │ │ └── main.yml
│ ├── gitlab
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── gitconfig.j2
│ │ │ ├── database.j2
│ │ │ └── init_defaults.j2
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── ruby.yml
│ │ │ ├── install_ruby_from_source.yml
│ │ │ └── database.yml
│ │ └── files
│ │ │ └── rack_attack.rb
│ ├── sentry
│ │ ├── templates
│ │ │ └── sentry.conf.j2
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── files
│ │ │ └── create_superuser.py
│ ├── uwsgi
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── uwsgi.conf
│ │ └── tasks
│ │ │ └── main.yml
│ ├── supervisor
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── handlers
│ │ │ └── main.yml
│ ├── osf
│ │ ├── README.md
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── nginx.conf.j2
│ │ │ ├── uwsgi.conf.j2
│ │ │ └── uwsgi.ini.j2
│ │ └── defaults
│ │ │ └── main.yml
│ ├── r-lang
│ │ └── tasks
│ │ │ └── main.yml
│ ├── elasticsearch
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── postgresql
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── rackspace-multicast
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── docker-rsyslog
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── logrotate.j2
│ │ └── tasks
│ │ │ └── main.yml
│ ├── docker-logentries
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── bower
│ │ └── tasks
│ │ │ └── main.yml
│ ├── docker-clean
│ │ └── tasks
│ │ │ └── main.yml
│ ├── github
│ │ └── tasks
│ │ │ └── main.yml
│ └── mongo
│ │ └── tasks
│ │ └── main.yml
├── apt
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── hostname
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── overcommit-memory
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── security_checks
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── restart_csf.yml
│ │ ├── stop_testing.yml
│ │ ├── start_testing.yml
│ │ ├── tests
│ │ │ ├── check_configuration.yml
│ │ │ ├── deny_brute_force_ssh.yml
│ │ │ └── deny_port_scans.yml
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ └── README.md
├── docker-cas
│ ├── tasks
│ │ └── main.yml
│ ├── files
│ │ ├── shibboleth-sp
│ │ │ └── apache2
│ │ │ │ ├── ports.conf
│ │ │ │ └── sites-enabled
│ │ │ │ └── default.conf
│ │ └── nginx
│ │ │ ├── conf.d
│ │ │ └── cas.conf
│ │ │ └── nginx.conf
│ └── defaults
│ │ └── main.yml
├── docker-cos
│ ├── tasks
│ │ └── main.yml
│ ├── files
│ │ ├── server
│ │ │ ├── local.py
│ │ │ └── uwsgi.ini
│ │ └── nginx
│ │ │ ├── conf.d
│ │ │ └── cos.io.conf
│ │ │ └── nginx.conf
│ └── defaults
│ │ └── main.yml
├── docker-dor
│ ├── tasks
│ │ └── main.yml
│ ├── files
│ │ ├── nginx
│ │ │ ├── conf.d
│ │ │ │ └── dor.conf
│ │ │ └── nginx.conf
│ │ ├── local.py
│ │ └── uwsgi.ini
│ └── defaults
│ │ └── main.yml
├── docker-jam
│ ├── tasks
│ │ └── main.yml
│ ├── files
│ │ ├── nginx
│ │ │ ├── conf.d
│ │ │ │ └── jam.conf
│ │ │ └── nginx.conf
│ │ └── conf
│ │ │ └── local.yml
│ └── defaults
│ │ └── main.yml
├── docker-osf
│ ├── files
│ │ ├── elasticsearch
│ │ │ ├── .htpasswd
│ │ │ └── nginx.conf
│ │ ├── conf
│ │ │ └── api
│ │ │ │ └── local.py
│ │ ├── adminserver
│ │ │ └── uwsgi.ini
│ │ ├── apiserver
│ │ │ └── uwsgi.ini
│ │ ├── server
│ │ │ └── uwsgi.ini
│ │ └── nginx
│ │ │ └── nginx.conf
│ └── tasks
│ │ └── sharejs.yml
├── docker-share-reg
│ ├── tasks
│ │ └── main.yml
│ └── files
│ │ ├── local.py
│ │ ├── uwsgi.ini
│ │ └── nginx.conf
├── docker
│ └── templates
│ │ ├── daemon.json.j2
│ │ └── logrotate.j2
├── newrelic-sysmond
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── Ansibles.build-essential
│ ├── test.yml
│ ├── .gitignore
│ ├── vars
│ │ ├── Suse.yml
│ │ ├── Debian.yml
│ │ └── Fedora.yml
│ ├── .travis.yml
│ ├── README.md
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── LICENSE
├── Ansibles.timezone
│ ├── defaults
│ │ └── main.yml
│ ├── test.yml
│ ├── .gitignore
│ ├── tasks
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── .travis.yml
│ ├── README.md
│ └── LICENSE
├── rackspace-cloudmonitor
│ ├── files
│ │ └── README.txt
│ └── tasks
│ │ └── main.yml
├── chkrootkit
│ ├── defaults
│ │ └── main.yml
│ ├── templates
│ │ └── etc_chkrootkit.conf.j2
│ └── tasks
│ │ └── main.yml
├── htop
│ └── tasks
│ │ └── main.yml
├── ntp
│ └── tasks
│ │ └── main.yml
├── tmux
│ └── tasks
│ │ └── main.yml
├── generic-users
│ ├── test.yml
│ ├── .gitignore
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── .travis.yml
│ ├── LICENSE
│ └── tasks
│ │ └── main.yml
├── ssh
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ └── test.yml
│ └── defaults
│ │ └── main.yml
├── csf
│ ├── templates
│ │ ├── ui.allow.j2
│ │ └── csf.allow.j2
│ └── tasks
│ │ └── deps.yml
├── logrotate
│ └── tasks
│ │ └── main.yml
├── swap
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── Ansibles.monit
│ ├── .gitignore
│ ├── handlers
│ │ └── main.yml
│ ├── test.yml
│ ├── templates
│ │ ├── etc_monit_conf.d_cron.j2
│ │ ├── etc_monit_conf.d_ntp.j2
│ │ └── etc_monit_conf.d_sshd.j2
│ ├── .travis.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── defaults
│ │ └── main.yml
│ └── LICENSE
├── editors
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── network_interfaces
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── interfaces.j2
├── ansible
│ └── tasks
│ │ └── main.yml
├── docker-storage
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── rkhunter
│ ├── tasks
│ │ ├── cron.yml
│ │ └── main.yml
│ └── files
│ │ └── etc_cron.sh
├── fwknop
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-memcached
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── skel
│ └── tasks
│ │ └── main.yml
├── docker-fluentd
│ ├── files
│ │ └── etc
│ │ │ └── fluent.conf
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── logentries
│ ├── defaults
│ │ └── main.yml
│ └── files
│ │ └── filters.py
├── docker-scrapi
│ └── files
│ │ ├── fluentd
│ │ └── fluent.conf
│ │ ├── nginx
│ │ ├── conf.d
│ │ │ └── scrapi.conf
│ │ └── nginx.conf
│ │ └── apiserver
│ │ └── uwsgi.ini
├── jenkins-deployment
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── python
│ └── tasks
│ │ └── main.yml
├── docker-openvpn
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── csf-docker
│ ├── files
│ │ ├── csf-docker-wait.conf
│ │ ├── csf-docker-started.conf
│ │ └── csfdocker.sh
│ └── tasks
│ │ └── main.yml
├── rsyslog
│ └── tasks
│ │ └── main.yml
├── docker-shibboleth-sp
│ └── files
│ │ ├── apache2
│ │ ├── ports.conf
│ │ └── sites-enabled
│ │ │ └── default.conf
│ │ └── conf
│ │ ├── localLogout.html
│ │ ├── partialLogout.html
│ │ ├── globalLogout.html
│ │ ├── accessError.html
│ │ ├── sslError.html
│ │ ├── sp-cert.pem
│ │ ├── metadataError.html
│ │ ├── console.logger
│ │ ├── syslog.logger
│ │ └── security-policy.xml
├── docker-sentry
│ ├── files
│ │ └── nginx
│ │ │ ├── conf.d
│ │ │ └── sentry.conf
│ │ │ └── nginx.conf
│ └── tasks
│ │ └── celery.yml
├── docker-postgres
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-cassandra
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-celery-flower
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-jenkins
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-rabbitmq
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-unoconv
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-elasticsearch
│ ├── files
│ │ └── conf
│ │ │ └── logging.yml
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── docker-redis
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── transparent-huge-pages
│ ├── tasks
│ │ └── main.yml
│ └── files
│ │ └── disable-transparent-hugepages
├── docker-prerender
│ ├── defaults
│ │ └── main.yml
│ └── files
│ │ └── conf
│ │ └── server.js
├── docker-newrelic-pluginagent
│ └── defaults
│ │ └── main.yml
├── docker-postgres-vacuumlo
│ ├── tasks
│ │ └── main.yml
│ └── defaults
│ │ └── main.yml
├── rackspace-cloudbackup
│ └── tasks
│ │ └── main.yml
├── docker-varnish
│ ├── defaults
│ │ └── main.yml
│ └── files
│ │ └── conf
│ │ └── default.vcl
├── docker-ember
│ └── defaults
│ │ └── main.yml
├── docker-postgres-barman
│ └── files
│ │ └── conf
│ │ └── crontab
└── docker-postgres-repmgr
│ └── tasks
│ └── main.yml
├── playbooks
├── README.md
└── testing.yml
├── requirements.txt
├── ansible.cfg
├── fwknop.yml
├── logentries.yml
├── roles.txt
├── swap.yml
├── user.yml
├── mongo.yml
├── docker.yml
├── overcommit-memory.yml
├── jenkins-deployment.yml
├── rackspace-multicast.yml
├── transparent-huge-pages.yml
├── tokumx.yml
├── security.yml
├── rackspace.yml
├── setup_benchmarking.yml
├── docker-share-reg.yml
├── Brewfile
├── docker-elasticsearch.yml
├── newrelic-sysmond.yml
├── provision.yml
├── common.yml
├── gitlab.yml
├── docker-haproxy.yml
├── docker-varnish.yml
├── docker-newrelic-pluginagent.yml
├── deploy.yml
├── vagranthosts
├── site.yml
├── .gitignore
├── docker-jenkins.yml
├── bastion.yml
└── docker-logentries.yml
/group_vars/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/cos/env:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/lookit/env:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-mongo/files/mongo-keyfile:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-nginx/files/ssl/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/badges/env:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/ssl/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-mfr/files/settings.json:
--------------------------------------------------------------------------------
1 | {}
2 |
--------------------------------------------------------------------------------
/roles/docker-nginx/files/conf/.htpasswd:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/experimenter/env:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-tokumx/files/tokumx-keyfile:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/docker-waterbutler/files/ssl/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/roles/(legacy)/tokumx/tasks/migrate.yml:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/playbooks/README.md:
--------------------------------------------------------------------------------
1 | "One-off" playbooks go here.
2 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | invoke==0.8.2
2 | passlib==1.6.2
3 |
--------------------------------------------------------------------------------
/roles/apt/defaults/main.yml:
--------------------------------------------------------------------------------
1 | apt_update_cache: yes
2 |
--------------------------------------------------------------------------------
/roles/docker-waterbutler/files/settings.json:
--------------------------------------------------------------------------------
1 | {}
2 |
--------------------------------------------------------------------------------
/roles/hostname/defaults/main.yml:
--------------------------------------------------------------------------------
1 | hostname_name:
2 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [ssh_connection]
2 | ssh_args = -o ForwardAgent=yes
3 |
--------------------------------------------------------------------------------
/roles/overcommit-memory/defaults/main.yml:
--------------------------------------------------------------------------------
1 | overcommit_memory: 1
2 |
--------------------------------------------------------------------------------
/roles/security_checks/meta/main.yml:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - { role: csf }
3 |
--------------------------------------------------------------------------------
/roles/(legacy)/lxml/meta/main.yml:
--------------------------------------------------------------------------------
1 |
2 | dependencies:
3 | - {role: python}
4 |
--------------------------------------------------------------------------------
/roles/docker-cas/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: server.yml
2 | when: docker_cas_server
3 |
--------------------------------------------------------------------------------
/roles/docker-cos/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: server.yml
2 | when: docker_cos_server
3 |
--------------------------------------------------------------------------------
/roles/docker-dor/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: server.yml
2 | when: docker_dor_server
3 |
--------------------------------------------------------------------------------
/roles/docker-jam/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: server.yml
2 | when: docker_jam_server
3 |
--------------------------------------------------------------------------------
/roles/docker-mfr/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: server.yml
2 | when: docker_mfr_server
3 |
--------------------------------------------------------------------------------
/fwknop.yml:
--------------------------------------------------------------------------------
1 | - name: Set up fwknop
2 | hosts: all
3 | roles:
4 | - role: fwknop
5 |
--------------------------------------------------------------------------------
/logentries.yml:
--------------------------------------------------------------------------------
1 | - name: Log Entries
2 | hosts: all
3 | roles:
4 | - role: logentries
5 |
--------------------------------------------------------------------------------
/roles/(legacy)/env-vars/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # - {var: mycoolvar, val: yourcoolval}
2 | envvars:
3 |
--------------------------------------------------------------------------------
/roles/docker-osf/files/elasticsearch/.htpasswd:
--------------------------------------------------------------------------------
1 | elastic:$apr1$kfKRFRHN$mmlPgYJtyMr8kxj/ci7Jv0
2 |
--------------------------------------------------------------------------------
/roles/docker-share-reg/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: uwsgi.yml
2 | when: docker_share_reg_uwsgi
3 |
--------------------------------------------------------------------------------
/roles.txt:
--------------------------------------------------------------------------------
1 | Ansibles.build-essential,v1.0.0
2 | Ansibles.monit,v1.0.0
3 | Ansibles.timezone,v1.0.0
4 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/templates/modules/http_gzip_static.conf.j2:
--------------------------------------------------------------------------------
1 | gzip_static {{nginx_gzip_static}};
2 |
--------------------------------------------------------------------------------
/roles/docker/templates/daemon.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "icc": false,
3 | "storage-driver": "overlay2"
4 | }
--------------------------------------------------------------------------------
/roles/newrelic-sysmond/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_newrelic_sysmond_source_conf_file: nrsysmond.cfg
2 |
--------------------------------------------------------------------------------
/swap.yml:
--------------------------------------------------------------------------------
1 | - name: Common setup with file swap
2 | hosts: all
3 | roles:
4 | - role: swap
5 |
--------------------------------------------------------------------------------
/user.yml:
--------------------------------------------------------------------------------
1 | - name: Common Users setup
2 | hosts: all
3 | roles:
4 | - role: generic-users
5 |
--------------------------------------------------------------------------------
/roles/(legacy)/lxml/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Either a path to a virtualenv, or "no"
2 | lxml_virtualenv: no
3 |
--------------------------------------------------------------------------------
/roles/(legacy)/tokumx/files/tokumx.list:
--------------------------------------------------------------------------------
1 | deb [arch=amd64] http://s3.amazonaws.com/tokumx-debs trusty main
2 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/test.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | tasks:
3 | - include: 'tasks/main.yml'
4 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # file: timezone/defaults/main.yml
2 |
3 | timezone_zone: ETC
4 |
--------------------------------------------------------------------------------
/mongo.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Set up mongodb
3 | hosts:
4 | - osf-staging
5 | roles:
6 | - role: mongo
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/newrelic/defaults/main.yml:
--------------------------------------------------------------------------------
1 | newrelic_license_key: '...'
2 | newrelic_conf_dir: '/etc/newrelic'
3 |
--------------------------------------------------------------------------------
/roles/(legacy)/tokumx/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart tokumx
2 | service: name=tokumx state=restarted
3 |
--------------------------------------------------------------------------------
/docker.yml:
--------------------------------------------------------------------------------
1 | - name: Docker setup
2 | hosts: all
3 | roles:
4 | - role: csf-docker
5 | - role: docker
6 |
--------------------------------------------------------------------------------
/overcommit-memory.yml:
--------------------------------------------------------------------------------
1 | - name: Overcommit Memory
2 | hosts: all
3 | roles:
4 | - role: overcommit-memory
5 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-haproxy/meta/main.yml:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - { role: docker-rsyslog, when: docker_rsyslog }
3 |
--------------------------------------------------------------------------------
/jenkins-deployment.yml:
--------------------------------------------------------------------------------
1 | - name: Adds deployment scripts
2 | hosts: all
3 | roles:
4 | - role: jenkins-deployment
5 |
--------------------------------------------------------------------------------
/rackspace-multicast.yml:
--------------------------------------------------------------------------------
1 | - name: Rackspace Multicast
2 | hosts: all
3 | roles:
4 | - role: rackspace-multicast
5 |
--------------------------------------------------------------------------------
/roles/rackspace-cloudmonitor/files/README.txt:
--------------------------------------------------------------------------------
1 | Source: https://github.com/racker/rackspace-monitoring-agent-plugins-contrib
2 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/meta/main.yml:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - { role: python }
3 | - { role: postgresql }
4 | # - { role: nginx }
5 |
--------------------------------------------------------------------------------
/roles/chkrootkit/defaults/main.yml:
--------------------------------------------------------------------------------
1 | chkrootkit_rundaily: "true"
2 | chkrootkit_diffmode: "false"
3 | chkrootkit_rundailyopts: "-q"
4 |
--------------------------------------------------------------------------------
/transparent-huge-pages.yml:
--------------------------------------------------------------------------------
1 | - name: Transparent Huge Pages
2 | hosts: all
3 | roles:
4 | - role: transparent-huge-pages
5 |
--------------------------------------------------------------------------------
/roles/htop/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure htop system dependency
2 | become: yes
3 | apt:
4 | state: present
5 | pkg: htop
6 |
--------------------------------------------------------------------------------
/roles/ntp/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure ntp system dependency
2 | become: yes
3 | apt:
4 | state: present
5 | pkg: ntp
6 |
--------------------------------------------------------------------------------
/roles/tmux/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure tmux system dependency
2 | become: yes
3 | apt:
4 | state: present
5 | pkg: tmux
6 |
--------------------------------------------------------------------------------
/tokumx.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Set up tokumx
3 | hosts:
4 | - osf-staging
5 | - osf-production
6 | roles:
7 | - role: tokumx
8 |
--------------------------------------------------------------------------------
/roles/generic-users/test.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | vars_files:
3 | - 'defaults/main.yml'
4 | tasks:
5 | - include: 'tasks/main.yml'
6 |
--------------------------------------------------------------------------------
/roles/ssh/handlers/main.yml:
--------------------------------------------------------------------------------
1 | # file: roles/ssh/handlers/main.yml
2 |
3 | - name: restart ssh
4 | action: service name=ssh state=restarted
5 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/test.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | vars_files:
3 | - 'defaults/main.yml'
4 | tasks:
5 | - include: 'tasks/main.yml'
6 |
--------------------------------------------------------------------------------
/roles/(legacy)/sentry/templates/sentry.conf.j2:
--------------------------------------------------------------------------------
1 | [program:sentry]
2 | command={{ virtualenv_path }}sentry/bin/sentry --config=/etc/sentry/conf.py start
3 |
--------------------------------------------------------------------------------
/roles/csf/templates/ui.allow.j2:
--------------------------------------------------------------------------------
1 | {% if csf_allowed_ips %}
2 |
3 | {% for ip in csf_allowed_ips %}
4 | {{ ip }}
5 | {% endfor %}
6 |
7 | {% endif %}
8 |
--------------------------------------------------------------------------------
/roles/logrotate/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure logrotate system dependency
2 | become: yes
3 | apt:
4 | state: present
5 | pkg: logrotate
6 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-haproxy/templates/49-haproxy.conf.j2:
--------------------------------------------------------------------------------
1 | local0.=info -/log/haproxy_1-access.log
2 | local0.notice -/log/haproxy_1-status.log
3 | local0.* ~
--------------------------------------------------------------------------------
/roles/(legacy)/uwsgi/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
2 | # How to install uwsgi, either 'apt' or 'pip'
3 | uwsgi_install_mode: 'apt'
4 | uwsgi_version: 2.0.5.1
5 |
6 |
--------------------------------------------------------------------------------
/roles/swap/defaults/main.yml:
--------------------------------------------------------------------------------
1 | swap_vm_swappiness: 1
2 | swap_vm_vfs_cache_pressure: 50
3 | swap_swapfile_size: "{{ (ansible_memtotal_mb * 1.5) | int }}m"
4 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/ipv6_module.yml:
--------------------------------------------------------------------------------
1 | # file: roles/nginx/tasks/modules/ipv6_module.yml
2 | # configure flag: --with-ipv6
3 |
4 | # no contents
5 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .AppleDouble
3 | .LSOverride
4 | Icon
5 | ._*
6 | .Spotlight-V100
7 | .Trashes
8 | .vagrant
9 | test
10 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .AppleDouble
3 | .LSOverride
4 | Icon
5 | ._*
6 | .Spotlight-V100
7 | .Trashes
8 | .vagrant
9 | test
10 |
--------------------------------------------------------------------------------
/roles/generic-users/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .AppleDouble
3 | .LSOverride
4 | Icon
5 | ._*
6 | .Spotlight-V100
7 | .Trashes
8 | .vagrant
9 | test
10 |
--------------------------------------------------------------------------------
/roles/security_checks/tasks/restart_csf.yml:
--------------------------------------------------------------------------------
1 | - name: test csf | csf restart
2 | command: csf -r
3 | become: yes
4 | changed_when: false
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/security.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Common Ubuntu security
3 | hosts: all
4 | roles:
5 | - role: csf
6 | - role: chkrootkit
7 | - role: security_checks
8 |
--------------------------------------------------------------------------------
/roles/(legacy)/supervisor/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # file: roles/supervisor/tasks/main.yml
2 |
3 | - name: Install supervisord
4 | apt: pkg=supervisor
5 | become: yes
6 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/handlers/main.yml:
--------------------------------------------------------------------------------
1 | # file: monit/handlers/main.yml
2 |
3 | - name: restart monit
4 | service:
5 | name: monit
6 | state: restarted
7 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .AppleDouble
3 | .LSOverride
4 | Icon
5 | ._*
6 | .Spotlight-V100
7 | .Trashes
8 | .vagrant
9 | test
10 |
--------------------------------------------------------------------------------
/roles/docker-waterbutler/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - include: celery.yml
2 | when: docker_waterbutler_celery
3 |
4 | - include: server.yml
5 | when: docker_waterbutler_server
6 |
--------------------------------------------------------------------------------
/roles/editors/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | editors_emacs_base: "emacs" # Your favorite emacs distribution
4 | editors_vim_base: "vim-nox" # Your favorite vim flavor
5 |
--------------------------------------------------------------------------------
/roles/network_interfaces/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
2 | network_interfaces:
3 | - name: eth1
4 | # address: IP ADDRESS
5 | # netmask: SUBNET MASK
6 |
7 | network_config:
8 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_perl_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_perl_module.yml
2 | # configure flag: --with-http_perl_module
3 |
4 | # no contents
5 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_spdy_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_spdy_module.yml
2 | # configure flag: --with-http_spdy_module
3 |
4 | # no contents
5 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_ssl_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_ssl_module.yml
2 | # configure flag: --with-http_ssl_module
3 |
4 | # no contents
5 |
--------------------------------------------------------------------------------
/roles/chkrootkit/templates/etc_chkrootkit.conf.j2:
--------------------------------------------------------------------------------
1 | RUN_DAILY="{{chkrootkit_rundaily}}"
2 | RUN_DAILY_OPTS="{{chkrootkit_rundailyopts}}"
3 | DIFF_MODE="{{chkrootkit_diffmode}}"
4 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/templates/gitconfig.j2:
--------------------------------------------------------------------------------
1 |
2 | [user]
3 | name = {{gitlab_git_name}}
4 | email = {{gitlab_git_email}}
5 | [core]
6 | autocrlf = input
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/sentry/meta/main.yml:
--------------------------------------------------------------------------------
1 | # file: roles/sentry/meta/main.yml
2 |
3 | dependencies:
4 | - { role: python }
5 | - { role: postgresql }
6 | - { role: supervisor }
7 |
--------------------------------------------------------------------------------
/roles/ansible/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ansible | Install apt and pycurl
2 | apt: pkg={{ item }}
3 | with_items:
4 | - python-apt
5 | - python-pycurl
6 | become: yes
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/supervisor/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: reload supervisor
2 | command: supervisorctl reload
3 |
4 | - name: reload supervisor config
5 | command: supervisorctl reread
6 |
--------------------------------------------------------------------------------
/roles/docker-storage/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # servers
2 |
3 | docker_storage_name: storage_1
4 | docker_storage_copy_ops: []
5 | docker_storage_file_ops: []
6 | docker_storage_volumes: []
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/sentry/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: start sentry
2 | supervisorctl: name=sentry state=started
3 |
4 | - name: restart sentry
5 | supervisorctl: name=sentry state=restarted
6 |
--------------------------------------------------------------------------------
/rackspace.yml:
--------------------------------------------------------------------------------
1 | - name: Set up rackspace services
2 | hosts: all
3 | roles:
4 | - { role: rackspace-cloudbackup, tags: backup }
5 | - { role: rackspace-cloudmonitor, tags: monitor }
6 |
--------------------------------------------------------------------------------
/roles/(legacy)/osf/README.md:
--------------------------------------------------------------------------------
1 | # OSF Ansible Role
2 |
3 | ## TODO:
4 |
5 | - Install R (for mfr)
6 | - mongodb
7 | - nginx
8 | - supervisor
9 | - celery
10 | - local.py template
11 |
12 |
--------------------------------------------------------------------------------
/roles/(legacy)/r-lang/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Install R
4 | apt:
5 | pkg: "{{item}}"
6 | state: present
7 | with_items:
8 | - r-base
9 | - r-base-dev
10 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/test.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | vars_files:
3 | - 'defaults/main.yml'
4 | tasks:
5 | - include: 'tasks/main.yml'
6 | handlers:
7 | - include: 'handlers/main.yml'
8 |
--------------------------------------------------------------------------------
/setup_benchmarking.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Set up benchmarking server
3 | hosts: osf-benchmarking
4 | roles:
5 | - hostname
6 | - generic-users
7 | - apt
8 | - python
9 |
10 |
--------------------------------------------------------------------------------
/docker-share-reg.yml:
--------------------------------------------------------------------------------
1 | - name: Set up share
2 | hosts: all
3 | pre_tasks:
4 | - fail: msg="One or more tags must be specified to run this playbook"
5 | roles:
6 | - role: docker-share-reg
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart gitlab
2 | service: name=gitlab state=restarted
3 | become: yes
4 |
5 | - name: restart nginx
6 | service: name=nginx state=restarted
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/elasticsearch/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Elasticsearch Ansible Handlers
3 |
4 | # Restart Elasticsearch
5 | - name: Restarting Elasticsearch
6 | service: name=elasticsearch state=restarted
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/postgresql/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # These variables are not yet set during configuration, but are here to make
2 | # it possible to plan ahead when writing other roles.
3 |
4 | postgresql_port: 5432
5 |
--------------------------------------------------------------------------------
/roles/rkhunter/tasks/cron.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Add cronjob for rkhunter
3 | copy:
4 | src: etc_cron.sh
5 | dest: "/etc/cron.{{ rkhunter_cron_frequency }}/rkhunter"
6 | mode: 0755
7 | become: yes
8 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/handlers/main.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: restart nginx
3 | service: name=nginx state=restarted
4 | become: yes
5 |
6 | - name: reload nginx
7 | service: name=nginx state=reloaded
8 | become: yes
9 |
--------------------------------------------------------------------------------
/roles/generic-users/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # file: generic-users/defaults/main.yml
2 |
3 | genericusers_groups: []
4 | genericusers_users: []
5 |
6 | genericusers_groups_removed: []
7 | genericusers_users_removed: []
8 |
--------------------------------------------------------------------------------
/Brewfile:
--------------------------------------------------------------------------------
1 | update
2 |
3 | # install virtualbox and vagrant with homebrew-cask
4 | install caskroom/cask/brew-cask
5 | cask install virtualbox --force
6 | cask install vagrant
7 |
8 | cleanup
9 | cask cleanup
10 |
--------------------------------------------------------------------------------
/docker-elasticsearch.yml:
--------------------------------------------------------------------------------
1 | - name: Set up elasticsearch
2 | hosts: all
3 | pre_tasks:
4 | - fail: msg="One or more tags must be specified to run this playbook"
5 | roles:
6 | - role: docker-elasticsearch
7 |
--------------------------------------------------------------------------------
/newrelic-sysmond.yml:
--------------------------------------------------------------------------------
1 | - name: New Relic Sysmon Daemon
2 | hosts: all
3 | vars:
4 | docker_newrelic_sysmond_source_conf_file: "{{ root_source_conf_dir }}newrelic/nrsysmond.cfg"
5 | roles:
6 | - role: newrelic-sysmond
7 |
--------------------------------------------------------------------------------
/roles/apt/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Update APT package cache
3 | apt: update_cache=yes
4 | become: yes
5 | when: apt_update_cache|bool
6 |
7 | - name: Run apt-get upgrade
8 | apt: upgrade=dist
9 | become: yes
10 |
--------------------------------------------------------------------------------
/roles/fwknop/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Access Configuration
2 |
3 | fwknop_pcap_intf: eth0
4 | fwknop_access_stanzas:
5 | - source: ANY
6 | open_ports: tcp/22
7 | key_base64: __change_me__
8 | hmac_key_base64: __change_me__
9 |
--------------------------------------------------------------------------------
/roles/docker-memcached/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_memcached_name: memcached_1
2 | docker_memcached_command: "memcached -vv"
3 | docker_memcached_image: memcached:1
4 | docker_memcached_expose:
5 | - 11211
6 | docker_memcached_ports: []
7 |
--------------------------------------------------------------------------------
/roles/overcommit-memory/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Set vm.overcommit_memory in /etc/sysctl.conf
2 | sysctl:
3 | name: vm.overcommit_memory
4 | value: "{{ overcommit_memory }}"
5 | state: present
6 | tags:
7 | - install
8 |
9 |
--------------------------------------------------------------------------------
/roles/skel/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # .bashrc changes
2 | # - modified \h to \H to display FQDN
3 |
4 | - name: Set /etc/skel/.bashrc
5 | become: yes
6 | copy:
7 | src: .bashrc
8 | dest: /etc/skel/.bashrc
9 | mode: 0644
10 |
--------------------------------------------------------------------------------
/roles/security_checks/tasks/stop_testing.yml:
--------------------------------------------------------------------------------
1 | - name: test csf | Disable testing mode
2 | lineinfile: dest=/etc/csf/csf.conf regexp="^TESTING\s?=" line='TESTING="0"' state=present
3 | changed_when: false
4 |
5 | - include: restart_csf.yml
6 |
--------------------------------------------------------------------------------
/roles/docker-fluentd/files/etc/fluent.conf:
--------------------------------------------------------------------------------
1 |
2 | type tail
3 | path /var/log/auth.log
4 | pos_file /log/host.pos
5 | tag host.auth.log
6 | format syslog
7 |
8 |
9 |
10 | type null
11 |
12 |
--------------------------------------------------------------------------------
/roles/rkhunter/files/etc_cron.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # --cronjob suppresses colored output and interactive key presses
4 | # --update keeps definitions up to date
5 | # --quiet suppresses all output
6 | /usr/bin/rkhunter --cronjob --update --quiet
7 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/vars/Suse.yml:
--------------------------------------------------------------------------------
1 | # file: build-essential/vars/Suse.yml
2 |
3 | buildessential_packages:
4 | - autoconf
5 | - bison
6 | - flex
7 | - gcc
8 | - gcc-c++
9 | - kernel-default-devel
10 | - make
11 | - m4
12 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | # file: build-essential/vars/Debian.yml
2 |
3 | buildessential_packages:
4 | - autoconf
5 | - binutils-doc
6 | - bison
7 | - build-essential
8 | - flex
9 | - gettext
10 | - ncurses-dev
11 |
--------------------------------------------------------------------------------
/roles/(legacy)/rackspace-multicast/defaults/main.yml:
--------------------------------------------------------------------------------
1 | rackspace_multicast_interface_name: eth2
2 |
3 | # Remember to enable firewall inbound/outbound rules
4 | #sudo iptables -I INPUT -d 224.0.0.0/4 -j ACCEPT
5 | #sudo iptables -I OUTPUT -d 224.0.0.0/4 -j ACCEPT
6 |
--------------------------------------------------------------------------------
/roles/docker/templates/logrotate.j2:
--------------------------------------------------------------------------------
1 | "/var/lib/docker/containers/*/*-json.log" {
2 | daily
3 | rotate 5
4 | compress
5 | copytruncate
6 | notifempty
7 | missingok
8 | size 10M
9 | create 600 root
10 | su root
11 | }
12 |
--------------------------------------------------------------------------------
/roles/editors/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Make sure vim-tiny is uninstalled
2 | apt:
3 | pkg: "vim-tiny"
4 | state: absent
5 |
6 |
7 | - name: Make sure your vim is installed
8 | apt:
9 | pkg: "{{editors_vim_base}}"
10 | state: present
11 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/templates/etc_monit_conf.d_cron.j2:
--------------------------------------------------------------------------------
1 | check process cron with pidfile /var/run/crond.pid
2 | group system
3 | start program = "/etc/init.d/cron start"
4 | stop program = "/etc/init.d/cron stop"
5 | if 5 restarts within 5 cycles then timeout
6 |
--------------------------------------------------------------------------------
/provision.yml:
--------------------------------------------------------------------------------
1 | - include: site.yml
2 |
3 | - name: Set up rkhunter
4 | hosts: all
5 | roles:
6 | - role: rkhunter
7 | vars:
8 | rkhunter_propupd: yes
9 | rkhunter_update: yes
10 | rkhunter_update_apt: yes
11 | rkhunter_run_checks: yes
12 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/400.http:
--------------------------------------------------------------------------------
1 | HTTP/1.0 400 Bad request
2 | Cache-Control: no-cache
3 | Connection: close
4 | Content-Type: text/html
5 |
6 |
400 Bad request
7 | Your browser sent an invalid request.
8 |
9 |
10 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/403.http:
--------------------------------------------------------------------------------
1 | HTTP/1.0 403 Forbidden
2 | Cache-Control: no-cache
3 | Connection: close
4 | Content-Type: text/html
5 |
6 | 403 Forbidden
7 | Request forbidden by administrative rules.
8 |
9 |
10 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/500.http:
--------------------------------------------------------------------------------
1 | HTTP/1.0 500 Server Error
2 | Cache-Control: no-cache
3 | Connection: close
4 | Content-Type: text/html
5 |
6 | 500 Server Error
7 | An internal server error occurred.
8 |
9 |
10 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/vars/Fedora.yml:
--------------------------------------------------------------------------------
1 | # file: build-essential/vars/Fedora.yml
2 |
3 | buildessential_packages:
4 | - autoconf
5 | - gcc
6 | - bison
7 | - flex
8 | - gcc-c++
9 | - gettext
10 | - kernel-devel
11 | - make
12 | - m4
13 | - ncurses-devel
14 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/504.http:
--------------------------------------------------------------------------------
1 | HTTP/1.0 504 Gateway Time-out
2 | Cache-Control: no-cache
3 | Connection: close
4 | Content-Type: text/html
5 |
6 | 504 Gateway Time-out
7 | The server didn't respond in time.
8 |
9 |
10 |
--------------------------------------------------------------------------------
/roles/logentries/defaults/main.yml:
--------------------------------------------------------------------------------
1 | logentries_account_key:
2 | logentries_source_filters_file: filters.py
3 | logentries_metrics_token:
4 | logentries_files_to_monitor: []
5 | # - filename: "/path/to/file.txt"
6 | # name: "log name"
7 | # type: "log type, e.g. python"
8 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/502.http:
--------------------------------------------------------------------------------
1 | HTTP/1.0 502 Bad Gateway
2 | Cache-Control: no-cache
3 | Connection: close
4 | Content-Type: text/html
5 |
6 | 502 Bad Gateway
7 | The server returned an invalid or incomplete response.
8 |
9 |
10 |
--------------------------------------------------------------------------------
/roles/docker-scrapi/files/fluentd/fluent.conf:
--------------------------------------------------------------------------------
1 |
2 | type forward
3 |
4 |
5 |
6 | type elasticsearch
7 | host "elasticsearch"
8 | port "9200"
9 | logstash_format true
10 | include_tag_key true
11 | tag_key @tag
12 |
13 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/nginx/conf.d/badges.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 4200;
3 | server_name staging-badges.cos.io;
4 | keepalive_timeout 15;
5 |
6 | root /badges;
7 | client_max_body_size 10M;
8 |
9 | location / {
10 | alias /badges/;
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-haproxy/templates/logrotate.j2:
--------------------------------------------------------------------------------
1 | "{{ docker_rsyslog_log_dir }}/*.log" {
2 | daily
3 | rotate 60
4 | compress
5 | delaycompress
6 | copytruncate
7 | notifempty
8 | missingok
9 | size 1k
10 | create 660 root
11 | su root
12 | }
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-rsyslog/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_rsyslog_name: rsyslog_1
2 | docker_rsyslog_conf_dir: /opt/rsyslog/conf
3 | docker_rsyslog_log_dir: /var/log/rsyslog
4 | docker_rsyslog_log_rotate_file: rsyslog
5 | docker_rsyslog_log_rotate_source_file: logrotate.j2
6 | docker_rsyslog_env: {}
7 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-rsyslog/templates/logrotate.j2:
--------------------------------------------------------------------------------
1 | "{{ docker_rsyslog_log_dir }}/*.log" {
2 | daily
3 | rotate 60
4 | compress
5 | delaycompress
6 | copytruncate
7 | notifempty
8 | missingok
9 | size 1k
10 | create 660 root
11 | su root
12 | }
13 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/408.http:
--------------------------------------------------------------------------------
1 | HTTP/1.0 408 Request Time-out
2 | Cache-Control: no-cache
3 | Connection: close
4 | Content-Type: text/html
5 |
6 | 408 Request Time-out
7 | Your browser didn't send a complete request in time.
8 |
9 |
10 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/503.http:
--------------------------------------------------------------------------------
1 | HTTP/1.0 503 Service Unavailable
2 | Cache-Control: no-cache
3 | Connection: close
4 | Content-Type: text/html
5 |
6 | 503 Service Unavailable
7 | No server is available to handle this request.
8 |
9 |
10 |
--------------------------------------------------------------------------------
/roles/jenkins-deployment/defaults/main.yml:
--------------------------------------------------------------------------------
1 | deploy_script_owner: jenkins
2 | deploy_script_source_file: "{{ root_source_conf_dir }}deploy/{{ docker_env }}/deploy.sh"
3 | deploy_script_file_dir: "/home/{{ deploy_script_owner }}/deploy/"
4 | deploy_script_file: "{{ deploy_script_file_dir }}deploy.sh"
5 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/templates/default.site.j2:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name {{inventory_hostname}};
4 |
5 | access_log {{nginx_log_dir}}/default.access.log;
6 |
7 | location / {
8 | root {{nginx_default_root}};
9 | index index.html index.htm;
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/nginx/conf.d/isp.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 4200;
3 | server_name staging-isp.osf.io;
4 | keepalive_timeout 15;
5 |
6 | root /isp/dist;
7 | client_max_body_size 10M;
8 |
9 | location / {
10 | try_files $uri /index.html;
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/roles/security_checks/tasks/start_testing.yml:
--------------------------------------------------------------------------------
1 | - name: test csf | Enable testing mode
2 | lineinfile: dest=/etc/csf/csf.conf regexp="^TESTING\s?=" line='TESTING="1"' state=present
3 | tags:
4 | - csf
5 | - testing
6 | changed_when: false
7 |
8 |
9 | - include: restart_csf.yml
10 |
--------------------------------------------------------------------------------
/roles/docker-cas/files/shibboleth-sp/apache2/ports.conf:
--------------------------------------------------------------------------------
1 | # If you just change the port or add more ports here, you will likely also
2 | # have to change the VirtualHost statement in
3 | # /etc/apache2/sites-enabled/000-default.conf
4 |
5 | Listen 8080
6 |
7 | # vim: syntax=apache ts=4 sw=4 sts=4 sr noet
8 |
--------------------------------------------------------------------------------
/roles/csf/tasks/deps.yml:
--------------------------------------------------------------------------------
1 | - name: Install sendmail
2 | apt: pkg={{ item }}
3 | become: yes
4 | register: install_sendmail
5 | with_items:
6 | - sendmail-bin
7 | - sendmail
8 |
9 | - name: Install libwww-perl
10 | apt: pkg=libwww-perl
11 | become: yes
12 | register: install_libwww
13 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/nginx/conf.d/lookit.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 4200;
3 | server_name staging-lookit.osf.io;
4 | keepalive_timeout 15;
5 |
6 | root /lookit/dist;
7 | client_max_body_size 10M;
8 |
9 | location / {
10 | try_files $uri /index.html;
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/roles/python/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install source files for building Python packages
2 | apt: pkg=python-dev
3 | become: yes
4 |
5 | - name: Ensure pip is installed
6 | apt: pkg=python-pip
7 | become: yes
8 |
9 | - name: Ensure virtualenv is installed
10 | pip: name=virtualenv state=present
11 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # file: timezone/tasks/main.yml
2 |
3 | - name: ntp | timezone | Make sure tzdata is installed
4 | apt:
5 | pkg: tzdata
6 | state: present
7 |
8 | - name: ntp | timezone | Update the hardware clock
9 | command: dpkg-reconfigure -f noninteractive tzdata
10 |
--------------------------------------------------------------------------------
/roles/(legacy)/osf/meta/main.yml:
--------------------------------------------------------------------------------
1 |
2 | dependencies:
3 | - {role: mongo}
4 | - {role: python}
5 | - {role: lxml}
6 | - {role: bower}
7 | - {role: nginx}
8 | - {role: uwsgi}
9 | - {role: supervisor}
10 | - {role: github}
11 | - {role: r-lang}
12 | - {role: env-vars, envvars: "{{osf_env_vars}}"}
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/osf/templates/nginx.conf.j2:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name {{inventory_hostname}};
4 |
5 | access_log {{nginx_log_dir}}/default.access.log;
6 |
7 | location / {
8 | include uwsgi_params;
9 | uwsgi_pass unix://{{ osf_uwsgi_socket }};
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/roles/(legacy)/osf/templates/uwsgi.conf.j2:
--------------------------------------------------------------------------------
1 | [program:uwsgi]
2 | directory={{osf_repo_dir}}
3 | command=uwsgi uwsgi.ini
4 | user=www-data
5 | autostart=true
6 | autorestart=true
7 | stdout_logfile=/var/log/uwsgi/uwsgi.log
8 | stderr_logfile=/var/log/uwsgi/uwsgi.log
9 | environment=PATH="{{ osf_virtualenv }}bin"
10 |
--------------------------------------------------------------------------------
/roles/(legacy)/postgresql/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - name: Install postgresql
4 | apt: pkg={{ item }}
5 | become: yes
6 | with_items:
7 | - postgresql-9.3
8 | - postgresql-server-dev-9.3
9 | # NOTE: psycopg2 is required for the postgresql_db module
10 | - libpq-dev
11 | - python-psycopg2
12 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/templates/etc_monit_conf.d_ntp.j2:
--------------------------------------------------------------------------------
1 | check process ntpd with pidfile /var/run/ntpd.pid
2 | group system
3 | start program = "/etc/init.d/ntp start"
4 | stop program = "/etc/init.d/ntp stop"
5 | if failed host 127.0.0.1 port 123 type udp then alert
6 | if 5 restarts within 5 cycles then timeout
7 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/templates/etc_monit_conf.d_sshd.j2:
--------------------------------------------------------------------------------
1 | check process sshd with pidfile /var/run/sshd.pid
2 | group system
3 | start program "/etc/init.d/ssh start"
4 | stop program "/etc/init.d/ssh stop"
5 | if failed host 127.0.0.1 port 22 protocol ssh then restart
6 | if 5 restarts within 5 cycles then timeout
7 |
--------------------------------------------------------------------------------
/roles/docker-osf/files/conf/api/local.py:
--------------------------------------------------------------------------------
1 | from . import defaults
2 |
3 | API_BASE = 'v2/'
4 | #SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # necessary when offloading ssl
5 | STATIC_URL = '/{}static/'.format(API_BASE)
6 | SWAGGER_SETTINGS = dict(defaults.SWAGGER_SETTINGS, base_path='test-api.osf.io/v2/docs')
7 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/nginx/conf.d/experimenter.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 4200;
3 | server_name staging-experimenter.osf.io;
4 | keepalive_timeout 15;
5 |
6 | root /experimenter/dist;
7 | client_max_body_size 10M;
8 |
9 | location / {
10 | try_files $uri /index.html;
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/env-vars/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Ensure /etc/environment exists
4 | file:
5 | path: /etc/environment
6 | state: touch
7 |
8 | - name: Set global environment variables
9 | lineinfile:
10 | line: "{{item.var}}={{item.val}}"
11 | dest: /etc/environment
12 | with_items: envvars
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/google_perftools_module.yml:
--------------------------------------------------------------------------------
1 | # file: roles/nginx/tasks/modules/google_perftools_module.yml
2 | # configure flag: --with-google_perftools_module
3 |
4 | - name: Modules | Make sure the libgoogle-perftools-dev package is installed
5 | apt:
6 | pkg: libgoogle-perftools-dev
7 | state: present
8 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/nginx/conf.d/cos.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 4200;
3 | server_name staging-legacy.cos.io;
4 | keepalive_timeout 15;
5 | port_in_redirect off;
6 |
7 | root /cos/frozen/;
8 | client_max_body_size 10M;
9 |
10 | location / {
11 | alias /cos/frozen/;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/roles/(legacy)/uwsgi/files/uwsgi.conf:
--------------------------------------------------------------------------------
1 | description "uWSGI"
2 | start on runlevel [2345]
3 | stop on runlevel [06]
4 | respawn
5 |
6 | env UWSGI=/usr/local/bin/uwsgi
7 | env LOGTO=/var/log/uwsgi/emperor.log
8 |
9 | exec $UWSGI --master --emperor /etc/uwsgi/apps-enabled --die-on-term --uid www-data --gid www-data --logto $LOGTO
10 |
--------------------------------------------------------------------------------
/roles/docker-openvpn/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # openvpn
2 |
3 | docker_openvpn_name: openvpn_1
4 | docker_openvpn_image: centerforopenscience/openvpn
5 | docker_openvpn_net: bridge
6 | docker_openvpn_hostname: "{{ hostname_name }}"
7 | docker_openvpn_privileged: no
8 | docker_openvpn_volumes: []
9 | docker_openvpn_volumes_from: []
10 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-logentries/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_logentries_name: logentries_1
2 | docker_logentries_logs_token: "..."
3 | docker_logentries_stats_token: "..."
4 | docker_logentries_command: "-l {{ docker_logentries_logs_token }} -k {{ docker_logentries_stats_token }} -j -a host={{ hostname_name }}"
5 | docker_logentries_env: {}
6 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/_authorized_ips.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/authorized_ips.yml
2 |
3 | - name: Modules | Updated the authorized_ip file
4 | template:
5 | src: ../../templates/modules/authorized_ips.j2
6 | dest: "{{nginx_dir}}/authorized_ips"
7 | owner: root
8 | group: root
9 | mode: 0644
10 |
--------------------------------------------------------------------------------
/roles/docker-dor/files/nginx/conf.d/dor.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80 default_server;
3 |
4 | location /static {
5 | alias /code/static;
6 | }
7 |
8 | location / {
9 | # Pass requests to uwsgi application
10 | include uwsgi_params;
11 | uwsgi_pass uwsgi://server:8000;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/roles/docker-jam/files/nginx/conf.d/jam.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 1212 default_server;
3 |
4 | location /static {
5 | alias /code/static;
6 | }
7 |
8 | location / {
9 | # Pass requests to uwsgi application
10 | include uwsgi_params;
11 | uwsgi_pass unix:///tmp/uwsgi.sock;
12 | }
13 | }
--------------------------------------------------------------------------------
/roles/generic-users/meta/main.yml:
--------------------------------------------------------------------------------
1 | # file: generic-users/meta/main.yml
2 |
3 | galaxy_info:
4 | author: pjan vandaele
5 | company: Ansibles
6 | description: Manage users
7 | min_ansible_version: 1.4
8 | license: MIT
9 | platforms:
10 | - name: Ubuntu
11 | versions:
12 | - all
13 | categories:
14 | - system
15 |
--------------------------------------------------------------------------------
/roles/docker-scrapi/files/nginx/conf.d/scrapi.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80 default_server;
3 |
4 | location /static {
5 | alias /code/static;
6 | }
7 |
8 | location / {
9 | # Pass requests to uwsgi application
10 | include uwsgi_params;
11 | uwsgi_pass unix:///tmp/uwsgi.sock;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/roles/security_checks/tasks/tests/check_configuration.yml:
--------------------------------------------------------------------------------
1 | # Check csf configuration
2 | # For some reason, csf logs configuration errors to stdout, so fail when the
3 | # word "error" is found in stdout
4 | - name: check csf configuration
5 | command: csf -c
6 | register: csf_conf_check
7 | failed_when: "'error' in csf_conf_check.stdout.lower()"
8 |
--------------------------------------------------------------------------------
/roles/security_checks/tasks/tests/deny_brute_force_ssh.yml:
--------------------------------------------------------------------------------
1 | # CSF checks
2 |
3 | - include: ../start_testing.yml
4 |
5 | - name: test csf | Brute Force SSH is blocked
6 | connection: local
7 | become: no
8 | shell: ssh {{ inventory_hostname }} -o PubkeyAuthentication=no
9 | when: check_portscan
10 |
11 | - include: ../stop_testing.yml
12 |
--------------------------------------------------------------------------------
/roles/security_checks/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
2 | check_mailers: yes
3 | check_csf: yes
4 |
5 | check_portscan: yes
6 | # csf should hang connections if a portscan is detected; the port scan denial
7 | # test will wait this number of seconds before declaring a port scan denied
8 | check_portscan_timeout: 3
9 | # Remote hosts to attempt to portscan
10 |
--------------------------------------------------------------------------------
/roles/ssh/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # file: roles/ssh/tasks/main.yml
2 |
3 | - name: SSH | Update the ssh security configuration (/etc/ssh/sshd_config)
4 | template: src=etc_ssh_sshd_config.j2 dest=/etc/ssh/sshd_config owner=root group=root mode=0644
5 | notify:
6 | - restart ssh
7 |
8 | - include: test.yml
9 | when: ssh_test
10 | tags:
11 | - test
12 |
--------------------------------------------------------------------------------
/roles/(legacy)/bower/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Make sure nodejs and npm are installed
2 | apt: pkg={{item}} state=present
3 | with_items:
4 | - nodejs
5 | - npm
6 |
7 | - name: Symlink "nodejs" -> "node"
8 | file: src="/usr/bin/nodejs" dest="/usr/bin/node" state=link
9 | become: yes
10 |
11 | - name: Install bower
12 | npm: name=bower global=yes
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/tasks/ruby.yml:
--------------------------------------------------------------------------------
1 | - name: Uninstall Ruby 1.8
2 | apt: pkg="ruby1.8" state=absent
3 |
4 | - name: Check if ruby exists
5 | command: "ruby --version"
6 | register: ruby_version_check
7 |
8 | # - debug: var=ruby_version_check
9 |
10 | - include: install_ruby_from_source.yml
11 | when: "ruby_version_check.stdout.find('2.1.2') == -1"
12 |
--------------------------------------------------------------------------------
/roles/csf-docker/files/csf-docker-wait.conf:
--------------------------------------------------------------------------------
1 | description "CSF Docker Wait"
2 |
3 | start on starting docker
4 | stop on started docker or stopped docker
5 |
6 | instance $JOB
7 | normal exit 2
8 | task
9 |
10 | script
11 | while [ 1 ]
12 | do
13 | iptables -L LOGDROPOUT 1> /dev/null && break
14 | sleep 1
15 | done
16 |
17 | exit 0
18 | end script
19 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/meta/main.yml:
--------------------------------------------------------------------------------
1 | # file: timezone/meta/main.yml
2 |
3 | galaxy_info:
4 | author: pjan vandaele
5 | company: Ansibles
6 | description: set/update the timezone
7 | min_ansible_version: 1.2
8 | license: MIT
9 | platforms:
10 | - name: Ubuntu
11 | versions:
12 | - all
13 | categories:
14 | - system
15 |
16 | dependencies: []
17 |
--------------------------------------------------------------------------------
/roles/csf-docker/files/csf-docker-started.conf:
--------------------------------------------------------------------------------
1 | description "CSF Docker Started"
2 |
3 | start on started docker
4 | stop on stopped docker
5 |
6 | instance $JOB
7 | normal exit 2
8 | task
9 |
10 | script
11 | while [ 1 ]
12 | do
13 | iptables -L DOCKER 1> /dev/null && break
14 | sleep 1
15 | done
16 |
17 | /etc/csf/csfdocker.sh
18 | exit 0
19 | end script
20 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/files/conf/errors/README:
--------------------------------------------------------------------------------
1 | These files are default error files that can be customized
2 | if necessary. They are complete HTTP responses, so that
3 | everything is possible, including using redirects or setting
4 | special headers.
5 |
6 | They can be used with the 'errorfile' keyword like this :
7 |
8 | errorfile 503 /etc/haproxy/errors/503.http
9 |
10 |
--------------------------------------------------------------------------------
/roles/ssh/tasks/test.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: test ssh | Password authentication disallowed
3 | connection: local
4 | # Force password login; login should fail
5 | shell: ssh {{ssh_test_user}}@localhost -p {{ssh_test_port}} -o PubkeyAuthentication=no
6 | when: ssh_passwordauthentication == "no"
7 | register: login_attempt
8 | failed_when: login_attempt|success
9 |
10 |
11 |
--------------------------------------------------------------------------------
/roles/chkrootkit/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Make sure chkrootkit is installed
3 | apt:
4 | pkg: "chkrootkit"
5 | state: present
6 |
7 | - name: Update chkrootkit configuration
8 | template: src=etc_chkrootkit.conf.j2 dest=/etc/chkrootkit.conf owner=root group=root mode=0644
9 |
10 | - name: Run chkrootkit
11 | shell: chkrootkit
12 |
13 | # TODO: copy chkrootkit.conf
14 |
--------------------------------------------------------------------------------
/roles/(legacy)/lxml/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install lxml dependencies
2 | apt: "pkg={{ item }} state=present"
3 | become: yes
4 | with_items:
5 | - libxml2
6 | - libxml2-dev
7 | - libxslt1-dev
8 | - lib32z1-dev
9 | - libssl-dev
10 | - zlib1g-dev
11 |
12 | - name: install lxml
13 | pip: name=lxml virtualenv={{lxml_virtualenv}}
14 | environment:
15 | CFLAGS: "-O0"
16 |
--------------------------------------------------------------------------------
/roles/(legacy)/osf/templates/uwsgi.ini.j2:
--------------------------------------------------------------------------------
1 | [uwsgi]
2 | chdir = /opt/apps/osf
3 | module = main:app
4 | socket = {{osf_uwsgi_socket}}
5 | virtualenv = {{osf_virtualenv}}
6 | env = OSF_PRODUCTION=1
7 | processes = {{osf_uwsgi_processes}}
8 | threads = {{osf_uwsgi_threads}}
9 | harakiri = {{osf_uwsgi_harakiri}}
10 | cpu-affinity = 1
11 | buffer-size = {{osf_uwsgi_buffersize}}
12 | show-config
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_realip_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_realip_module.yml
2 | # configure flag: --with-http_realip_module
3 |
4 | - name: Modules | Update the http_realip_module configuration
5 | template:
6 | src: ../../templates/modules/http_realip.conf.j2
7 | dest: "{{nginx_dir}}/conf.d/http_realip.conf"
8 | owner: root
9 | group: root
10 | mode: 0644
11 |
--------------------------------------------------------------------------------
/roles/network_interfaces/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Update /etc/network/interfaces
3 | template: src=interfaces.j2 dest=/etc/network/interfaces mode=0644
4 |
5 | - name: Restart networking
6 | command: /etc/init.d/networking restart
7 | # for some reason, this command seems to return a 1 exit code even if restarting occurred successfully.
8 | # look into this in the future.
9 | ignore_errors: yes
10 |
--------------------------------------------------------------------------------
/group_vars/pubkeys/bgeiger.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDGiOukAdwTEVkPx2fzGKfFxBJahUJHnGhr/sX0NfEoO8zbBZXm/mx7Jw0uTyYiYkFrVdLTOTM68DeUu/g613nVzmm7PcHwrQ5LSL1T9RFmA/j3PCT7RXLuytz0sa9eP9eC7AT6I0qtVjsWIk4LJ21JhZdBBi5GkmGNhj0/AiWfUgDp4p+O23BGE2Le3Yz/XZDVVBk06aR0wDf1K7KrhJCsQkdHbz2207XZyjhSOsswXCTURYBc2mfWKXko+R4pwzIjJqAawsqmYu9hs9juDq0aU3SQipYEVFxmXvuHGDUTXjBNOk1WmKXzh+yVQ9gOHa7s+bmaiSFxpKUALuIBAwT1 bgeiger@pobox.com
2 |
--------------------------------------------------------------------------------
/group_vars/pubkeys/cseto.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC+DpqR120PSV9hJiB3OVmnphLNKMGbqYGOJzCG5lScWO5OAI4UjgMGfumgkjTKvHo54Sre3f1wqxgy9SUCKVgww1sUnoUNZ/wqhCuYCx6XV+A9wP2ZpfrgMWy+LpJpZtceuyvbElCEO3181q7s2j02U8+cr2j38aR6d1VeFVq8cQKkNjbypOaGXMEDDLbHzLz6469myNoV6/DoQFbPG+Az56DdK09FnaydMXw5G+Rkhz7cGNIIb0fKt+nMY7H4LxrE4P6NxOyhnxx3wxWAh8tcAkPZh252SiYcxXUbrpqOjYLdO7tMyXTTrD1NgsRO4xyqEK8dGB8KWoF1uSBN1093 chrisseto@osf.io
2 |
--------------------------------------------------------------------------------
/group_vars/pubkeys/michael.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCr6BXe1LEo4+Gc0rNg0w5IABZTmCGanN25ElvRTOnmRYYQWac2v5dl59RI3uRLBBzo/KYXCGSb2lXNbka8S9C+KCSBA9V5U4Wlau7DuArL0fwl6xMFkKZor+fC2NGLFIpXX39xe2deMrUkIYnO14DJpHdcopCn2yFodOilpsUqqHFuelryYMIXP66uRd6Mquj4ILiBvRWSgmX2HNiS7nrBXgfomlQq5RLQnoOkB6EgG8A1g1/sSg7prwZKQpoJfo/eyXeMbG7trefGDz0vLEuUwXfgByYxkmgOYA6ttyC+GIymXaPV05uRd1HQ6A/5U7M+HikFSnKmfCwINUt1q9h5 michael@Citidel.local
--------------------------------------------------------------------------------
/group_vars/pubkeys/sloria.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6D05yHt+UeRI6YAAjnU1yHLS02wVOAIAHnGE3U7axrJdDn49ft94/CNqMqxWo9lDS7VoaxtBQfVZioqRdN+PuCB5lc6hJ1M5oRC+8YN1g8javLUygL7U80VDcSBd/2L5wJI572tJBEUg2R1Syume5RgVIbN2ieEY82AU0TDZZvN1VM0Be7+wSXs1u6Czzq5qulH9GyuJfr83vPmvEW+FIGj1rbR/+t/ghbc/38GZnAvpxWZvi9i4mEV5GME2UqSwiNdE75yMLI89BC/hHymKP5BhQNg1i6MjWchx+PwWrnmyECtuuUbbrDFd6bHlJfKwXA5L8FxjMnHC0NmaQ+5AX sloria@stevens-air
2 |
--------------------------------------------------------------------------------
/playbooks/testing.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Enable testing mode on vagrant boxes
4 | - name: Enable csf testing mode
5 | hosts: vagrantbox
6 | tasks:
7 | - name: csf | enable testing mode
8 | lineinfile: dest=/etc/csf/csf.conf regexp="^TESTING\s?=" line='TESTING="1"' state=present
9 | - name: csf restart
10 | shell: csf -r
11 | - name: lfd restart
12 | service: name=lfd state=restarted
13 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 | before_install:
5 | - sudo apt-get update -qq
6 | - sudo apt-get install -qq python-apt python-pycurl
7 | install:
8 | - pip install ansible==1.5.0
9 | script:
10 | - echo localhost > inventory
11 | - ansible-playbook --syntax-check -i inventory test.yml
12 | - ansible-playbook -i inventory test.yml --connection=local --sudo
13 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 | before_install:
5 | - sudo apt-get update -qq
6 | - sudo apt-get install -qq python-apt python-pycurl
7 | install:
8 | - pip install ansible==1.5.0
9 | script:
10 | - echo localhost > inventory
11 | - ansible-playbook --syntax-check -i inventory test.yml
12 | - ansible-playbook -i inventory test.yml --connection=local --sudo
13 |
--------------------------------------------------------------------------------
/roles/generic-users/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 | before_install:
5 | - sudo apt-get update -qq
6 | - sudo apt-get install -qq python-apt python-pycurl
7 | install:
8 | - pip install ansible==1.5.0
9 | script:
10 | - echo localhost > inventory
11 | - ansible-playbook --syntax-check -i inventory test.yml
12 | - ansible-playbook -i inventory test.yml --connection=local --sudo
13 |
--------------------------------------------------------------------------------
/roles/(legacy)/rackspace-multicast/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Set net.ipv4.icmp_echo_ignore_broadcasts in /etc/sysctl.conf
2 | sysctl:
3 | name: net.ipv4.icmp_echo_ignore_broadcasts
4 | value: 0
5 | state: present
6 | tags:
7 | - install
8 | - upgrade
9 |
10 | - name: Add a route for the multicast network range
11 | command: "ip route add 224.0.0.0/4 dev {{ rackspace_multicast_interface_name }}"
12 |
--------------------------------------------------------------------------------
/common.yml:
--------------------------------------------------------------------------------
1 | # Execute common roles
2 | - name: Common Ubuntu setup
3 | hosts: all
4 | roles:
5 | - role: apt
6 | - role: hostname
7 | - role: skel
8 | # - role: network_interfaces
9 | - role: generic-users
10 | - role: Ansibles.timezone
11 | - role: ssh
12 | - role: htop
13 | - role: ntp
14 | - role: tmux
15 | - role: logrotate
16 | - role: rsyslog
17 | - role: editors
18 |
--------------------------------------------------------------------------------
/group_vars/pubkeys/fabian.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC252BsSRc29V4SfHF9gzVh5+z5mQwbp7kKrnTROMju95V+LrbwwiB2kWgSPFhydCETHXATr3w5bPZ4jU81p/XSD3kwPGk03rnHpJV/FSmJJNHkROWwhFyxaFpw22uMy5YGulpkIs09Qu+0hst6WgRWEBMsjBg24kuqOQqUxqeBwtJoKlZVpN3cJQ53A+gPeUSri3GEqJT7itUFqo9me35HsaJe2KGcCeOToptOJ6exYJDK8cZ+mwPhc/NrzUjSMT0MFQvmvzRfKrVw4H4e9tmo2onEgYcDlY3O2lLh8Vu2rUCYAvvsFdUBnciOuiQav+C1NZPVEc9o3POSvVUqiKXT fabian@fabian-ThinkPad-W510
2 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 | before_install:
5 | - sudo apt-get update -qq
6 | - sudo apt-get install -qq python-apt python-pycurl
7 | install:
8 | - pip install ansible==1.5.0
9 | script:
10 | - echo localhost > inventory
11 | - ansible-playbook --syntax-check -i inventory test.yml
12 | - ansible-playbook -i inventory test.yml --connection=local --sudo
13 |
--------------------------------------------------------------------------------
/roles/rsyslog/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Enable rsyslog cron.log
2 | lineinfile:
3 | dest: /etc/rsyslog.d/50-default.conf
4 | regexp: ^(#|)cron.*
5 | line: cron.* /var/log/cron.log
6 | tags:
7 | - install
8 | - upgrade
9 |
10 |
11 | - name: Restart rsyslog service
12 | service:
13 | name: rsyslog
14 | state: restarted
15 | tags:
16 | - install
17 | - upgrade
18 |
--------------------------------------------------------------------------------
/group_vars/pubkeys/joshcarp.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBKbtgvnsfuvm719xE9FcX3599VKi7slCKH8KV0sCRY2o7PladpDgej8u2J5meTkTOhbOkQiREaWoot9pibvUZzfzR3fxg8hMc51bGXUqWssawD4wgPV6Ms/25DAm4iOkh7pyIRPiNy4I+VcabHb6XUql2g5QAVvEKWMNw0kTWSEG56tBsBPmFeDAxMw15PV+4N5oQH3SfjS3Oxb5Y6jij3moxd+VEKUQbY0bYxm5Yn2bE43o4km/3of1gdzmgmDLH3mljQFNdA9BFbrZ5dRdJKfC5KG5l7L9n1yUF4kNjyatkTqP1nbu0AGERJKNNk/mxG61tSOTcnFY7zD2GJYzv josh@centerforopenscience.org
2 |
--------------------------------------------------------------------------------
/group_vars/pubkeys/jspies.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtZtjCIActQOVwioLMqV8tAul/9blajcG2ItBwlR7F7X8FixY7Zp3LuqqsMzj+lMAK8S9xwPECYvmlfjShCS/8+ws8wjHHoe2v3qu2b3Nzzi2Zw6OVdJ3utgpNDzVpPDGKLBYZHYPJwn+kYdOQYO0KvbgvTU7Gk3SJhryBb1JKtim3AifNqfk3DUZT2pe5nQfN8rGerKwOof3W2J6x4mzCjFDiCDFY5igVzw4HjUcNeAMvgI08excfqRMby3XJ3F2uy5SKzXK5nEI1b82E2yuV1OTA+2xx9ImONmjevK3N9wMMJWfBQ8Sbkno83ir+oVmavZ3umTrkH6pjSPpwLtXt jeff@centerforopenscience.org
2 |
--------------------------------------------------------------------------------
/gitlab.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - name: Set up gitlab
4 | hosts: gitlab
5 | roles:
6 | - role: gitlab
7 | vars_prompt:
8 | - name: "gitlab_force_db"
9 | prompt: "Should the GitLab database tables be created and seeded (WARNING: This will delete all data stored in the database)?"
10 | default: no
11 | - name: "gitlab_set_api_key"
12 | prompt: "Should the GitLab admin API key be reset?"
13 | default: false
14 |
--------------------------------------------------------------------------------
/group_vars/pubkeys/lyndsysimon.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDe31+sFDSuT6r18FYQP+zyfJssop451DVa8ciS/CedP+MD/zgJ+3yoGSfB9vZqnmfDMnmbE3qBf/+q++zPOUenJn9ai2gfNquIbEwkoe9E9VadBQRuLUHN9tSRsZ2FUMCmD2VPMB/9C56k4jSV3nuvJf3qR/cuPtJN2AXYdx6G6E9l52zvvIpmcWLJr32Tcd9y1n05ugg54Th0IrbuNjfYBHoq48Ym89l6uV/PChyGqrgpRe5/chx3QX0ftf1ecJ3DuoT87WqL1noaCaRsROH5aqNQMK3ShZWvFPxrvMVfk5s1BLJ1APrImN/x7thRe4D17ZjFW2uaMBK+UU/K3jyb lyndsy@centerforopenscience.org
2 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-clean/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Delete all docker containers
2 | become: yes
3 | shell: "docker rm -f $(docker ps -aq)"
4 | ignore_errors: yes
5 | tags:
6 | - clean
7 | - clean_containers
8 | - upgrade
9 |
10 |
11 | - name: Delete all docker images
12 | become: yes
13 | shell: "docker rmi -f $(docker images -q)"
14 | ignore_errors: yes
15 | tags:
16 | - clean
17 | - clean_images
18 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_gzip_static_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_gzip_static_module.yml
2 | # configure flag: --with-http_gzip_static_module
3 |
4 | - name: Modules | Update the http_gzip_static_module configuration
5 | template:
6 | src: ../../templates/modules/http_gzip_static.conf.j2
7 | dest: "{{nginx_dir}}/conf.d/http_gzip_static.conf"
8 | owner: root
9 | group: root
10 | mode: 0644
11 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/meta/main.yml:
--------------------------------------------------------------------------------
1 | # file: monit/meta/main.yml
2 |
3 | galaxy_info:
4 | author: pjan vandaele
5 | company: Ansibles
6 | description: "Installs monit monitoring and management tool (and attaches it to ssh, cron, ntpd)."
7 | min_ansible_version: 1.4
8 | license: MIT
9 | platforms:
10 | - name: Ubuntu
11 | versions:
12 | - all
13 | categories:
14 | - system
15 | - monitoring
16 |
17 | dependencies: []
18 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/apache2/ports.conf:
--------------------------------------------------------------------------------
1 | # If you just change the port or add more ports here, you will likely also
2 | # have to change the VirtualHost statement in
3 | # /etc/apache2/sites-enabled/000-default.conf
4 |
5 | Listen 80
6 |
7 | <IfModule ssl_module>
8 | 	Listen 443
9 | </IfModule>
10 | 
11 | <IfModule mod_gnutls.c>
12 | 	Listen 443
13 | </IfModule>
14 |
15 | # vim: syntax=apache ts=4 sw=4 sts=4 sr noet
16 |
--------------------------------------------------------------------------------
/roles/docker-sentry/files/nginx/conf.d/sentry.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 9000;
3 | client_max_body_size 20M;
4 |
5 | location / {
6 | proxy_pass http://sentry:9000;
7 | proxy_redirect off;
8 | proxy_set_header Host $host;
9 | proxy_set_header X-Real-IP $remote_addr;
10 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
11 | proxy_set_header X-Forwarded-Proto $scheme;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/roles/docker-postgres/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_postgres_name: postgres_1
2 | docker_postgres_image: postgres:9.4
3 | docker_postgres_data_dir: /opt/postgres/data/
4 | docker_postgres_hostname: "{{ hostname_name }}"
5 | docker_postgres_net: bridge
6 | docker_postgres_expose: []
7 | docker_postgres_ports: []
8 | docker_postgres_env: {}
9 | docker_postgres_links: []
10 | docker_postgres_volumes:
11 | - "{{ docker_postgres_data_dir }}:/var/lib/postgresql/data"
12 |
--------------------------------------------------------------------------------
/docker-haproxy.yml:
--------------------------------------------------------------------------------
1 | - name: Set up haproxy
2 | hosts: all
3 | pre_tasks:
4 | - fail: msg="One or more tags must be specified to run this playbook"
5 | vars:
6 | docker_haproxy: no
7 | docker_haproxy_name: "{{ docker_env }}_haproxy_1"
8 | docker_haproxy_conf_dir: "/opt/{{ docker_env }}_haproxy/conf/"
9 | docker_haproxy_ssl_dir: "/opt/{{ docker_env }}_haproxy/ssl/"
10 | roles:
11 | - role: docker-haproxy
12 | when: docker_haproxy
13 |
--------------------------------------------------------------------------------
/roles/docker-cassandra/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_cassandra_name: cassandra_1
2 | docker_cassandra_image: centerforopenscience/cassandra:2.2
3 | docker_cassandra_data_dir: /opt/cassandra/data/
4 | docker_cassandra_hostname: "{{ hostname_name }}"
5 | docker_cassandra_env: {}
6 | docker_cassandra_expose: []
7 | docker_cassandra_ports: []
8 | docker_cassandra_volumes:
9 | - "{{ docker_cassandra_data_dir }}:/var/lib/cassandra"
10 | docker_cassandra_volumes_from: []
11 |
--------------------------------------------------------------------------------
/docker-varnish.yml:
--------------------------------------------------------------------------------
1 | - name: Set up varnish
2 | hosts: all
3 | pre_tasks:
4 | - fail: msg="One or more tags must be specified to run this playbook"
5 | vars:
6 | docker_varnish: no
7 | docker_varnish_name: "{{ docker_env }}_varnish_1"
8 | docker_varnish_conf_dir: "/opt/{{ docker_env }}_varnish/conf/"
9 | docker_varnish_data_dir: "/opt/{{ docker_env }}_varnish/data/"
10 | roles:
11 | - role: docker-varnish
12 | when: docker_varnish
13 |
--------------------------------------------------------------------------------
/roles/docker-celery-flower/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_celery_flower_name: celery_flower_1
2 | docker_celery_flower_image: centerforopenscience/celery-flower:latest
3 | docker_celery_flower_command: flower
4 | docker_celery_flower_hostname: "{{ hostname_name }}"
5 | docker_celery_flower_net: bridge
6 | docker_celery_flower_env: {}
7 | docker_celery_flower_links: []
8 | docker_celery_flower_expose:
9 | - 5555
10 | docker_celery_flower_ports: []
11 | docker_celery_flower_volumes: []
12 |
--------------------------------------------------------------------------------
/roles/docker-jenkins/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_jenkins_name: jenkins_1
2 | docker_jenkins_image: centerforopenscience/jenkins
3 | docker_jenkins_data_dir: /opt/jenkins/data/
4 | docker_jenkins_hostname: "{{ hostname_name }}"
5 | docker_jenkins_env: {}
6 | docker_jenkins_net: bridge
7 | docker_jenkins_expose:
8 | - 8080
9 | - 50000
10 | docker_jenkins_ports: []
11 | docker_jenkins_volumes:
12 | - "{{ docker_jenkins_data_dir }}:/var/jenkins_home"
13 | docker_jenkins_volumes_from: []
14 |
--------------------------------------------------------------------------------
/roles/docker-rabbitmq/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_rabbitmq_name: rabbitmq_1
2 | docker_rabbitmq_image: rabbitmq:3-management
3 | docker_rabbitmq_hostname: "{{ hostname_name }}"
4 | docker_rabbitmq_env:
5 | RABBITMQ_NODENAME: "rabbit@{{ ansible_hostname }}" # rabbitmqctl does not handle periods in hostname
6 | docker_rabbitmq_data_dir: /opt/rabbitmq/data/
7 | docker_rabbitmq_net: bridge
8 | docker_rabbitmq_ports: []
9 | docker_rabbitmq_volumes:
10 | - "{{ docker_rabbitmq_data_dir }}:/var/lib/rabbitmq"
11 |
--------------------------------------------------------------------------------
/roles/docker-dor/files/local.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | SECRET_KEY = 'Super#Secret!Key'
4 |
5 | DEBUG = True
6 | TEMPLATE_DEBUG = True
7 |
8 | DATABASES = {
9 | 'default': {
10 | 'ENGINE': 'django.db.backends.postgresql_psycopg2',
11 | 'NAME': 'dor_staging',
12 | # 'USER': 'postgres',
13 | # 'PASSWORD': '',
14 | 'HOST': os.environ.get('POSTGRES_PORT_5432_TCP_ADDR', ''),
15 | 'PORT': os.environ.get('POSTGRES_PORT_5432_TCP_PORT', ''),
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/roles/docker-cos/files/server/local.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | SECRET_KEY = 'Super#Secret!Key'
4 |
5 | DEBUG = True
6 | TEMPLATE_DEBUG = True
7 |
8 | DATABASES = {
9 | 'default': {
10 | 'ENGINE': 'django.db.backends.postgresql_psycopg2',
11 | 'NAME': 'cos_staging',
12 | # 'USER': 'postgres',
13 | # 'PASSWORD': '',
14 | 'HOST': os.environ.get('POSTGRES_PORT_5432_TCP_ADDR', ''),
15 | 'PORT': os.environ.get('POSTGRES_PORT_5432_TCP_PORT', ''),
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/docker-newrelic-pluginagent.yml:
--------------------------------------------------------------------------------
1 | - name: Setup docker newrelic pluginagent
2 | hosts: all
3 | pre_tasks:
4 | - fail: msg="One or more tags must be specified to run this playbook"
5 | vars:
6 | docker_newrelic_pluginagent: yes
7 | docker_newrelic_pluginagent_name: "{{ docker_env }}_newrelic_pluginagent_1"
8 | docker_newrelic_pluginagent_conf_dir: "/opt/{{ docker_env }}_newrelic_pluginagent/conf/"
9 | roles:
10 | - role: docker-newrelic-pluginagent
11 | when: docker_newrelic_pluginagent
12 |
--------------------------------------------------------------------------------
/roles/(legacy)/sentry/defaults/main.yml:
--------------------------------------------------------------------------------
1 | sentry_db_name: sentry
2 | sentry_db_user: sentry
3 | sentry_db_pass: sentry
4 | sentry_db_host: localhost
5 | sentry_db_port: 5432
6 |
7 | sentry_superuser_username: sentry
8 | sentry_superuser_password: sentry
9 | sentry_superuser_email: admin@cos.io
10 |
11 | sentry_github_app_id: '8bb1d955c630fbded6fa'
12 | sentry_github_app_secret: 'b94fdf02f85a519c896b1468d18158d380a24ef0'
13 |
14 | sentry_url_prefix: http://localhost
15 |
16 | sentry_host: 0.0.0.0
17 | sentry_port: 9000
18 |
--------------------------------------------------------------------------------
/roles/docker-osf/files/adminserver/uwsgi.ini:
--------------------------------------------------------------------------------
1 | [uwsgi]
2 | uid = www-data
3 | gid = www-data
4 |
5 | # Django application settings
6 | chdir = /code
7 | module = admin.base.wsgi:application
8 | env = OSF_PRODUCTION=1
9 |
10 | # process-related settings
11 | master = true
12 | processes = 2
13 | threads = 1
14 | harakiri = 120
15 | buffer-size = 8192
16 | socket = :8001
17 | stats = 127.0.0.1:1717
18 | vacuum = true
19 |
20 | # greenlet settings
21 | gevent=2000
22 | gevent-monkey-patch=true
23 |
24 | master
25 | show-config
26 |
--------------------------------------------------------------------------------
/roles/docker-unoconv/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # servers
2 |
3 | docker_unoconv: no
4 | docker_unoconv_name: unoconv_1
5 | docker_unoconv_image: centerforopenscience/unoconv:latest
6 | docker_unoconv_command: gosu www-data /opt/libreoffice6.0/program/python -u /usr/local/bin/unoconv --listener --server=0.0.0.0 --port=2002 -vvv
7 | docker_unoconv_env: {}
8 | docker_unoconv_net: bridge
9 | docker_unoconv_links: []
10 | docker_unoconv_expose:
11 | - 2002
12 | docker_unoconv_ports: []
13 | docker_unoconv_volumes: []
14 | docker_unoconv_volumes_from: []
15 |
--------------------------------------------------------------------------------
/deploy.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Set up users and groups
4 | hosts:
5 | - osf-staging
6 | - osf-production
7 | roles:
8 | - role: generic-users
9 |
10 | - hosts:
11 | - osf-staging
12 | roles:
13 | - role: osf
14 | vars_prompt:
15 | - name: osf_repo_branch
16 | prompt: "Which branch from CenterForOpenScience/osf do you want to check out on staging?"
17 | default: develop
18 | private: no
19 |
20 | - hosts:
21 | - osf-production
22 | roles:
23 | - role: osf
24 | vars:
25 | osf_repo_branch: master
26 |
--------------------------------------------------------------------------------
/vagranthosts:
--------------------------------------------------------------------------------
1 |
2 | [nginx]
3 | 192.168.111.222
4 |
5 | [webservers]
6 | 192.168.111.222
7 |
8 | [elasticsearch]
9 | 192.168.111.223
10 |
11 | [gitlab]
12 | 192.168.111.224
13 |
14 |
15 | [osf-vagrant]
16 | 192.168.111.225 hostname_name=osf-web-01
17 |
18 |
19 | [waterbutler-vagrant]
20 | 192.168.111.111 hostname_name=waterbutler-01
21 |
22 |
23 | [scrapi-vagrant]
24 | 192.168.111.130 hostname_name=scrapi-01
25 |
26 | [share-vagrant]
27 | 192.168.111.140 hostname_name=share-01
28 |
29 |
30 | [cas-vagrant]
31 | 192.168.111.150 hostname_name=cas-01
32 |
--------------------------------------------------------------------------------
/roles/csf-docker/files/csfdocker.sh:
--------------------------------------------------------------------------------
 1 | #!/bin/sh
 2 | # Open CSF/iptables for the docker0 bridge. Idempotent: each "iptables -C"
 3 | # probe exits 1 when the rule is missing, and rules are only added then.
 4 | # NOTE(review): ALLOWIN/ALLOWOUT/LOCALINPUT/LOCALOUTPUT/INVALID look like
 5 | # CSF-defined chains -- verify CSF has created them before this script runs.
 6 | iptables -C ALLOWIN -i docker0 -j ACCEPT 2> /dev/null
 7 | if [ $? = 1 ]; then
 8 |   iptables -A ALLOWIN -i docker0 -j ACCEPT
 9 |   iptables -A ALLOWOUT -o docker0 -j ACCEPT
10 | fi
11 | 
12 | # FORWARD-chain rules for traffic entering/leaving containers.
13 | iptables -C FORWARD ! -i docker0 -j LOCALINPUT 2> /dev/null
14 | if [ $? = 1 ]; then
15 |   iptables -I FORWARD ! -o docker0 -p tcp -j INVALID
16 |   iptables -I FORWARD ! -o docker0 -j LOCALOUTPUT
17 |   iptables -I FORWARD ! -i docker0 -p tcp -j INVALID
18 |   iptables -I FORWARD ! -i docker0 -j LOCALINPUT
19 | fi
15 |
--------------------------------------------------------------------------------
/roles/docker-share-reg/files/local.py:
--------------------------------------------------------------------------------
 1 | # Django local settings for the SHARE registration app (docker-share-reg).
 2 | import os
 3 | 
 4 | # NOTE(review): hard-coded secret key committed to the repo -- rotate it and
 5 | # inject it per environment instead.
 6 | SECRET_KEY = 'Secret!Key'
 7 | 
 8 | # NOTE(review): DEBUG enabled in a deployed settings file -- confirm this file
 9 | # is never used for production traffic.
10 | DEBUG = True
11 | 
12 | DOMAIN = 'http://localhost:8000'
13 | 
14 | # Postgres connection; host/port come from docker-link environment variables
15 | # (empty string when the POSTGRES link is absent).
16 | DATABASES = {
17 |     'default': {
18 |         'ENGINE': 'django.db.backends.postgresql_psycopg2',
19 |         'NAME': 'share_registration',
20 |         # 'USER': 'postgres',
21 |         # 'PASSWORD': '',
22 |         'HOST': os.environ.get('POSTGRES_PORT_5432_TCP_ADDR', ''),
23 |         'PORT': os.environ.get('POSTGRES_PORT_5432_TCP_PORT', ''),
24 |     }
25 | }
26 | 
27 | # Serve static assets from the configured domain.
28 | STATIC_URL = '{}/static/'.format(DOMAIN)
21 |
--------------------------------------------------------------------------------
/site.yml:
--------------------------------------------------------------------------------
1 |
2 | # site.yml
3 | # This playbook is responsible for provisioning all machines in an inventory
4 |
5 | - name: Collecting facts
6 | hosts: all
7 | gather_facts: yes
8 |
9 | - include: common.yml
10 | - include: security.yml
11 |
12 | # Set up webservers
13 | - name: Provision webservers
14 | hosts: webservers
15 | roles:
16 | - role: uwsgi
17 | - role: nginx
18 |
19 |
20 | - name: Set up sentry
21 | hosts: sentry
22 | roles:
23 | - role: sentry
24 |
25 |
26 | - name: Set up elasticsearch
27 | hosts: elasticsearch
28 | roles:
29 | - role: elasticsearch
30 |
--------------------------------------------------------------------------------
/roles/docker-fluentd/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_fluentd_name: fluentd_1
2 | docker_fluentd_image: centerforopenscience/fluentd:latest
3 | docker_fluentd_hostname: "{{ hostname_name }}"
4 | docker_fluentd_net: bridge
5 | docker_fluentd_source_conf_dir: etc/
6 | docker_fluentd_conf_dir: /opt/fluentd/conf/
7 | docker_fluentd_data_dir: /opt/fluentd/data/
8 | docker_fluentd_env: {}
9 | docker_fluentd_links: []
10 | docker_fluentd_expose:
11 | - 24224
12 | docker_fluentd_ports: []
13 | docker_fluentd_volumes:
14 | - "{{ docker_fluentd_data_dir }}:/data"
15 | - "{{ docker_fluentd_conf_dir }}:/etc/fluent:ro"
16 |
--------------------------------------------------------------------------------
/roles/docker-jam/files/conf/local.yml:
--------------------------------------------------------------------------------
 1 | # JAM service local configuration (mounted into the docker-jam container).
 2 | DEBUG: false
 3 | 
 4 | # SERVER SETTINGS
 5 | FORK: false # May be true, false, or an integer -- TODO confirm what the integer form means (worker count?) against JAM's config parser
 6 | PORT: 1212
 7 | HOST: 0.0.0.0
 8 | 
 9 | # Connection URIs use container-link hostnames ("mongo", "elasticsearch").
10 | MONGO_URI: mongodb://mongo:27017/
11 | MONGO_DATABASE_NAME: jam
12 | 
13 | ELASTICSEARCH_URI: http://elasticsearch:9200/
14 | 
15 | # All three backend maps point every store (state/logger/storage) at mongo.
16 | NAMESPACE_BACKENDS:
17 |   state: mongo
18 |   logger: mongo
19 |   storage: mongo
20 | 
21 | NAMESPACEMANAGER_BACKENDS:
22 |   state: mongo
23 |   logger: mongo
24 |   storage: mongo
25 | 
26 | NAMESPACE_DEFAULT_BACKENDS:
27 |   state: mongo
28 |   logger: mongo
29 |   storage: mongo
30 | 
31 | # NOTE(review): placeholder secret committed to the repo -- override per
32 | # environment (e.g. ansible-vault) before any real deployment.
33 | JWT_SECRET: Something!Super@Secret
--------------------------------------------------------------------------------
/roles/fwknop/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Update APT package cache
2 | apt:
3 | update_cache: yes
4 | become: yes
5 |
6 | - name: Install fwknopd server (v2.6.7)
7 | apt:
8 | pkg: fwknop-server=2.6.0-2
9 | become: yes
10 |
11 | - name: copy fwknopd.conf
12 | template:
13 | src: fwknopd.conf.j2
14 | dest: /etc/fwknop/fwknopd.conf
15 | become: yes
16 |
17 | - name: copy access.conf
18 | template:
19 | src: access.conf.j2
20 | dest: /etc/fwknop/access.conf
21 | become: yes
22 |
23 | - name: fwknopd server restart
24 | service:
25 | name: fwknop-server
26 | state: restarted
27 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_echo_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_echo_module.yml
2 | # configure flag: --add-module=/tmp/nginx_echo
3 |
4 | - name: Modules | Download the http_echo_module source
5 | get_url:
6 | url: "{{nginx_echo_url}}"
7 | dest: "/tmp/nginx-echo-module.tar.gz"
8 |
9 | - name: Modules | Unpack the http_echo_module source
10 | command: tar -xvzf /tmp/nginx-echo-module.tar.gz chdir=/tmp creates=/tmp/echo-nginx-module-{{nginx_echo_version}}
11 |
12 | - name: Modules | Copy the http_echo_module source folder
13 | command: sudo cp -R /tmp/echo-nginx-module-{{nginx_echo_version}} /tmp/nginx_echo
14 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/README.md:
--------------------------------------------------------------------------------
1 | ## Ansibles - build-essential [](https://travis-ci.org/Ansibles/build-essential)
2 |
3 | Ansible role which installs packages required for compiling C software from source.
4 |
5 |
6 | #### Requirements & Dependencies
7 | - Tested on Ansible 1.4 or higher.
8 |
9 |
10 | #### Variables
11 |
12 | none
13 |
14 |
15 | #### License
16 |
17 | Licensed under the MIT License. See the LICENSE file for details.
18 |
19 |
20 | #### Feedback, bug-reports, requests, ...
21 |
22 | Are [welcome](https://github.com/ansibles/build-essential/issues)!
23 |
--------------------------------------------------------------------------------
/roles/docker-elasticsearch/files/conf/logging.yml:
--------------------------------------------------------------------------------
1 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG
2 | es.logger.level: INFO
3 | rootLogger: ${es.logger.level}, console
4 | logger:
5 | # log action execution errors for easier debugging
6 | action: INFO
7 | # reduce the logging for aws, too much is logged under the default INFO
8 | com.amazonaws: WARN
9 |
10 | index.search.slowlog: TRACE
11 | index.indexing.slowlog: TRACE
12 |
13 | appender:
14 | console:
15 | type: console
16 | layout:
17 | type: consolePattern
18 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
19 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # file: monit/tasks/main.yml
2 |
3 | - name: Monit | Make sure monit is installed
4 | apt:
5 | pkg: monit
6 | state: present
7 |
8 | - name: Monit | Update the monit configuration (/etc/monit/monitrc)
9 | template:
10 | src: "etc_monit_monitrc.j2"
11 | dest: "/etc/monit/monitrc"
12 | notify:
13 | - restart monit
14 |
15 | - name: Monit | Copy the sshd/ntp/chron monit service files
16 | template:
17 | src: "etc_monit_conf.d_{{item}}.j2"
18 | dest: "/etc/monit/conf.d/{{item}}"
19 | with_items:
20 | - sshd
21 | - cron
22 | - ntp
23 | notify:
24 | - restart monit
25 |
--------------------------------------------------------------------------------
/roles/docker-memcached/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart docker memcached container
2 | become: yes
3 | shell: "docker restart {{ docker_memcached_name }}"
4 | tags:
5 | - restart
6 |
7 |
8 | - name: Run docker memcached container
9 | docker:
10 | command: "{{ docker_memcached_command }}"
11 | expose: "{{ docker_memcached_expose }}"
12 | hostname: "{{ hostname_name }}"
13 | image: "{{ docker_memcached_image }}"
14 | name: "{{ docker_memcached_name }}"
15 | ports: "{{ docker_memcached_ports }}"
16 | pull: always
17 | restart_policy: always
18 | state: reloaded
19 | tags:
20 | - install
21 | - upgrade
22 |
--------------------------------------------------------------------------------
/roles/(legacy)/github/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Make sure git is installed
4 | apt: name=git state=present
5 |
6 | - name: Get github.com RSA key
7 | sudo: False
8 | shell: ssh-keyscan -t rsa github.com
9 | connection: local
10 | register: rsaresult
11 | changed_when: False
12 | failed_when: "rsaresult.rc != 0"
13 |
14 | # TODO: figure out correct permissions for this file
15 | - name: Ensure github.com key in /etc/ssh/ssh_known_hosts
16 | sudo: True
17 | lineinfile:
18 | line="{{ rsaresult.stdout }}"
19 | dest="/etc/ssh/ssh_known_hosts"
20 | insertafter=EOF
21 | create=yes
22 | state=present
23 | mode=0777
24 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/meta/main.yml:
--------------------------------------------------------------------------------
1 | # file: build-essential/meta/main.yml
2 |
3 | galaxy_info:
4 | author: pjan vandaele
5 | company: Ansibles
6 | description: Install packages required for compiling C software from source.
7 | min_ansible_version: 1.4
8 | license: MIT
9 | platforms:
10 | - name: Ubuntu
11 | versions:
12 | - all
13 | - name: Debian
14 | version:
15 | - all
16 | - name: Fedora
17 | versions:
18 | - all
19 | - name: RedHat
20 | versions:
21 | - all
22 | - name: Suse
23 | versions:
24 | - all
25 | categories:
26 | - system
27 | - development
28 |
29 | dependencies: []
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # OS generated files
2 | ######################
3 | .DS_Store
4 | .DS_Store?
5 | ._*
6 | .Spotlight-V100
7 | .Trashes
8 | Icon?
9 | ehthumbs.db
10 | Thumbs.db
11 |
12 | # R
13 | #######################
14 | .Rhistory
15 |
16 | # Python
17 | #######################
18 | *.py[cod]
19 | *.so
20 | *.egg
21 | *.egg-info
22 | eggs
23 | parts
24 | var
25 | sdist
26 | develop-eggs
27 | .installed.cfg
28 | lib
29 | lib64
30 | __pycache__
31 | pip-log.txt
32 | .coverage
33 | .tox
34 | nosetests.xml
35 | *.mo
36 | .idea
37 | junk
38 |
39 |
40 | # Local config
41 | ##############
42 |
43 | hosts
44 | .vagrant
45 |
46 | !.gitkeep
47 |
48 | tempfiles
49 | .konchrc
50 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-logentries/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart docker logentries container
2 | become: yes
3 | shell: "docker restart {{ docker_rabbitmq_name }}"
4 | tags:
5 | - restart
6 |
7 |
8 | - name: Run docker logentries container
9 | docker:
10 | name: "{{ docker_logentries_name }}"
11 | command: "{{ docker_logentries_command }}"
12 | state: running
13 | hostname: "{{ hostname_name }}"
14 | restart_policy: always
15 | env: "{{ docker_logentries_env }}"
16 | image: "logentries/docker-logentries"
17 | volumes:
18 | - "/var/run/docker.sock:/var/run/docker.sock"
19 | tags:
20 | - install
21 | - upgrade
22 |
--------------------------------------------------------------------------------
/roles/docker-redis/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_redis_name: redis_1
2 | docker_redis_image: redis:3
3 | docker_redis_command: redis-server /usr/local/etc/redis/redis.conf
4 | docker_redis_conf_dir: /opt/redis/conf/
5 | docker_redis_conf_file: "{{ docker_redis_conf_dir }}redis.conf"
6 | docker_redis_source_conf_file: redis.conf
7 | docker_redis_data_dir: /opt/redis/data/
8 | docker_redis_env: {}
9 | docker_redis_hostname: "{{ hostname_name }}"
10 | docker_redis_net: bridge
11 | docker_redis_expose:
12 | - 6379
13 | docker_redis_ports: []
14 | docker_redis_volumes:
15 | - "{{ docker_redis_conf_file }}:/usr/local/etc/redis/redis.conf:ro"
16 | - "{{ docker_redis_data_dir }}:/data"
17 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/tasks/install_ruby_from_source.yml:
--------------------------------------------------------------------------------
1 | - name: Download Ruby 2.1
2 | get_url:
3 | url=ftp://ftp.ruby-lang.org/pub/ruby/2.1/ruby-2.1.2.tar.gz
4 | dest=/tmp/ruby.tar.gz
5 |
6 | - name: Extract ruby source
7 | command: tar -xzf ruby.tar.gz chdir=/tmp/ creates=/tmp/ruby-2.1.2
8 |
9 | - name: Install ruby from source (configure)
10 | command: ./configure
11 | chdir=/tmp/ruby-2.1.2
12 |
13 | - name: Install ruby from source (make)
14 | command: make
15 | chdir=/tmp/ruby-2.1.2
16 |
17 | - name: Install ruby from source (make install)
18 | command: make install
19 | chdir=/tmp/ruby-2.1.2
20 | creates=/usr/local/bin/ruby
21 | become: yes
22 |
--------------------------------------------------------------------------------
/roles/(legacy)/mongo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Source: http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/
2 |
3 | ##### Install #####
4 |
5 | - name: Import public key used by apt
6 | apt_key:
7 | keyserver: hkp://keyserver.ubuntu.com:80
8 | id: 7F0CEB10
9 | state: present
10 | become: yes
11 |
12 | - name: Add mongo apt repository
13 | apt_repository: repo='deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' update_cache=yes
14 |
15 | - name: Install latest stable version of mongodb
16 | apt: name=mongodb-org state=present
17 | become: yes
18 |
19 | - name: Ensure mongo service is running
20 | service: name=mongod state=started
21 |
--------------------------------------------------------------------------------
/roles/transparent-huge-pages/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Disables transparent-huge-pages for applications like tokumx and redis
2 |
3 | - name: Disables transparent-huge-pages (for tokumx)
4 | become: yes
5 | copy:
6 | src: disable-transparent-hugepages
7 | dest: /etc/init.d/disable-transparent-hugepages
8 | mode: 0755
9 | tags:
10 | - install
11 |
12 |
13 | - name: Register the file with init.d
14 | become: yes
15 | command: update-rc.d disable-transparent-hugepages defaults
16 | tags:
17 | - install
18 |
19 |
20 | - name: Run service disable transparent hugepages
21 | become: yes
22 | shell: service disable-transparent-hugepages start
23 | tags:
24 | - install
25 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_stub_status_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_stub_status_module.yml
2 | # configure flag: --with-http_stub_status_module
3 |
4 | - include: _authorized_ips.yml
5 |
6 | - name: Modules | Make sure the nginx status configuration is updated
7 | template:
8 | src: ../../templates/modules/nginx_status.j2
9 | dest: "{{nginx_dir}}/sites-available/nginx_status"
10 | owner: root
11 | group: root
12 | mode: 0644
13 |
14 | - name: Modules | Enable the status stub sites-available
15 | file:
16 | path: "{{nginx_dir}}/sites-enabled/nginx_status"
17 | src: "{{nginx_dir}}/sites-available/nginx_status"
18 | state: link
19 | force: yes
20 |
--------------------------------------------------------------------------------
/roles/docker-openvpn/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart docker openvpn container
2 | become: yes
3 | shell: "docker restart {{ docker_openvpn_name }}"
4 | tags:
5 | - restart
6 |
7 |
8 | - name: Run docker openvpn container
9 | docker:
10 | hostname: "{{ docker_openvpn_hostname }}"
11 | image: "{{ docker_openvpn_image }}"
12 | name: "{{ docker_openvpn_name }}"
13 | net: "{{ docker_openvpn_net }}"
14 | privileged: "{{ docker_openvpn_privileged }}"
15 | pull: always
16 | restart_policy: always
17 | state: reloaded
18 | volumes: "{{ docker_openvpn_volumes }}"
19 | volumes_from: "{{ docker_openvpn_volumes_from }}"
20 | tags:
21 | - install
22 | - upgrade
23 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/tasks/database.yml:
--------------------------------------------------------------------------------
1 |
2 |
3 | - name: Make sure postgres python driver is installed
4 | pip: name=
5 |
6 | - name: Create a user for gitlab
7 | postgresql_user: name={{ gitlab_user }}
8 | password={{ gitlab_db_pass }}
9 | port={{gitlab_db_port}}
10 | state=present
11 | become_user: postgres # the superuser created after installing postgres
12 | become: yes
13 |
14 |
15 | - name: Create the gitlab database
16 | postgresql_db: name={{ gitlab_db_name }}
17 | port={{ gitlab_db_port }}
18 | owner={{ gitlab_user }}
19 | state=present
20 | become_user: postgres
21 | become: yes
22 |
--------------------------------------------------------------------------------
/roles/(legacy)/osf/defaults/main.yml:
--------------------------------------------------------------------------------
1 | osf_apps_dir: "/opt/apps/"
2 |
3 | osf_repo_dir: "{{osf_apps_dir}}osf/"
4 |
5 | # Where to put virtualenvs
6 | # Must have trailing slash
7 | osf_virtualenvs_dir: "/opt/envs/"
8 |
9 | # Location of the osf virtualenv
10 | osf_virtualenv: "{{osf_virtualenvs_dir}}osf/"
11 |
12 | # The branch to check out
13 | osf_repo_branch: master
14 |
15 | osf_env_vars:
16 | - {var: "OSF_PRODUCTION", val: "0"}
17 | - {var: "OSF_STAGING", val: "1"}
18 |
19 |
20 | # UWSGI ini vars
21 | # The place for uwsgi to create the sock file
22 | osf_uwsgi_socket: /tmp/osf.sock
23 | osf_uwsgi_processes: 4
24 | osf_uwsgi_threads: 2
25 | osf_uwsgi_harakiri: 60
26 | osf_uwsgi_buffersize: 8192
27 |
--------------------------------------------------------------------------------
/roles/hostname/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: ansible_hostname
2 | debug:
3 | msg: "{{ ansible_fqdn }}"
4 |
5 |
6 | - name: hostname_name
7 | debug:
8 | msg: "{{ hostname_name }}"
9 |
10 |
11 | - name: Update the hostname
12 | copy:
13 | content: "{{ hostname_name }}"
14 | dest: /etc/hostname
15 | owner: root
16 | group: root
17 | mode: 0644
18 | when: hostname_name != ""
19 |
20 |
21 | - name: Update /etc/hosts
22 | replace:
23 | dest: /etc/hosts
24 | regexp: '{{ ansible_fqdn }}'
25 | replace: '{{ hostname_name }}'
26 | backup: yes
27 | when: hostname_name != ""
28 |
29 |
30 | - name: Run hostname cmd
31 | hostname:
32 | name: "{{ hostname_name }}"
33 | when: hostname_name != ""
34 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/README.md:
--------------------------------------------------------------------------------
1 | ## Ansibles - timezone [](https://travis-ci.org/Ansibles/timezone)
2 |
3 | Ansible role for setting/updating the timezone
4 |
5 |
6 | #### Requirements & Dependencies
7 | - Tested on Ansible 1.3 or higher.
8 |
9 |
10 | #### Variables
11 |
12 | ```yaml
13 | timezone_zone: UTC # valid tz database string
14 | ```
15 |
16 | For a list of valid strings, check [this](http://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
17 |
18 |
19 | #### License
20 |
21 | Licensed under the MIT License. See the LICENSE file for details.
22 |
23 |
24 | #### Feedback, bug-reports, requests, ...
25 |
26 | Are [welcome](https://github.com/ansibles/timezone/issues)!
27 |
--------------------------------------------------------------------------------
/roles/docker-haproxy/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_haproxy_name: haproxy_1
2 | docker_haproxy_image: centerforopenscience/haproxy:latest
3 | docker_haproxy_source_conf_dir: roles/docker-haproxy/files/conf/
4 | docker_haproxy_source_ssl_dir: roles/docker-haproxy/files/ssl/
5 | docker_haproxy_conf_dir: /opt/haproxy/conf/
6 | docker_haproxy_ssl_dir: "{{ docker_haproxy_conf_dir }}ssl/"
7 | docker_haproxy_hostname: "{{ hostname_name }}"
8 | docker_haproxy_env: {}
9 | docker_haproxy_links: []
10 | docker_haproxy_net: bridge
11 | docker_haproxy_expose: []
12 | docker_haproxy_ports: []
13 | docker_haproxy_volumes:
14 | - "{{ docker_haproxy_conf_dir }}:/usr/local/etc/haproxy/:ro"
15 | - "{{ docker_haproxy_ssl_dir }}:/etc/ssl/private:ro"
16 | docker_haproxy_volumes_from: []
17 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/http_auth_request_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/http_auth_request_module.yml
2 | # configure flag: --add-module=/tmp/nginx_auth_request
3 |
4 | - name: Modules | Download the http_auth_request_module source
5 | get_url:
6 | url: "{{nginx_auth_request_url}}"
7 | dest: "/tmp/nginx-auth-request-module.tar.gz"
8 |
9 | - name: Modules | Unpack the http_auth_request_module source
10 | command: tar -xvzf /tmp/nginx-auth-request-module.tar.gz chdir=/tmp creates=/tmp/ngx_http_auth_request_module-{{nginx_auth_request_release}}
11 |
12 | - name: Modules | Copy the http_auth_request_module source folder
13 | command: sudo cp -R /tmp/ngx_http_auth_request_module-{{nginx_auth_request_release}} /tmp/nginx_auth_request
14 |
15 |
--------------------------------------------------------------------------------
/roles/docker-elasticsearch/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_elasticsearch_name: elasticsearch_1
2 | docker_elasticsearch_image: elasticsearch:1.7
3 | docker_elasticsearch_source_conf_dir: conf/
4 | docker_elasticsearch_conf_dir: /opt/elasticsearch/conf/
5 | docker_elasticsearch_data_dir: /opt/elasticsearch/data/
6 | docker_elasticsearch_env:
7 | ES_HEAP_SIZE: "{{ (ansible_memtotal_mb / 2) | int }}m"
8 | docker_elasticsearch_hostname: "{{ hostname_name }}"
9 | docker_elasticsearch_net: bridge
10 | docker_elasticsearch_expose:
11 | - 9200
12 | - 9300
13 | docker_elasticsearch_ports: []
14 | docker_elasticsearch_volumes:
15 | - "{{ docker_elasticsearch_conf_dir }}:/usr/share/elasticsearch/config"
16 | - "{{ docker_elasticsearch_data_dir }}:/usr/share/elasticsearch/data"
17 |
--------------------------------------------------------------------------------
/roles/docker-jenkins/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart docker jenkins container
2 | become: yes
3 | shell: "docker restart {{ docker_jenkins_name }}"
4 | tags:
5 | - restart
6 |
7 |
8 | - name: Run docker jenkins container
9 | docker:
10 | env: "{{ docker_jenkins_env }}"
11 | expose: "{{ docker_jenkins_expose }}"
12 | hostname: "{{ docker_jenkins_hostname }}"
13 | image: "{{ docker_jenkins_image }}"
14 | name: "{{ docker_jenkins_name }}"
15 | net: "{{ docker_jenkins_net }}"
16 | ports: "{{ docker_jenkins_ports }}"
17 | pull: always
18 | restart_policy: always
19 | state: reloaded
20 | volumes: "{{ docker_jenkins_volumes }}"
21 | volumes_from: "{{ docker_jenkins_volumes_from }}"
22 | tags:
23 | - install
24 | - upgrade
25 |
--------------------------------------------------------------------------------
/roles/network_interfaces/templates/interfaces.j2:
--------------------------------------------------------------------------------
1 | # This file describes the network interfaces available on your system
2 | # and how to activate them. For more information, see interfaces(5).
3 |
4 | # The loopback network interface
5 | auto lo
6 | iface lo inet loopback
7 |
8 | auto {% for interface in network_interfaces %}{{interface.name}} {% endfor %}
9 |
10 | {% if network_config %}
11 | source {{network_config}}
12 | {% endif %}
13 |
14 | {% for interface in network_interfaces %}
15 | iface {{ interface.name }} inet static
16 | address {{ interface.address }}
17 | {% if interface.netmask is defined %}
18 | netmask {{ interface.netmask }}
19 | {% endif %}
20 | {% if interface.gateway is defined %}
21 | gateway {{ interface.gateway }}
22 | {% endif %}
23 |
24 | {% endfor %}
25 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/headers_more_module.yml:
--------------------------------------------------------------------------------
1 | # file: nginx/tasks/modules/headers_more_module.yml
2 | # configure flag: --add-module=/tmp/nginx_headers_more
3 |
4 | - name: Modules | Download the headers_more_module source
5 | get_url:
6 | url: "{{nginx_headers_more_url}}"
7 | dest: "/tmp/nginx-headers-more-module-{{nginx_headers_more_version}}.tar.gz"
8 |
9 | - name: Modules | Unpack the headers_more_module source
10 | command: tar -xvzf /tmp/nginx-headers-more-module-{{nginx_headers_more_version}}.tar.gz chdir=/tmp creates=/tmp/headers-more-nginx-module-{{nginx_headers_more_version}}
11 |
12 | - name: Modules | Copy the headers_more_module source folder
13 | command: sudo cp -R /tmp/headers-more-nginx-module-{{nginx_headers_more_version}} /tmp/nginx_headers_more
14 |
--------------------------------------------------------------------------------
/roles/docker-mongo/files/mongo.conf:
--------------------------------------------------------------------------------
1 | # mongod.conf
2 |
3 | # for documentation of all options, see:
4 | # http://docs.mongodb.org/manual/reference/configuration-options/
5 |
6 | # Where and how to store data.
7 | storage:
8 | dbPath: /data/db
9 | journal:
10 | enabled: true
11 | # engine:
12 | # mmapv1:
13 | # wiredTiger:
14 |
15 | # where to write logging data.
16 | # systemLog:
17 | # destination: file
18 | # logAppend: true
19 | # path: /var/log/mongodb/mongod.log
20 |
21 | # network interfaces
22 | # net:
23 | # port: 27017
24 | # bindIp: 127.0.0.1
25 |
26 |
27 | #processManagement:
28 |
29 | #security:
30 |
31 | #operationProfiling:
32 |
33 | #replication:
34 |
35 | #sharding:
36 |
37 | ## Enterprise-Only Options:
38 |
39 | #auditLog:
40 |
41 | #snmp:
42 |
--------------------------------------------------------------------------------
/roles/rkhunter/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 | - include: install.yml
3 |
4 | - name: Update rkhunter configuration
5 | template: src=etc_rkhunter.conf.j2 dest=/etc/rkhunter.conf owner=root group=root mode=0644
6 |
7 |
8 | # NOTE: This should only be run when config files are known to be good
9 | - name: Set baseline file properties
10 | command: rkhunter --propupd
11 | become: yes
12 | when: rkhunter_propupd|bool
13 |
14 | - name: Run checks
15 | command: rkhunter -c --skip-keypress
16 | become: yes
17 | when: rkhunter_run_checks|bool
18 | ignore_errors: yes
19 |
20 | # TODO: Perform configuration check: sudo rkhunter -C
21 | # Right, now getting error output: "Unknown configuration file option: "
22 | # But rkhunter seems to be running correctly
23 |
24 |
25 | - include: cron.yml
26 |
--------------------------------------------------------------------------------
/roles/csf-docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install script csf-docker-wait.conf in upstart
2 | become: yes
3 | copy:
4 | src: csf-docker-wait.conf
5 | dest: /etc/init/csf-docker-wait.conf
6 | mode: 0644
7 | tags:
8 | - install
9 |
10 |
11 | - name: Install script csf-docker-started.conf in upstart
12 | become: yes
13 | copy:
14 | src: csf-docker-started.conf
15 | dest: /etc/init/csf-docker-started.conf
16 | mode: 0644
17 | tags:
18 | - install
19 |
20 |
21 | - name: Install script csfdocker.sh
22 | become: yes
23 | copy:
24 | src: csfdocker.sh
25 | dest: /etc/csf/csfdocker.sh
26 | mode: 0755
27 | tags:
28 | - install
29 |
30 |
31 | - name: Restart CSF & LFD
32 | become: yes
33 | command: csf -ra
34 | tags:
35 | - install
36 |
--------------------------------------------------------------------------------
/roles/docker-scrapi/files/apiserver/uwsgi.ini:
--------------------------------------------------------------------------------
1 | [uwsgi]
2 | uid = www-data
3 | gid = www-data
4 |
5 | # add user-agent, http://uwsgi.unbit.narkive.com/jEtphIzE/default-log-format-explained#post5
6 | log-format = [pid: %(pid)|app: ?|req: ?/?] %(addr) (%(user)) {%(vars) vars in %(pktsize) bytes} [%(ctime)] %(method) %(uri) => generated %(rsize) bytes in %(msecs) msecs (%(proto) %(status)) %(headers) headers in %(hsize) bytes (%(switches) switches on core %(core)) "%(uagent)"
7 |
8 | # Django-related settings
9 | chdir = /code
10 | module = api.api.wsgi:application
11 |
12 | # process-related settings
13 | master = true
14 | processes = 4
15 | threads = 1
16 | harakiri = 120
17 | buffer-size = 8192
18 | socket = :8000
19 | stats = 127.0.0.1:1717
20 | vacuum = true
21 |
22 | master
23 | show-config
24 |
--------------------------------------------------------------------------------
/roles/docker-share-reg/files/uwsgi.ini:
--------------------------------------------------------------------------------
[uwsgi]
# Run worker processes as the unprivileged web user.
uid = www-data
gid = www-data

# add user-agent, http://uwsgi.unbit.narkive.com/jEtphIzE/default-log-format-explained#post5
log-format = [pid: %(pid)|app: ?|req: ?/?] %(addr) (%(user)) {%(vars) vars in %(pktsize) bytes} [%(ctime)] %(method) %(uri) => generated %(rsize) bytes in %(msecs) msecs (%(proto) %(status)) %(headers) headers in %(hsize) bytes (%(switches) switches on core %(core)) "%(uagent)"

# Django-related settings
chdir = /code
module = shareregistration.wsgi:application

# process-related settings
# harakiri: kill any request that runs longer than 120 seconds.
master = true
processes = 4
threads = 2
harakiri = 120
buffer-size = 8192
# Serve over a Unix socket; mode 666 so the fronting web server's
# worker user can connect to it.
socket = /tmp/uwsgi.sock
stats = 127.0.0.1:1717
chmod-socket = 666
# vacuum: remove the socket file on exit.
vacuum = true
22 |
--------------------------------------------------------------------------------
/roles/docker-nginx/files/conf/nginx.conf:
--------------------------------------------------------------------------------
# Top-level nginx configuration for the docker-nginx role.
# Per-site server blocks live in conf.d/*.conf, included at the
# bottom of the http block.
user nginx;
worker_processes 1;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;


events {
    worker_connections 1024;
}


http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;

    keepalive_timeout 65;

    #gzip on;

    # Per-site virtual hosts.
    include /etc/nginx/conf.d/*.conf;
}
32 |
--------------------------------------------------------------------------------
/roles/security_checks/tasks/tests/deny_port_scans.yml:
--------------------------------------------------------------------------------
# CSF checks

- include: ../start_testing.yml

# Probe the target host's privileged ports from the control machine and
# expect the connection attempts to hang (i.e. CSF is dropping them).
- name: test csf | Port scans are denied
  connection: local
  # FIX: the deprecated "sudo:" keyword is replaced with "become:",
  # matching the rest of this repository. No privilege escalation is
  # needed for an outbound nc probe.
  become: no
  # doalarm runs a command and returns an error if no output was captured in
  # {{check_portscan_timeout}} seconds
  # Here, a portscan is attempted; CSF should hang the connection, so doalarm() should
  # return an error
  shell: doalarm () { perl -e 'alarm shift; exec @ARGV' "$@"; } && doalarm {{check_portscan_timeout}} nc -z {{ inventory_hostname }} 1-1023
  when: check_portscan
  # If the above portscan command succeeded (return code 0), then csf is not denying port scans!
  register: portscan_result
  failed_when: "portscan_result.rc == 0"

- include: ../stop_testing.yml
19 |
--------------------------------------------------------------------------------
/roles/docker-prerender/defaults/main.yml:
--------------------------------------------------------------------------------
# Defaults for the docker-prerender role: runs the prerender container
# with its server.js mounted read-only from the host.
docker_prerender_name: prerender_1
docker_prerender_command: "gosu www-data node server.js"
docker_prerender_image: centerforopenscience/prerender:latest
# Where the config lives in the playbook repo (source) ...
docker_prerender_source_conf_dir: roles/docker-prerender/files/conf/
docker_prerender_source_conf_file: "{{ docker_prerender_source_conf_dir }}server.js"
# ... and where it is deployed on the docker host.
docker_prerender_conf_dir: /opt/prerender/conf/
docker_prerender_conf_file: "{{ docker_prerender_conf_dir }}server.js"
docker_prerender_hostname: "{{ hostname_name }}"
docker_prerender_env: {}
docker_prerender_links: []
docker_prerender_net: bridge
docker_prerender_expose: []
docker_prerender_ports: []
# Mount the deployed server.js over the container's copy, read-only.
docker_prerender_volumes:
  - "{{ docker_prerender_conf_file }}:/code/server.js:ro"
docker_prerender_volumes_from: []
17 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/localLogout.html:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 | Local Logout
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | Local Logout
19 |
20 | Status of Local Logout:
21 |
22 |
23 |
24 | You MUST close your browser to complete the logout process.
25 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/partialLogout.html:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 | Partial Logout
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | Partial Logout
19 |
20 | You remain logged into one or more applications accessed during your session.
21 | To complete the logout process, please close/exit your browser completely.
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/roles/docker-dor/files/uwsgi.ini:
--------------------------------------------------------------------------------
[uwsgi]
# Run worker processes as the unprivileged web user.
uid = www-data
gid = www-data

# add user-agent, http://uwsgi.unbit.narkive.com/jEtphIzE/default-log-format-explained#post5
log-format = [pid: %(pid)|app: ?|req: ?/?] %(addr) (%(user)) {%(vars) vars in %(pktsize) bytes} [%(ctime)] %(method) %(uri) => generated %(rsize) bytes in %(msecs) msecs (%(proto) %(status)) %(headers) headers in %(hsize) bytes (%(switches) switches on core %(core)) "%(uagent)"

# Django-related settings
chdir = /code
module = RepoDir.wsgi:application
env = DJANGO_SETTINGS_MODULE=RepoDir.settings

# process-related settings
# harakiri: kill any request that runs longer than 120 seconds.
master = true
processes = 4
threads = 1
harakiri = 120
buffer-size = 8192
socket = :8000
stats = 127.0.0.1:1717
vacuum = true

# Dump the resolved configuration at startup.
# FIX: the trailing bare "master" line was removed — it duplicated
# "master = true" above — and "show-config" is written as a key = value
# pair, which is the form uWSGI's ini parser expects.
show-config = true
25 |
--------------------------------------------------------------------------------
/roles/docker-unoconv/tasks/main.yml:
--------------------------------------------------------------------------------
# Tasks for the docker-unoconv role.

# Tag-gated restart: only runs when the play is invoked with --tags restart.
- name: Restart docker unoconv container
  become: yes
  shell: "docker restart {{ docker_unoconv_name }}"
  tags:
    - restart


# Always pull the latest image and (re)create the container if its
# configuration or image changed (state: reloaded).
- name: Run docker unoconv container
  docker:
    command: "{{ docker_unoconv_command }}"
    env: "{{ docker_unoconv_env }}"
    expose: "{{ docker_unoconv_expose }}"
    hostname: "{{ hostname_name }}"
    image: "{{ docker_unoconv_image }}"
    name: "{{ docker_unoconv_name }}"
    net: "{{ docker_unoconv_net }}"
    ports: "{{ docker_unoconv_ports }}"
    pull: always
    restart_policy: always
    state: reloaded
    tty: yes
    volumes: "{{ docker_unoconv_volumes }}"
    volumes_from: "{{ docker_unoconv_volumes_from }}"
  tags:
    - install
    - upgrade
27 |
--------------------------------------------------------------------------------
/roles/docker-mongo/defaults/main.yml:
--------------------------------------------------------------------------------
# Defaults for the docker-mongo role (MongoDB 3.x in a container).
docker_mongo_name: mongo_1
docker_mongo_image: mongo:3
docker_mongo_command: mongod --config /etc/mongo.conf
# Host-side locations for config, keyfile, and data.
docker_mongo_conf_dir: /opt/mongo/conf/
docker_mongo_conf_file: "{{ docker_mongo_conf_dir }}mongo.conf"
docker_mongo_source_conf_file: mongo.conf
docker_mongo_key_file: "{{ docker_mongo_conf_dir }}mongo-keyfile"
docker_mongo_source_key_file: mongo-keyfile
docker_mongo_data_dir: /opt/mongo/data/
docker_mongo_env: {}
docker_mongo_hostname: "{{ hostname_name }}"
docker_mongo_net: bridge
# Exposed to linked containers only; no host ports published by default.
docker_mongo_expose:
  - 27017
  - 28017
docker_mongo_ports: []
# Config is read-only inside the container; data dir persists on the host.
docker_mongo_volumes:
  - "{{ docker_mongo_conf_file }}:/etc/mongo.conf:ro"
  - "{{ docker_mongo_key_file }}:/etc/mongo-keyfile"
  - "{{ docker_mongo_data_dir }}:/data/db"
21 |
--------------------------------------------------------------------------------
/roles/docker-newrelic-pluginagent/defaults/main.yml:
--------------------------------------------------------------------------------
# Defaults for the docker-newrelic-pluginagent role.
docker_newrelic_pluginagent_name: newrelic_pluginagent_1
docker_newrelic_pluginagent_image: centerforopenscience/newrelic:plugin-agent
docker_newrelic_pluginagent_hostname: "{{ hostname_name }}"
docker_newrelic_pluginagent_net: bridge
docker_newrelic_pluginagent_source_conf_file: newrelic-plugin-agent.cfg
docker_newrelic_pluginagent_conf_dir: /opt/newrelic_pluginagent/conf/
docker_newrelic_pluginagent_conf_file: "{{ docker_newrelic_pluginagent_conf_dir }}newrelic-plugin-agent.cfg"
docker_newrelic_pluginagent_env: {}
docker_newrelic_pluginagent_links: []
docker_newrelic_pluginagent_expose: []
docker_newrelic_pluginagent_ports: []
# Config is mounted read-write into the container.
docker_newrelic_pluginagent_volumes:
  - "{{ docker_newrelic_pluginagent_conf_file }}:/etc/newrelic/newrelic-plugin-agent.cfg:rw"
14 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/tasks/main.yml:
--------------------------------------------------------------------------------
# file: build-essential/tasks/main.yml
# Install compiler/toolchain packages, dispatching on the OS family.

- name: build-essential | Read the family variables
  include_vars: "{{ ansible_os_family }}.yml"

- name: build-essential | Make sure the packages are installed (Debian)
  apt:
    pkg: "{{ item }}"
    state: present
  with_items: "{{ buildessential_packages }}"
  when: ansible_os_family == "Debian"

# FIX: Fedora (like CentOS/RHEL) reports ansible_os_family == "RedHat",
# so the previous condition ("Fedora") could never match and the yum
# branch was unreachable. The include_vars above resolves the same family
# name, so the matching vars file must be RedHat.yml — confirm it exists.
- name: build-essential | Make sure the packages are installed (RedHat)
  yum:
    name: "{{ item }}"
    state: present
  with_items: "{{ buildessential_packages }}"
  when: ansible_os_family == "RedHat"

- name: build-essential | Make sure the packages are installed (Suse)
  zypper:
    name: "{{ item }}"
    state: present
  with_items: "{{ buildessential_packages }}"
  when: ansible_os_family == "Suse"
26 |
--------------------------------------------------------------------------------
/roles/docker-cos/files/server/uwsgi.ini:
--------------------------------------------------------------------------------
[uwsgi]
# Run worker processes as the unprivileged web user.
uid = www-data
gid = www-data

# add user-agent, http://uwsgi.unbit.narkive.com/jEtphIzE/default-log-format-explained#post5
log-format = [pid: %(pid)|app: ?|req: ?/?] %(addr) (%(user)) {%(vars) vars in %(pktsize) bytes} [%(ctime)] %(method) %(uri) => generated %(rsize) bytes in %(msecs) msecs (%(proto) %(status)) %(headers) headers in %(hsize) bytes (%(switches) switches on core %(core)) "%(uagent)"

# Flask-related settings
# NOTE(review): header says Flask but the module path looks like a
# Django project wsgi — confirm which framework this deploys.
chdir = /code
module = mysite.wsgi:application

# process-related settings
master = true
workers = 4
threads = 1
harakiri = 120
buffer-size = 8192
socket = :8000
stats = 127.0.0.1:1717
vacuum = true

# greenlet settings
#gevent=2000
#gevent-monkey-patch=true

# Dump the resolved configuration at startup.
# FIX: the trailing bare "master" line was removed — it duplicated
# "master = true" above — and "show-config" is written as a key = value
# pair, which is the form uWSGI's ini parser expects.
show-config = true
28 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/files/rack_attack.rb:
--------------------------------------------------------------------------------
# 1. Rename this file to rack_attack.rb
# 2. Review the paths_to_be_protected and add any other path you need protecting
#

# Authentication-related endpoints that should be rate limited.
paths_to_be_protected = [
  "#{Rails.application.config.relative_url_root}/users/password",
  "#{Rails.application.config.relative_url_root}/users/sign_in",
  "#{Rails.application.config.relative_url_root}/api/#{API::API.version}/session.json",
  "#{Rails.application.config.relative_url_root}/api/#{API::API.version}/session",
  "#{Rails.application.config.relative_url_root}/users",
  "#{Rails.application.config.relative_url_root}/users/confirmation"
]

# Throttle POSTs to the protected paths: at most 10 requests per client
# IP per 60-second window. Disabled in the Rails test environment.
unless Rails.env.test?
  Rack::Attack.throttle('protected paths', limit: 10, period: 60.seconds) do |req|
    req.ip if paths_to_be_protected.include?(req.path) && req.post?
  end
end
19 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/naxsi_module.yml:
--------------------------------------------------------------------------------
# file: roles/nginx/tasks/modules/naxsi_module.yml
# configure flag: --add-module=/tmp/nginx_naxsi

- name: Modules | Download the naxsi_module source
  get_url:
    url: "{{nginx_naxsi_url}}"
    dest: "/tmp/nginx-naxsi-module.tar.gz"

- name: Modules | Unpack the naxsi_module source
  command: tar -xvzf /tmp/nginx-naxsi-module.tar.gz chdir=/tmp creates=/tmp/naxsi-{{nginx_naxsi_version}}

# FIX: use become instead of shelling out through sudo, and guard with
# creates — without it, re-running "cp -R" into an existing
# /tmp/nginx_naxsi would nest a second naxsi_src copy inside it.
- name: Modules | Copy the naxsi_module source folder
  become: yes
  command: cp -R /tmp/naxsi-{{nginx_naxsi_version}}/naxsi_src /tmp/nginx_naxsi
  args:
    creates: /tmp/nginx_naxsi

- name: Modules | Make sure the naxsi_module configuration is up to date
  copy:
    src: ../../files/naxsi_core.rules
    dest: "{{nginx_dir}}/naxsi_core.rules"
    owner: root
    group: root
    mode: 0644
22 |
--------------------------------------------------------------------------------
/docker-jenkins.yml:
--------------------------------------------------------------------------------
# Playbook: run a Jenkins container, optionally fronted by an nginx
# container that links to it and shares its volumes.
- name: Set up jenkins
  hosts: all
  pre_tasks:
    # Safety guard: an untagged run always executes this fail task, so
    # the playbook only does work when invoked with --tags.
    - fail: msg="One or more tags must be specified to run this playbook"
  vars:
    docker_env: test

    docker_jenkins_name: "{{ docker_env }}_jenkins_1"
    docker_jenkins_data_dir: "/opt/{{ docker_env }}_jenkins/data/"

    docker_nginx_name: "{{ docker_env }}_jenkins_nginx_1"
    docker_nginx_image: centerforopenscience/nginx:1 # auto reload on /etc/hosts change
    docker_nginx_conf_dir: "/opt/{{ docker_env }}_jenkins_nginx/conf/"
    docker_nginx_expose:
      - 80
      - 443
    docker_nginx_links:
      - "{{ docker_jenkins_name }}:jenkins"
    docker_nginx_volumes_from:
      - "{{ docker_jenkins_name }}"
  roles:
    - role: docker-jenkins

    # NOTE(review): docker_nginx is not defined in this playbook's vars,
    # so it must come from inventory/extra-vars; otherwise this condition
    # fails on an undefined variable — confirm, and consider
    # "docker_nginx | default(false)".
    - role: docker-nginx
      when: docker_nginx
--------------------------------------------------------------------------------
/roles/docker-postgres-vacuumlo/tasks/main.yml:
--------------------------------------------------------------------------------
# Tasks for the docker-postgres-vacuumlo role.

# Tag-gated restart: only runs when the play is invoked with --tags restart.
- name: Restart docker postgres vacuumlo container
  become: yes
  shell: "docker restart {{ docker_postgres_vacuumlo_name }}"
  tags:
    - restart


# Always pull the latest image and (re)create the container if its
# configuration or image changed (state: reloaded).
- name: Run docker postgres vacuumlo container
  docker:
    env: "{{ docker_postgres_vacuumlo_env }}"
    expose: "{{ docker_postgres_vacuumlo_expose }}"
    hostname: "{{ docker_postgres_vacuumlo_hostname }}"
    image: "{{ docker_postgres_vacuumlo_image }}"
    links: "{{ docker_postgres_vacuumlo_links }}"
    name: "{{ docker_postgres_vacuumlo_name }}"
    net: "{{ docker_postgres_vacuumlo_net }}"
    ports: "{{ docker_postgres_vacuumlo_ports }}"
    pull: always
    restart_policy: always
    state: reloaded
    volumes: "{{ docker_postgres_vacuumlo_volumes }}"
  tags:
    - install
--------------------------------------------------------------------------------
/roles/(legacy)/uwsgi/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
2 |
# Install uWSGI (via pip or apt, selected by uwsgi_install_mode) and set
# up the Debian-style apps-available/apps-enabled layout.

- name: Install uwsgi apt dependencies
  apt: pkg="{{ item }}" state="present"
  with_items:
    - build-essential
    - python-dev
    - python-pip
    - libxml2-dev

- name: Install uwsgi with pip
  pip: name="uwsgi" version="{{ uwsgi_version }}"
  when: uwsgi_install_mode == 'pip'

- name: Install uwsgi with apt
  apt: name="uwsgi" state=present
  when: uwsgi_install_mode == 'apt'

- name: Ensure /etc/uwsgi/apps-available exists
  file: path=/etc/uwsgi/apps-available state=directory mode=0751 group=www-data owner=www-data

- name: Ensure /etc/uwsgi/apps-enabled exists
  file: path=/etc/uwsgi/apps-enabled state=directory mode=0751 group=www-data owner=www-data

# Service definition installed under /etc/init/ (Upstart job file).
- name: Update uwsgi.conf script
  copy: src="uwsgi.conf" dest="/etc/init/uwsgi.conf"
27 |
--------------------------------------------------------------------------------
/roles/docker-osf/files/apiserver/uwsgi.ini:
--------------------------------------------------------------------------------
[uwsgi]
# Run worker processes as the unprivileged web user.
uid = www-data
gid = www-data

# add user-agent, http://uwsgi.unbit.narkive.com/jEtphIzE/default-log-format-explained#post5
log-format = [pid: %(pid)|app: ?|req: ?/?] %(addr) (%(user)) {%(vars) vars in %(pktsize) bytes} [%(ctime)] %(method) %(uri) => generated %(rsize) bytes in %(msecs) msecs (%(proto) %(status)) %(headers) headers in %(hsize) bytes (%(switches) switches on core %(core)) "%(uagent)"

# Flask-related settings
chdir = /code
module = api.base.wsgi:application
env = OSF_PRODUCTION=1

# process-related settings
master = true
processes = 4
threads = 1
harakiri = 120
buffer-size = 8192
socket = :8000
stats = 127.0.0.1:1717
vacuum = true

# greenlet settings
gevent=2000
gevent-monkey-patch=true

# Dump the resolved configuration at startup.
# FIX: the trailing bare "master" line was removed — it duplicated
# "master = true" above — and "show-config" is written as a key = value
# pair, which is the form uWSGI's ini parser expects.
show-config = true
29 |
--------------------------------------------------------------------------------
/roles/docker-tokumx/defaults/main.yml:
--------------------------------------------------------------------------------
# Defaults for the docker-tokumx role (TokuMX, a MongoDB-compatible engine).
docker_tokumx_name: tokumx_1
docker_tokumx_image: centerforopenscience/tokumx:latest
docker_tokumx_command: mongod --config /etc/tokumx.conf
# Host-side locations for config, keyfile, and data.
docker_tokumx_conf_dir: /opt/tokumx/conf/
docker_tokumx_conf_file: "{{ docker_tokumx_conf_dir }}tokumx.conf"
docker_tokumx_source_conf_file: tokumx.conf
docker_tokumx_key_file: "{{ docker_tokumx_conf_dir }}tokumx-keyfile"
docker_tokumx_source_key_file: tokumx-keyfile
docker_tokumx_data_dir: /opt/tokumx/data/
docker_tokumx_env: {}
docker_tokumx_hostname: "{{ hostname_name }}"
docker_tokumx_net: bridge
# Exposed to linked containers only; no host ports published by default.
docker_tokumx_expose:
  - 27017
  - 28017
docker_tokumx_ports: []
# Config is read-only inside the container; data dir persists on the host.
docker_tokumx_volumes:
  - "{{ docker_tokumx_conf_file }}:/etc/tokumx.conf:ro"
  - "{{ docker_tokumx_key_file }}:/etc/tokumx-keyfile"
  - "{{ docker_tokumx_data_dir }}:/data/db"
21 |
--------------------------------------------------------------------------------
/roles/docker-celery-flower/tasks/main.yml:
--------------------------------------------------------------------------------
# Tasks for the docker-celery-flower role.

# Tag-gated restart: only runs when the play is invoked with --tags restart.
- name: Restart docker celery flower container
  become: yes
  shell: "docker restart {{ docker_celery_flower_name }}"
  tags:
    - restart


# Always pull the latest image and (re)create the container if its
# configuration or image changed (state: reloaded).
- name: Run docker celery flower container
  docker:
    command: "{{ docker_celery_flower_command }}"
    env: "{{ docker_celery_flower_env }}"
    expose: "{{ docker_celery_flower_expose }}"
    hostname: "{{ docker_celery_flower_hostname }}"
    links: "{{ docker_celery_flower_links }}"
    image: "{{ docker_celery_flower_image }}"
    name: "{{ docker_celery_flower_name }}"
    net: "{{ docker_celery_flower_net }}"
    ports: "{{ docker_celery_flower_ports }}"
    pull: always
    restart_policy: always
    state: reloaded
    volumes: "{{ docker_celery_flower_volumes }}"
  tags:
    - install
    - upgrade
26 |
--------------------------------------------------------------------------------
/roles/Ansibles.monit/defaults/main.yml:
--------------------------------------------------------------------------------
# file: monit/defaults/main.yml
# Defaults for the monit monitoring daemon role.

monit_notify_email: "me@localhost"

monit_logfile: "syslog facility log_daemon"

# Check interval and initial delay, in seconds.
monit_poll_period: 60
monit_poll_start_delay: 120

monit_eventqueue_directory: "/var/lib/monit/events"
# NOTE(review): "eventque" looks like a typo for "eventqueue", but the
# role's templates consume this exact name — rename both together if fixed.
monit_eventque_slots: 100

monit_mailformat_from: "monit@{{inventory_hostname}}"
monit_mailformat_subject: "$SERVICE $EVENT"
monit_mailformat_message: "Monit $ACTION $SERVICE at $DATE on $HOST: $DESCRIPTION."

monit_mailserver_host: "localhost"
# monit_mailserver_port:
# monit_mailserver_username:
# monit_mailserver_password:
# monit_mailserver_encryption:
monit_mailserver_timeout: 60

# Embedded web UI: bound to localhost only by default.
monit_port: 3737
monit_address: "localhost"
monit_allow: ["localhost"]
# monit_username:
# monit_password:
monit_ssl: no
monit_cert: "/etc/monit/monit.pem"
31 |
--------------------------------------------------------------------------------
/roles/docker-cas/files/nginx/conf.d/cas.conf:
--------------------------------------------------------------------------------
# Virtual host for test-accounts.osf.io: force HTTPS, then proxy to the
# CAS application container.
server {
    listen 80;
    server_name test-accounts.osf.io;
    # Permanent redirect of all plain-HTTP traffic to HTTPS.
    return 301 https://test-accounts.osf.io$request_uri;
}

server {
    listen 443 ssl;
    server_name test-accounts.osf.io;

    ssl_certificate /etc/ssl/private/default.crt;
    ssl_certificate_key /etc/ssl/private/default.key;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # FIX: RC4 is broken and prohibited (RFC 7465); exclude it instead
    # of preferring it.
    ssl_ciphers HIGH:!aNULL:!MD5:!RC4;
    ssl_prefer_server_ciphers on;

    client_max_body_size 10m;

    location / {
        # Proxy to the CAS container, forwarding the original host,
        # client address, and scheme to the backend.
        proxy_pass http://server:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
28 |
--------------------------------------------------------------------------------
/roles/docker-osf/files/server/uwsgi.ini:
--------------------------------------------------------------------------------
[uwsgi]
# Run worker processes as the unprivileged web user.
uid = www-data
gid = www-data

# add user-agent, http://uwsgi.unbit.narkive.com/jEtphIzE/default-log-format-explained#post5
log-format = [pid: %(pid)|app: ?|req: ?/?] %(addr) (%(user)) {%(vars) vars in %(pktsize) bytes} [%(ctime)] %(method) %(uri) => generated %(rsize) bytes in %(msecs) msecs (%(proto) %(status)) %(headers) headers in %(hsize) bytes (%(switches) switches on core %(core)) "%(uagent)"

# Flask-related settings
chdir = /code
module = main:app
env = OSF_PRODUCTION=1
env = DJANGO_SETTINGS_MODULE=api.base.settings

# process-related settings
master = true
workers = 4
threads = 1
harakiri = 120
buffer-size = 8192
socket = :5000
stats = 127.0.0.1:1717
vacuum = true

# greenlet settings
gevent=2000
gevent-monkey-patch=true

# Dump the resolved configuration at startup.
# FIX: the trailing bare "master" line was removed — it duplicated
# "master = true" above — and "show-config" is written as a key = value
# pair, which is the form uWSGI's ini parser expects.
show-config = true
30 |
--------------------------------------------------------------------------------
/roles/jenkins-deployment/tasks/main.yml:
--------------------------------------------------------------------------------
# Grant the deploy user passwordless docker via sudoers and install the
# deploy script it will run.

- name: Ensure /etc/sudoers.d directory is present
  file: path=/etc/sudoers.d state=directory

- name: Adds user to the sudoers
  # Modernized from the legacy "action:" string syntax.
  lineinfile:
    dest: "/etc/sudoers.d/{{ deploy_script_owner }}"
    state: present
    create: yes
    regexp: "{{ deploy_script_owner }} .*"
    line: "{{ deploy_script_owner }} ALL=(ALL) NOPASSWD: /usr/bin/docker"

# FIX: this previously hardcoded /etc/sudoers.d/jenkins, so when
# deploy_script_owner was anything other than "jenkins" the file created
# above was never given the required 0440 mode (and the task failed on a
# missing path). Use the same templated path as the lineinfile task.
- name: Ensure /etc/sudoers.d files have correct permissions
  file:
    path: "/etc/sudoers.d/{{ deploy_script_owner }}"
    mode: "0440"
    state: file
    owner: root
    group: root

- name: Ensures deploy dir exists
  file: path="{{ deploy_script_file_dir }}" state=directory owner="{{ deploy_script_owner }}" group="{{ deploy_script_owner }}"

- name: copy deploy script
  copy:
    src: "{{ deploy_script_source_file }}"
    dest: "{{ deploy_script_file }}"
    owner: "{{ deploy_script_owner }}"
    mode: 0500
  become: yes
20 |
--------------------------------------------------------------------------------
/roles/docker-cos/files/nginx/conf.d/cos.io.conf:
--------------------------------------------------------------------------------
# Virtual host for test.cos.io: force HTTPS, serve static assets
# directly, and hand everything else to the uWSGI backend.
server {
    listen 80;
    server_name test.cos.io;
    # Permanent redirect of all plain-HTTP traffic to HTTPS.
    return 301 https://test.cos.io$request_uri;
}

server {
    # FIX: "ssl on;" is deprecated — enable TLS on the listen directive.
    listen 443 ssl;
    server_name test.cos.io;

    ssl_certificate /etc/ssl/private/default.crt;
    ssl_certificate_key /etc/ssl/private/default.key;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # FIX: RC4 is broken and prohibited (RFC 7465); exclude it instead
    # of preferring it.
    ssl_ciphers HIGH:!aNULL:!MD5:!RC4;
    ssl_prefer_server_ciphers on;

    root /code;
    client_max_body_size 250M;

    location /static {
        alias /code/static;
    }

    location /media {
        alias /code/static/media;
    }

    location / {
        # Pass requests to uwsgi application
        include uwsgi_params;
        uwsgi_buffering off;
        uwsgi_pass uwsgi://server:8000;
    }
}
37 |
--------------------------------------------------------------------------------
/roles/docker-rabbitmq/tasks/main.yml:
--------------------------------------------------------------------------------
# Tasks for the docker-rabbitmq role.

# Create the host-side data directory the container will mount.
- name: Ensure docker rabbitmq directories exist
  become: yes
  file:
    state: directory
    path: "{{ item }}"
  with_items:
    - "{{ docker_rabbitmq_data_dir }}"
  tags:
    - install
    - upgrade


# Tag-gated restart: only runs when the play is invoked with --tags restart.
- name: Restart docker rabbitmq container
  become: yes
  shell: "docker restart {{ docker_rabbitmq_name }}"
  tags:
    - restart


# Always pull the latest image and (re)create the container if its
# configuration or image changed (state: reloaded).
- name: Run docker rabbitmq container
  docker:
    env: "{{ docker_rabbitmq_env }}"
    hostname: "{{ docker_rabbitmq_hostname }}"
    image: "{{ docker_rabbitmq_image }}"
    name: "{{ docker_rabbitmq_name }}"
    net: "{{ docker_rabbitmq_net }}"
    ports: "{{ docker_rabbitmq_ports }}"
    pull: always
    restart_policy: always
    state: reloaded
    volumes: "{{ docker_rabbitmq_volumes }}"
  tags:
    - install
    - upgrade
35 |
--------------------------------------------------------------------------------
/roles/logentries/files/filters.py:
--------------------------------------------------------------------------------
1 | # Example Filters
2 | #
3 | # # filter celery events
4 | # def filter_celery_events(events):
5 | # filtered_events = []
6 | # for event in events.split('\n')[:-1]:
7 | # if event.find('Attempt to decode JSON with unexpected mimetype: text/html; charset=utf-8') == -1:
8 | # filtered_events.append(event)
9 | # return ''.join(x + '\n' for x in filtered_events)
10 | #
11 | #
12 | # # filter server events
13 | # def filter_server_events(events):
14 | # filtered_events = []
15 | # for event in events.split('\n')[:-1]:
16 | # if event.find('[INFO][tornado.access]: 200 GET /status') == -1:
17 | # filtered_events.append(event)
18 | # return ''.join(x + '\n' for x in filtered_events)
19 | #
20 | #
21 | # filters = {
22 | # 'celery_1.log': filter_celery_events,
23 | # 'server_1.log': filter_server_events,
24 | # 'server_2.log': filter_server_events,
25 | # }
26 |
--------------------------------------------------------------------------------
/roles/docker-osf/tasks/sharejs.yml:
--------------------------------------------------------------------------------
# Tasks for the ShareJS piece of the docker-osf role.

# Tag-gated restart: runs with --tags restart or --tags restart_osf.
- name: Restart docker osf sharejs container
  become: yes
  shell: "docker restart {{ docker_osf_sharejs_name }}"
  tags:
    - restart
    - restart_osf


# Always pull the latest image and (re)create the container if its
# configuration or image changed (state: reloaded).
- name: Run docker osf sharejs container
  docker:
    command: "{{ docker_osf_sharejs_command }}"
    env: "{{ docker_osf_sharejs_env }}"
    expose: "{{ docker_osf_sharejs_expose }}"
    hostname: "{{ docker_osf_sharejs_hostname }}"
    image: "{{ docker_osf_sharejs_image }}"
    links: "{{ docker_osf_sharejs_links }}"
    name: "{{ docker_osf_sharejs_name }}"
    net: "{{ docker_osf_sharejs_net }}"
    ports: "{{ docker_osf_sharejs_ports }}"
    pull: always
    restart_policy: always
    state: reloaded
    tty: yes
    volumes: "{{ docker_osf_sharejs_volumes }}"
    volumes_from: "{{ docker_osf_sharejs_volumes_from }}"
  tags:
    - install
    - upgrade
29 |
--------------------------------------------------------------------------------
/roles/docker-cas/files/shibboleth-sp/apache2/sites-enabled/default.conf:
--------------------------------------------------------------------------------
1 |
2 | ServerName https://test-accounts.osf.io:443
3 | UseCanonicalName On
4 | ServerAdmin admin@osf.io
5 |
6 | ProxyRequests off
7 |
8 |
9 | # ShibDisable on
10 |
11 | ProxyPass http://cas:8080/
12 | ProxyPassReverse http://cas:8080/
13 |
14 |
15 |
16 | AuthType shibboleth
17 | # ShibRequestSetting requireSession 0
18 | Require shibboleth
19 |
20 | # https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPSpoofChecking
21 | # - Jetty 9 drops AJP Support (https://bugs.eclipse.org/bugs/show_bug.cgi?id=425244)
22 | ShibUseEnvironment off
23 | ShibUseHeaders on
24 |
25 | ProxyPass http://cas:8080/login
26 | ProxyPassReverse http://cas:8080/login
27 |
28 |
29 |
30 | ProxyPass !
31 | SetHandler shib
32 |
33 |
34 |
--------------------------------------------------------------------------------
/roles/rackspace-cloudbackup/tasks/main.yml:
--------------------------------------------------------------------------------
# Install the Rackspace Cloud Backup agent from its .deb updater package.
# Registration/start are left as manual steps (see the reminder tasks).

- name: Download the Cloud Backup Agent package
  get_url:
    url: http://agentrepo.drivesrvr.com/debian/cloudbackup-updater-latest.deb
    dest: /tmp/cloudbackup-updater-latest.deb


- name: Install the Cloud Backup Agent package
  become: yes
  apt:
    deb: /tmp/cloudbackup-updater-latest.deb
    state: present


- name: Update apt package cache (this can sometimes fail)
  become: yes
  apt: update_cache=yes


# NOTE(review): "apt-get install -f" repairs any dependencies the .deb
# install left broken — presumably needed because the updater package
# pulls in dependencies post-install; confirm it is still required.
- name: Run an apt install force
  become: yes
  command: apt-get install -f


# Sanity check that the updater binary is installed and runnable.
- name: Check the installation
  become: yes
  command: cloudbackup-updater -v


- name: Reminder | Setup the Rackspace Backup Agent
  debug:
    msg: sudo /usr/local/bin/driveclient --configure


- name: Reminder | Start the Rackspace Backup Agent
  debug:
    msg: sudo service driveclient start
37 |
--------------------------------------------------------------------------------
/roles/(legacy)/elasticsearch/defaults/main.yml:
--------------------------------------------------------------------------------
---
# Elasticsearch Ansible Variables
# Defaults for the (legacy) elasticsearch role: ES 1.2.x with Oracle
# Java 7 from the webupd8team PPA.

elasticsearch_user: elasticsearch
elasticsearch_group: elasticsearch
elasticsearch_download_url: https://download.elasticsearch.org/elasticsearch/elasticsearch
elasticsearch_version: 1.2.1
elasticsearch_apt_repos:
  - 'ppa:webupd8team/java'
elasticsearch_apt_java_package: oracle-java7-installer
elasticsearch_apt_dependencies:
  - htop
  - ntp
  - unzip
# Raised file-descriptor limit for the ES process.
elasticsearch_max_open_files: 65535
elasticsearch_home_dir: /usr/share/elasticsearch
elasticsearch_plugin_dir: /usr/share/elasticsearch/plugins
elasticsearch_log_dir: /var/log/elasticsearch
elasticsearch_data_dir: /var/lib/elasticsearch
elasticsearch_work_dir: /tmp/elasticsearch
elasticsearch_conf_dir: /etc/elasticsearch

# Non-Elasticsearch Defaults
apt_cache_valid_time: 300 # seconds between "apt-get update" calls.
elasticsearch_install_java: "true"
26 |
--------------------------------------------------------------------------------
/roles/(legacy)/tokumx/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
# Source: https://github.com/Tokutek/mongo/wiki/Installing-on-Debian-and-Ubuntu

##### Installation #####

- name: Add Tokutek package signing key
  apt_key:
    keyserver: keyserver.ubuntu.com
    id: 505A7412
    state: present
  become: yes

- name: Add deb entry for TokuMX
  copy:
    src: tokumx.list
    dest: /etc/apt/sources.list.d/tokumx.list
  become: yes

# FIX: the old "Add tokumx apt repository" shell/tee task was removed:
# it wrote the same /etc/apt/sources.list.d/tokumx.list that the copy
# task above already manages, and its "creates:" guard meant it never
# ran once that file existed — a permanent no-op.

- name: Update APT package cache
  apt: update_cache=yes
  become: yes

# FIX: package install and service management need root; add become to
# match the other tasks in this file.
- name: Install TokuMX with apt
  apt: name=tokumx state=present
  become: yes

- name: Make sure the tokumx service is running
  service: name=tokumx state=started
  become: yes
33 |
--------------------------------------------------------------------------------
/roles/docker-varnish/defaults/main.yml:
--------------------------------------------------------------------------------
# Defaults for the docker-varnish role.
docker_varnish_name: varnish_1
# Foreground varnishd: 128MB malloc cache, admin on :2000, listen on :8193.
docker_varnish_command: "varnishd -F -f /etc/varnish/default.vcl -s malloc,128m -T 0.0.0.0:2000 -a 0.0.0.0:8193 -p feature=+esi_disable_xml_check"
docker_varnish_image: centerforopenscience/varnish:latest
# Where the VCL lives in the playbook repo (source) ...
docker_varnish_source_conf_dir: roles/docker-varnish/files/conf/
docker_varnish_source_conf_file: "{{ docker_varnish_source_conf_dir }}default.vcl"
# ... and where it is deployed on the docker host.
docker_varnish_conf_dir: /opt/varnish/conf/
docker_varnish_conf_file: "{{ docker_varnish_conf_dir }}default.vcl"
docker_varnish_data_dir: /opt/varnish/data/
docker_varnish_hostname: "{{ hostname_name }}"
docker_varnish_env: {}
docker_varnish_links: []
docker_varnish_net: bridge
docker_varnish_expose: []
docker_varnish_ports: []
# VCL is read-only inside the container; working dir is read-write.
docker_varnish_volumes:
  - "{{ docker_varnish_conf_file }}:/etc/varnish/default.vcl:ro"
  - "{{ docker_varnish_data_dir }}:/var/lib/varnish/:rw"
docker_varnish_volumes_from: []
19 |
--------------------------------------------------------------------------------
/roles/docker-jam/defaults/main.yml:
--------------------------------------------------------------------------------
# variables
# Source checkout consumed by the image entrypoint via the SOURCE_BRANCH /
# SOURCE_REPO env vars below.
docker_jam_source_branch: develop
docker_jam_source_repo: https://github.com/CenterForOpenScience/jamdb.git

# server
# presumably a switch gating whether the jam server container is managed --
# confirm against the playbooks that apply this role.
docker_jam_server: no
docker_jam_server_name: jam_server_1
docker_jam_server_image: centerforopenscience/jam:latest
docker_jam_server_command: "jam server"
docker_jam_server_source_conf_file: local.yml
docker_jam_server_conf_dir: /opt/jam_server/conf/
docker_jam_server_conf_file: "{{ docker_jam_server_conf_dir }}local.yml"
docker_jam_server_env:
  SOURCE_BRANCH: "{{ docker_jam_source_branch }}"
  SOURCE_REPO: "{{ docker_jam_source_repo }}"
docker_jam_server_net: bridge
docker_jam_server_hostname: "{{ hostname_name }}"
# Application port exposed to linked containers (not published to the host).
docker_jam_server_expose:
  - 1212
docker_jam_server_ports: []
docker_jam_server_links: []
# Host-side local.yml is mounted over the in-image settings file.
docker_jam_server_volumes:
  - "{{ docker_jam_server_conf_file }}:/code/jam/settings/local.yml"
docker_jam_server_volumes_from: []
25 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/apache2/sites-enabled/default.conf:
--------------------------------------------------------------------------------

# NOTE(review): the Apache container directives (<VirtualHost>, <Location>,
# <Proxy>) appear to have been stripped from this file during an export --
# bare ProxyPass / AuthType / Require lines like those below are only valid
# inside such blocks. Recover the original structure before deploying.
ServerName http://accounts.dev.osf.io:80
UseCanonicalName On
ServerAdmin admin@dev.osf.io

ProxyRequests off


# ShibDisable on

# Default reverse proxy to the backing application server on :8080.
ProxyPass http://localhost:8080/
ProxyPassReverse http://localhost:8080/



# Shibboleth-protected section: mod_shib supplies the auth decision.
AuthType shibboleth
# ShibRequestSetting requireSession 0
Require shibboleth

# https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPSpoofChecking
# - Jetty 9 drops AJP Support (https://bugs.eclipse.org/bugs/show_bug.cgi?id=425244)
ShibUseEnvironment off
ShibUseHeaders on

ProxyPass http://localhost:8080/login
ProxyPassReverse http://localhost:8080/login



# Shibboleth handler endpoint: excluded from proxying; handled by mod_shib.
ProxyPass !
SetHandler shib
32 |
33 |
34 |
--------------------------------------------------------------------------------
/roles/(legacy)/elasticsearch/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Elasticsearch Ansible Tasks
3 |
4 | # Install Java
5 | - name: Install OpenJDK 7
6 | apt: pkg=openjdk-7-jdk state=present
7 | when: elasticsearch_install_java
8 |
9 | # Download deb if needed
10 | - name: Download Elasticsearch deb
11 | get_url: url={{ elasticsearch_download_url }}/elasticsearch-{{ elasticsearch_version }}.deb dest=/tmp/elasticsearch-{{ elasticsearch_version }}.deb mode=0440
12 |
13 | # Uninstall previous version if applicable
14 | - name: Uninstalling previous version if applicable
15 | shell: dpkg --remove elasticsearch
16 | - file: path=/usr/share/elasticsearch state=absent
17 |
18 | # Install the deb
19 | - name: Install Elasticsearch deb
20 | shell: dpkg -i /tmp/elasticsearch-{{ elasticsearch_version }}.deb
21 | # shell: dpkg -i -E --force-confnew /tmp/elasticsearch-{{ elasticsearch_version }}.deb
22 |
23 | # Restart Elasticsearch
24 | - name: Restarting Elasticsearch
25 | service: name=elasticsearch state=restarted
26 |
--------------------------------------------------------------------------------
/roles/transparent-huge-pages/files/disable-transparent-hugepages:
--------------------------------------------------------------------------------
#!/bin/sh
### BEGIN INIT INFO
# Provides: disable-transparent-hugepages
# Required-Start: $local_fs
# Required-Stop:
# X-Start-Before: mongod mongodb-mms-automation-agent docker
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Disable Linux transparent huge pages
# Description: Disable Linux transparent huge pages, to improve
#              database performance.
### END INIT INFO

case $1 in
start)
  if [ -d /sys/kernel/mm/transparent_hugepage ]; then
    thp_path=/sys/kernel/mm/transparent_hugepage
  elif [ -d /sys/kernel/mm/redhat_transparent_hugepage ]; then
    thp_path=/sys/kernel/mm/redhat_transparent_hugepage
  else
    # Kernel has no THP support: nothing to do. Note `return` is only legal
    # inside a function; the original `return 0` made dash abort with
    # "return: can only `return' from a function".
    exit 0
  fi

  # Disable THP allocation and defragmentation system-wide.
  echo 'never' > ${thp_path}/enabled
  echo 'never' > ${thp_path}/defrag

  unset thp_path
  ;;
esac
30 |
--------------------------------------------------------------------------------
/roles/newrelic-sysmond/tasks/main.yml:
--------------------------------------------------------------------------------
- name: Ensure newrelic sysmond apt repository
  become: yes
  apt_repository:
    repo: "deb http://apt.newrelic.com/debian/ newrelic non-free"
    state: present


- name: Import public key used by apt
  become: yes
  apt_key:
    keyserver: hkp://pgp.mit.edu:80
    id: 548C16BF
    state: present


- name: Update APT package cache
  become: yes
  apt: update_cache=yes


- name: Install newrelic sysmond
  become: yes
  apt:
    state: present
    pkg: newrelic-sysmond


# Writes a root/newrelic-owned file under /etc/newrelic; this task was
# missing `become` and failed for unprivileged connection users.
- name: Copy docker newrelic sysmond configuration settings
  become: yes
  copy:
    src: "{{ docker_newrelic_sysmond_source_conf_file }}"
    dest: "/etc/newrelic/nrsysmond.cfg"
    owner: newrelic
    group: newrelic
    mode: 0640


# Restart (not just start) so a changed nrsysmond.cfg is picked up.
- name: Start the newrelic sysmon daemon
  become: yes
  service:
    name: newrelic-sysmond
    state: restarted
42 |
--------------------------------------------------------------------------------
/roles/docker-cassandra/tasks/main.yml:
--------------------------------------------------------------------------------
# Restart-only task, selected via `--tags restart`; skipped during normal
# install/upgrade runs, which use the tags below.
- name: Restart docker cassandra container
  become: yes
  shell: "docker restart {{ docker_cassandra_name }}"
  tags:
    - restart


- name: Ensure docker cassandra directories exist
  become: yes
  file:
    state: directory
    path: "{{ item }}"
  with_items:
    - "{{ docker_cassandra_data_dir }}"
  tags:
    - install
    - upgrade


# `pull: always` + `state: reloaded` recreate the container whenever the
# image or its configuration has changed.
# NOTE(review): unlike the sibling docker-postgres role, no `net` or `links`
# parameters are passed here -- confirm that is intentional.
- name: Run docker cassandra container
  docker:
    env: "{{ docker_cassandra_env }}"
    expose: "{{ docker_cassandra_expose }}"
    hostname: "{{ docker_cassandra_hostname }}"
    image: "{{ docker_cassandra_image }}"
    name: "{{ docker_cassandra_name }}"
    ports: "{{ docker_cassandra_ports }}"
    pull: always
    restart_policy: always
    state: reloaded
    volumes: "{{ docker_cassandra_volumes }}"
    volumes_from: "{{ docker_cassandra_volumes_from }}"
  tags:
    - install
    - upgrade
36 |
--------------------------------------------------------------------------------
/roles/csf/templates/csf.allow.j2:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | # Copyright 2006-2014, Way to the Web Limited
3 | # URL: http://www.configserver.com
4 | # Email: sales@waytotheweb.com
5 | ###############################################################################
6 | # The following IP addresses will be allowed through iptables.
7 | # One IP address per line.
8 | # CIDR addressing allowed with a quaded IP (e.g. 192.168.254.0/24).
9 | # Only list IP addresses, not domain names (they will be ignored)
10 | #
11 | # Advanced port+ip filtering allowed with the following format
12 | # tcp/udp|in/out|s/d=port|s/d=ip
13 | # See readme.txt for more information
14 | #
# Note: IP addresses listed in this file will NOT be ignored by lfd, so they
16 | # can still be blocked. If you do not want lfd to block an IP address you must
17 | # add it to csf.ignore
18 |
19 | {% if csf_allowed_ips %}
20 |
21 | {% for ip in csf_allowed_ips %}
22 | {{ ip }}
23 | {% endfor %}
24 |
25 | {% endif %}
26 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/globalLogout.html:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 | Global Logout
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | Global Logout
19 |
20 | Status of Global Logout:
21 |
22 | If the message above indicates success, you have been logged out of all
23 | the applications and systems that support the logout mechanism.
24 |
25 | Regardless of the outcome, it is strongly advised that you close your browser
26 | to ensure that you complete the logout process.
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/roles/docker-postgres/tasks/main.yml:
--------------------------------------------------------------------------------
# Restart-only task, selected via `--tags restart`; skipped during normal
# install/upgrade runs, which use the tags below.
- name: Restart docker postgres container
  become: yes
  shell: "docker restart {{ docker_postgres_name }}"
  tags:
    - restart


- name: Ensure docker postgres directories exist
  become: yes
  file:
    state: directory
    path: "{{ item }}"
  with_items:
    - "{{ docker_postgres_data_dir }}"
  tags:
    - install
    - upgrade


# `pull: always` + `state: reloaded` recreate the container whenever the
# image or its configuration has changed.
- name: Run docker postgres container
  docker:
    env: "{{ docker_postgres_env }}"
    expose: "{{ docker_postgres_expose }}"
    hostname: "{{ docker_postgres_hostname }}"
    image: "{{ docker_postgres_image }}"
    links: "{{ docker_postgres_links }}"
    name: "{{ docker_postgres_name }}"
    net: "{{ docker_postgres_net }}"
    ports: "{{ docker_postgres_ports }}"
    pull: always
    restart_policy: always
    state: reloaded
    volumes: "{{ docker_postgres_volumes }}"
  tags:
    - install
    - upgrade
37 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/accessError.html:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 | Authorization Failed
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | Authorization Failed
19 |
20 |
21 | Based on the information provided to this application about you, you are
22 | not authorized to access the resource at " "
23 |
24 |
25 |
26 | Please contact the administrator of this service or application if you
27 | believe this to be an error at
28 |
29 |
30 |
31 |
32 |
--------------------------------------------------------------------------------
/roles/security_checks/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: security_checks
2 |
3 | An ansible role for performing various vulnerability tests.
4 |
5 |
6 | ## CSF tests
7 |
8 | CSF tests will be run in TESTING mode. TESTING mode will be disabled after the tests complete.
9 |
10 | ### Port scan tests
11 |
12 | To enable port scan tests, define the following variables. In particular, the `check_portscan_hosts` variable should be a list of remote hosts to test.
13 |
14 | ```yaml
15 | check_portscan: yes
16 | # csf should hang connections if a portscan is detected; the port scan denial
17 | # test will wait this number of seconds before declaring a port scan denied
18 | check_portscan_timeout: 3
19 | # Remote hosts to attempt a port scan
20 | check_portscan_hosts:
21 | - 192.168.111.111
22 | ```
23 |
24 | When CSF detects a port scan, it will hang the attacker's connection. The port scan denial test in this role will attempt to do a port scan, waiting `check_portscan_timeout` seconds to find an open port. If an open port is found, the task will fail.
25 |
--------------------------------------------------------------------------------
/bastion.yml:
--------------------------------------------------------------------------------
# Play for provisioning a bastion host that runs an OpenVPN container.
- name: Set up bastion
  hosts: all
  vars:
    docker_openvpn_name: "{{ docker_env }}_bastion_openvpn_1"
    # docker_openvpn_source_conf_dir: "..."
    docker_openvpn_conf_dir: "/opt/{{ docker_env }}_bastion_openvpn/conf/"
    docker_openvpn_volumes:
      - "{{ docker_openvpn_conf_dir }}:/etc/openvpn:rw"
  # Single pre_tasks list: the play previously declared `pre_tasks` twice
  # (a duplicate mapping key); the second list silently replaced the first,
  # dropping the tag-guard `fail` task below.
  pre_tasks:
    # Untagged on purpose: it only runs (and aborts the play) when the
    # operator supplied no --tags.
    - fail: msg="One or more tags must be specified to run this playbook"

    - name: Copy openvpn configuration directory
      copy:
        src: "{{ docker_openvpn_source_conf_dir }}"
        dest: "{{ docker_openvpn_conf_dir }}"
      tags:
        - install
        - settings
        - upgrade

    - name: Set net.ipv4.ip_forward in /etc/sysctl.conf
      sysctl:
        name: net.ipv4.ip_forward
        value: 1
        state: present
      tags:
        - install
        - settings
        - upgrade
  roles:
    - role: docker-openvpn
      when: docker_openvpn
33 |
--------------------------------------------------------------------------------
/roles/docker-ember/defaults/main.yml:
--------------------------------------------------------------------------------
# variables
# NOTE(review): these defaults are placeholders (empty branch, repo URL with
# no project segment, working dir "//") -- every deployment is expected to
# override them; confirm against the playbooks that apply this role.
docker_ember_source_branch:
docker_ember_source_repo: https://github.com/CenterForOpenScience/
docker_ember_working_dir: //

# storage
docker_ember: no
docker_ember_name: ember_1
# Placeholder image name -- the application segment is supplied by overrides.
docker_ember_image: centerforopenscience/:latest
docker_ember_command: gosu www-data ember build --env production
docker_ember_source_conf_dir: roles/docker-ember/files/
docker_ember_code_dir: /opt/ember/code/
docker_ember_conf_dir: /opt/ember/conf/
docker_ember_env:
  SOURCE_BRANCH: "{{ docker_ember_source_branch }}"
  SOURCE_REPO: "{{ docker_ember_source_repo }}"
  WORKDIR: "{{ docker_ember_working_dir }}"
docker_ember_hostname: "{{ hostname_name }}"
docker_ember_net: bridge
docker_ember_links: []
# NOTE(review): both mounts append ".env" to the host-side dirs; the first
# looks like it was meant to mount the code dir itself rather than
# "<code_dir>.env" -- verify the intended sources before relying on this.
docker_ember_volumes:
  - "{{ docker_ember_code_dir }}.env:{{ docker_ember_working_dir }}:rw"
  - "{{ docker_ember_conf_dir }}.env:{{ docker_ember_working_dir }}.env:rw"
docker_ember_volumes_from: []
25 |
--------------------------------------------------------------------------------
/roles/security_checks/tasks/main.yml:
--------------------------------------------------------------------------------

# http://www.unixmen.com/9-best-practices-to-secure-your-linux-desktop-and-server/

# Scan the whole filesystem for FormMail-style scripts (historically abused
# as open mail relays). failed_when marks the task failed when `find`
# printed any matches; ignore_errors lets the remaining checks still run.
- name: Security Check | Find Form Mail
  shell: find / -name "[Ff]orm[mM]ai*"
  when: check_mailers
  become: yes
  # Failed if any files are found
  register: result
  failed_when: result.stdout
  ignore_errors: yes

# http://www.unixmen.com/9-best-practices-to-secure-your-linux-desktop-and-server/

# Same pattern as above, for CGIeMail scripts.
- name: Security Check | Find CGIeMail
  shell: find / -name "[Cc]giemai*"
  when: check_mailers
  become: yes
  # Fail if any files are found
  register: result
  failed_when: result.stdout
  ignore_errors: yes


# CSF port-scan denial test (see roles/security_checks/README.md).
- include: tests/deny_port_scans.yml
  when: check_csf

- include: tests/check_configuration.yml


# /etc/motd http://www.unixmen.com/9-best-practices-to-secure-your-linux-desktop-and-server/

# disable telnet http://www.unixmen.com/9-best-practices-to-secure-your-linux-desktop-and-server/
34 |
--------------------------------------------------------------------------------
/roles/docker-cas/defaults/main.yml:
--------------------------------------------------------------------------------
# variables
docker_cas_source_branch: master
docker_cas_source_repo: https://github.com/CenterForOpenScience/cas-overlay.git

# servers

docker_cas_server: no
docker_cas_server_image: centerforopenscience/cas:4.1
# JVM system property so the CAS overlay resolves its config under /etc/cas.
docker_cas_server_definitions: "-Dproject.parent.basedir=/etc/cas"
docker_cas_server_source_conf_dir: etc
docker_cas_server_conf_dir: /opt/cas_server/conf/
docker_cas_server_hostname: "{{ hostname_name }}"
docker_cas_server_name: "cas_server_1"
docker_cas_server_net: bridge
docker_cas_server_env:
  SOURCE_BRANCH: "{{ docker_cas_source_branch }}"
  SOURCE_REPO: "{{ docker_cas_source_repo }}"
  CAS_DB_USERNAME: postgres
  # Intentionally empty default -- must be supplied per environment.
  CAS_DB_PASSWORD:
  # presumably the OSF mongo database CAS talks to -- confirm with the
  # cas-overlay configuration.
  OSF_DB_PORT_27017_TCP_ADDR: 127.0.0.1
  OSF_DB_PORT_27017_TCP_PORT: 27017
  OSF_DB_NAME: osf20130903
docker_cas_server_expose:
  - 8080
  - 8443
docker_cas_server_ports: []
docker_cas_server_links: []
docker_cas_server_volumes:
  - "{{ docker_cas_server_conf_dir }}:/etc/cas/etc"
30 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/sslError.html:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 | POST Failed
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | POST Failed
19 |
20 |
You have attempted to submit information without the protection
22 | of SSL to this site.
23 |
24 |
25 |
26 | For the protection of your submission and the integrity of the site,
27 | this is not permitted. Please try accessing the server with a
28 | URL starting with https:// and report this problem
29 | to
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/roles/docker-dor/defaults/main.yml:
--------------------------------------------------------------------------------
# variables
docker_dor_source_branch: master
docker_dor_source_repo: https://github.com/CenterForOpenScience/DirectoryOfRepositories.git

# server
docker_dor_server: no
docker_dor_server_name: dor_server_1
# Fixed image tag: was "dor:laster", a typo for "latest" -- no such tag
# exists in the registry, so container pulls would fail.
docker_dor_server_image: centerforopenscience/dor:latest
docker_dor_server_command: "uwsgi --ini /etc/uwsgi/uwsgi.ini"
docker_dor_server_source_conf_file: uwsgi.ini
docker_dor_server_conf_dir: /opt/dor_server/conf/
docker_dor_server_conf_file: "{{ docker_dor_server_conf_dir }}uwsgi.ini"
docker_dor_server_env:
  SOURCE_BRANCH: "{{ docker_dor_source_branch }}"
  SOURCE_REPO: "{{ docker_dor_source_repo }}"
docker_dor_server_net: bridge
docker_dor_server_hostname: "{{ hostname_name }}"
docker_dor_server_expose:
  - 8000
docker_dor_server_ports: []
docker_dor_server_links: []
# NOTE(review): the second mount references docker_storage_conf_dir, which
# is defined outside this role -- confirm it is in scope wherever this role
# is applied.
docker_dor_server_volumes:
  - "{{ docker_dor_server_conf_file }}:/etc/uwsgi/uwsgi.ini"
  - "{{ docker_storage_conf_dir }}local.py:/code/RepoDir/settings/local.py:ro"
docker_dor_server_volumes_from: []
26 |
--------------------------------------------------------------------------------
/roles/(legacy)/sentry/files/create_superuser.py:
--------------------------------------------------------------------------------
"""Create (or reset) the single Sentry superuser.

Usage: python create_superuser.py <username> <password> <email>

If a user with the given username exists it is promoted/reset; otherwise any
other superusers are removed and a fresh account is created, so exactly one
superuser remains afterwards.
"""
import sys
sys.path.append('/etc/sentry')

import os
# Must be set before any Django model import so settings can be configured.
os.environ['DJANGO_SETTINGS_MODULE'] = 'conf'

USERNAME, PASSWORD, EMAIL = sys.argv[1:]


def main():
    from django.contrib.auth import get_user_model
    User = get_user_model()

    super_user = User.objects.filter(username=USERNAME)

    if super_user:
        super_user = super_user[0]
        super_user.username = USERNAME
        # set_password() stores the salted hash; the previous direct
        # assignment (`super_user.password = PASSWORD`) stored the raw
        # string and made authentication impossible.
        super_user.set_password(PASSWORD)
        super_user.email = EMAIL
        super_user.is_staff = True
        super_user.is_superuser = True
        super_user.save()

        sys.exit(0)

    # Remove other superusers so the target account is the only one.
    # Model.delete() takes no `save` argument; the old `delete(save=True)`
    # call raised TypeError on stock Django user models.
    for user in User.objects.filter(is_superuser=True):
        user.delete()

    # create_user() hashes the password itself.
    super_user = User.objects.create_user(
        username=USERNAME,
        password=PASSWORD,
        email=EMAIL,
    )
    super_user.is_staff = True
    super_user.is_superuser = True
    super_user.save()

    sys.exit(0)


main()
41 |
--------------------------------------------------------------------------------
/roles/docker-postgres-vacuumlo/defaults/main.yml:
--------------------------------------------------------------------------------
# Defaults for the scheduled vacuumlo (orphaned-large-object cleanup) job.
docker_postgres_vacuumlo_name: vacuumlo_1
docker_postgres_vacuumlo_image: centerforopenscience/postgres:9.4-vacuumlo
docker_postgres_vacuumlo_hostname: "{{ hostname_name }}"
docker_postgres_vacuumlo_schedule: 0 2 * * * # Daily @ 2 AM
# Connection settings; the empty defaults are expected to be overridden.
docker_postgres_vacuumlo_db_host:
docker_postgres_vacuumlo_db_port:
docker_postgres_vacuumlo_db_user:
docker_postgres_vacuumlo_db_password:
docker_postgres_vacuumlo_db_name: database_name
docker_postgres_vacuumlo_net: bridge
# NOTE(review): `default(omit)` only omits top-level module parameters, not
# values nested inside a dict -- when these vars are unset the container env
# likely receives an "__omit_place_holder__..." string instead of no key;
# verify on a deployed host.
docker_postgres_vacuumlo_env:
  SCHEDULE: "{{ docker_postgres_vacuumlo_schedule }}"
  DB_HOST: "{{ docker_postgres_vacuumlo_db_host | default(omit) }}"
  DB_PORT: "{{ docker_postgres_vacuumlo_db_port | default(omit) }}"
  DB_USER: "{{ docker_postgres_vacuumlo_db_user | default(omit) }}"
  DB_PASSWORD: "{{ docker_postgres_vacuumlo_db_password | default(omit) }}"
  DB_NAME: "{{ docker_postgres_vacuumlo_db_name }}"
docker_postgres_vacuumlo_expose: []
docker_postgres_vacuumlo_ports: []
docker_postgres_vacuumlo_links: []
docker_postgres_vacuumlo_volumes: []
22 |
--------------------------------------------------------------------------------
/roles/(legacy)/nginx/tasks/modules/upload_progress_module.yml:
--------------------------------------------------------------------------------
# file: nginx/tasks/modules/upload_progress_module.yml
# configure flag: --add-module=/tmp/nginx_upload_progress

# to be completed...

- name: Modules | Download the upload_progress_module source
  get_url:
    url: "{{nginx_upload_progress_url}}"
    dest: "/tmp/nginx-upload-progress-module-{{nginx_upload_progress_version}}.tar.gz"

- name: Modules | Unpack the upload_progress_module source
  command: tar -xvzf /tmp/nginx-upload-progress-module-{{nginx_upload_progress_version}}.tar.gz chdir=/tmp creates=/tmp/nginx-upload-progress-module-{{nginx_upload_progress_version}}

# Use `become` instead of shelling out to bare `sudo`, which bypasses
# Ansible's privilege-escalation settings and breaks when the connection
# user has no interactive sudo. `creates` makes the copy idempotent.
- name: Modules | Copy the upload_progress_module source folder
  command: cp -R /tmp/nginx-upload-progress-module-{{nginx_upload_progress_version}} /tmp/nginx_upload_progress
  args:
    creates: /tmp/nginx_upload_progress
  become: yes

- name: Modules | Make sure the upload_progress_module configuration is updated
  template:
    src: ../../templates/modules/upload_progress.j2
    dest: "{{nginx_dir}}/sites-available/upload_progress"
    owner: root
    group: root
    mode: 0644
24 |
--------------------------------------------------------------------------------
/roles/docker-cas/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
# nginx master configuration for the CAS container; virtual hosts live in
# conf.d/*.conf, included at the bottom of the http block.
user www-data;
# NOTE(review): fixed at 2 workers here, while the sibling roles use
# `auto` -- confirm whether that difference is intentional.
worker_processes 2;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Standard "main" format extended with the upstream cache status.
    log_format main '$remote_addr - $upstream_cache_status $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    # Compress text-like responses larger than ~1.4 KB.
    gzip on;
    gzip_disable "msie6";
    gzip_min_length 1400;
    gzip_comp_level 2;
    gzip_buffers 4 32k;
    gzip_types text/plain text/css image/png image/gif image/jpeg application/javascript application/x-javascript text/xml text/javascript application/json;

    include /etc/nginx/conf.d/*.conf;
}
37 |
--------------------------------------------------------------------------------
/roles/docker-cos/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
# nginx master configuration for the COS container; virtual hosts live in
# conf.d/*.conf, included at the bottom of the http block.
user www-data;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Standard "main" format extended with the upstream cache status.
    log_format main '$remote_addr - $upstream_cache_status $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    # Compress text-like responses larger than ~1.4 KB.
    gzip on;
    gzip_disable "msie6";
    gzip_min_length 1400;
    gzip_comp_level 2;
    gzip_buffers 4 32k;
    gzip_types text/plain text/css image/png image/gif image/jpeg application/javascript application/x-javascript text/xml text/javascript application/json;

    include /etc/nginx/conf.d/*.conf;
}
37 |
--------------------------------------------------------------------------------
/roles/docker-jam/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
# nginx master configuration for the JamDB container; virtual hosts live in
# conf.d/*.conf, included at the bottom of the http block.
user www-data;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Standard "main" format extended with the upstream cache status.
    log_format main '$remote_addr - $upstream_cache_status $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    # Compress text-like responses larger than ~1.4 KB.
    gzip on;
    gzip_disable "msie6";
    gzip_min_length 1400;
    gzip_comp_level 2;
    gzip_buffers 4 32k;
    gzip_types text/plain text/css image/png image/gif image/jpeg application/javascript application/x-javascript text/xml text/javascript application/json;

    include /etc/nginx/conf.d/*.conf;
}
37 |
--------------------------------------------------------------------------------
/roles/docker-osf/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
# nginx master configuration for the OSF container; virtual hosts live in
# conf.d/*.conf, included at the bottom of the http block.
user www-data;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Standard "main" format extended with the upstream cache status.
    log_format main '$remote_addr - $upstream_cache_status $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    # Compress text-like responses larger than ~1.4 KB.
    gzip on;
    gzip_disable "msie6";
    gzip_min_length 1400;
    gzip_comp_level 2;
    gzip_buffers 4 32k;
    gzip_types text/plain text/css image/png image/gif image/jpeg application/javascript application/x-javascript text/xml text/javascript application/json;

    include /etc/nginx/conf.d/*.conf;
}
37 |
--------------------------------------------------------------------------------
/roles/docker-spa/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
# nginx master configuration for the SPA container; virtual hosts live in
# conf.d/*.conf, included at the bottom of the http block.
user www-data;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Standard "main" format extended with the upstream cache status.
    log_format main '$remote_addr - $upstream_cache_status $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    # Compress text-like responses larger than ~1.4 KB.
    gzip on;
    gzip_disable "msie6";
    gzip_min_length 1400;
    gzip_comp_level 2;
    gzip_buffers 4 32k;
    gzip_types text/plain text/css image/png image/gif image/jpeg application/javascript application/x-javascript text/xml text/javascript application/json;

    include /etc/nginx/conf.d/*.conf;
}
37 |
--------------------------------------------------------------------------------
/roles/docker-scrapi/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
# nginx master configuration for the scrapi container; virtual hosts live in
# conf.d/*.conf, included at the bottom of the http block.
user www-data;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Standard "main" format extended with the upstream cache status.
    log_format main '$remote_addr - $upstream_cache_status $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    # Compress text-like responses larger than ~1.4 KB.
    gzip on;
    gzip_disable "msie6";
    gzip_min_length 1400;
    gzip_comp_level 2;
    gzip_buffers 4 32k;
    gzip_types text/plain text/css image/png image/gif image/jpeg application/javascript application/x-javascript text/xml text/javascript application/json;

    include /etc/nginx/conf.d/*.conf;
}
37 |
--------------------------------------------------------------------------------
/roles/docker-sentry/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
# nginx master configuration for the Sentry container; virtual hosts live in
# conf.d/*.conf, included at the bottom of the http block.
user www-data;
worker_processes auto;

error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    # Standard "main" format extended with the upstream cache status.
    log_format main '$remote_addr - $upstream_cache_status $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # Hide the nginx version in responses and error pages.
    server_tokens off;

    # Compress text-like responses larger than ~1.4 KB.
    gzip on;
    gzip_disable "msie6";
    gzip_min_length 1400;
    gzip_comp_level 2;
    gzip_buffers 4 32k;
    gzip_types text/plain text/css image/png image/gif image/jpeg application/javascript application/x-javascript text/xml text/javascript application/json;

    include /etc/nginx/conf.d/*.conf;
}
37 |
--------------------------------------------------------------------------------
/roles/rackspace-cloudmonitor/tasks/main.yml:
--------------------------------------------------------------------------------
- name: Ensure rackspace apt repository
  become: yes
  apt_repository:
    repo: "deb http://stable.packages.cloudmonitoring.rackspace.com/ubuntu-14.04-x86_64 cloudmonitoring main"
    state: present


- name: Ensure rackspace apt key
  become: yes
  apt_key:
    url: https://monitoring.api.rackspacecloud.com/pki/agent/linux.asc
    state: present


- name: Update apt package cache (this can sometimes fail)
  become: yes
  apt: update_cache=yes


# Writes under root-owned /usr/lib; this task was missing privilege
# escalation and failed for unprivileged connection users.
- name: Copy rackspace agent.plugin scripts
  become: yes
  copy:
    src: plugins/
    dest: /usr/lib/rackspace-monitoring-agent/plugins
    mode: o+x


- name: Install new rackspace cloud monitoring agent
  become: yes
  apt:
    state: present
    pkg: rackspace-monitoring-agent


# The agent needs a one-time interactive setup and a manual start; these
# debug tasks just remind the operator.
- name: Reminder | Setup the Rackspace Monitoring Agent
  debug:
    msg: sudo rackspace-monitoring-agent --setup


- name: Reminder | Start the Rackspace Monitoring Agent
  debug:
    msg: sudo service rackspace-monitoring-agent start
--------------------------------------------------------------------------------
/roles/Ansibles.monit/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) 2014 Pieterjan Vandaele
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/roles/docker-postgres-barman/files/conf/crontab:
--------------------------------------------------------------------------------
1 | # Edit this file to introduce tasks to be run by cron.
2 | #
3 | # Each task to run has to be defined through a single line
4 | # indicating with different fields when the task will be run
5 | # and what command to run for the task
6 | #
7 | # To define the time you can provide concrete values for
8 | # minute (m), hour (h), day of month (dom), month (mon),
9 | # and day of week (dow) or use '*' in these fields (for 'any').
10 | # Notice that tasks will be started based on the cron's system
11 | # daemon's notion of time and timezones.
12 | #
13 | # Output of the crontab jobs (including errors) is sent through
14 | # email to the user the crontab file belongs to (unless redirected).
15 | #
16 | # For example, you can run a backup of all your user accounts
17 | # at 5 a.m. every week with:
18 | # 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
19 | #
20 | # For more information see the manual pages of crontab(5) and cron(8)
21 | #
22 | # m h dom mon dow command
23 | # * * * * * /usr/bin/barman cron <- included in debian package @ 1 min
24 | # 30 23 * * * /usr/bin/barman backup main-db-server <- example full backup @ 11:30 pm (UTC?)
25 |
--------------------------------------------------------------------------------
/roles/generic-users/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) 2014 Pieterjan Vandaele
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/roles/Ansibles.timezone/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) 2014 Pieterjan Vandaele
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/sp-cert.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIC6zCCAdOgAwIBAgIJAOtPE/KVhfCGMA0GCSqGSIb3DQEBBQUAMBcxFTATBgNV
3 | BAMTDGZjZGIwYjUxMzQxMTAeFw0xNTExMDcyMTM4MjhaFw0yNTExMDQyMTM4Mjha
4 | MBcxFTATBgNVBAMTDGZjZGIwYjUxMzQxMTCCASIwDQYJKoZIhvcNAQEBBQADggEP
5 | ADCCAQoCggEBAMJRDaANHbZrNSMazVHR1m0ZBgW1WrXYnZjOF6QtiOXrpNqGsxOZ
6 | jWN7U6tyoqV06dwqx4fiGgz8TIqSnJpZlWOQa/bKLJscby4wIFRDniavBhUeFJlJ
7 | C/34dXV1Wl/rM+vjwb4GNtNxCEmQ5MCJH0ESy4DSORGo10UT2Fcnr/0Zy9tK+UBK
8 | GxU/TeAadl8LzueBzok5MxW6WpOoEyCit7sDSkSV+RFUqNrtVCGwung66arO630Z
9 | F9RETM4NziMzxvO9294zysSDqd19EfYQ28hqwpaOLiHBGbJoQ0Kptt+m3QzmfiLK
10 | +/0QHh62iDfUKoVGeIDW5YmYGLJaRF1+8eMCAwEAAaM6MDgwFwYDVR0RBBAwDoIM
11 | ZmNkYjBiNTEzNDExMB0GA1UdDgQWBBSIpSXmw+VcDrKNQ40nIHNh3NjzGTANBgkq
12 | hkiG9w0BAQUFAAOCAQEAEIxGbn32w3FT2LIs5jEa9Y+tftyzUVPZAAbJk5SvXyII
13 | Ho84BvQ2sJklXHMhVwQChnXGNXLSVVV3AwdtiFjuX0Skjn0a6LJMfI2vC+oE925N
14 | eE2QXeGFGxkh8HcimQqzxC11nWuVoG9ZphiYrsDEpcK9fNiPDaihJZRaqTGxJKki
15 | p+KH0Wmum3TuL9g0ZzGiIROVsHrO5jTi2UveVdEZaZuYAgEnDtgmwSIFeEo3RbUf
16 | BsHQzXK2Wt36Dqr86JglxMy0yH5zTJihThoBdTDiU/kFZ7QeReyDRPCG+zIfX2UI
17 | spsDYrxzjTsFCyhD1CQPtE+5cAQ8dOtAlh3f1Lx6pg==
18 | -----END CERTIFICATE-----
19 |
--------------------------------------------------------------------------------
/roles/Ansibles.build-essential/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License
2 |
3 | Copyright (c) 2014 Pieterjan Vandaele
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/roles/(legacy)/newrelic/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Source: https://docs.newrelic.com/docs/servers/new-relic-servers-linux/installation-configuration/servers-installation-ubuntu-debian
2 |
3 | ##### Install #####
4 |
5 | - name: Ensure newrelic apt repository
6 | become: yes
7 | apt_repository:
8 | repo: "deb http://apt.newrelic.com/debian/ newrelic non-free"
9 | state: present
10 |
11 |
12 | - name: Ensure newrelic apt key
13 | become: yes
14 | apt_key:
15 | url: https://download.newrelic.com/548C16BF.gpg
16 | state: present
17 |
18 |
19 | - name: Update APT package cache
20 | apt: update_cache=yes
21 | become: yes
22 |
23 |
24 | - name: Install new relic system monitoring daemon
25 | become: yes
26 | apt:
27 | state: present
28 | pkg: newrelic-sysmond
29 |
30 |
31 | - name: Copy the newrelic system monitoring daemon configuration/license file
32 | become: yes
33 | template:
34 | group: newrelic
35 | mode: 0640
36 | src: nrsysmond.cfg.j2
37 | dest: "{{ newrelic_conf_dir }}/nrsysmond.cfg"
38 |
39 |
40 | - name: Start service newrelic system monitoring daemon
41 | become: yes
42 | service:
43 | name: newrelic-sysmond
44 | state: started
45 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/templates/database.j2:
--------------------------------------------------------------------------------
1 | #
2 | # PRODUCTION
3 | #
4 | production:
5 | adapter: postgresql
6 | encoding: unicode
7 | database: {{gitlab_db_name}}
8 | pool: 10
9 | username: {{gitlab_user}}
10 | password: {{gitlab_db_pass}}
11 | # host: localhost
12 | # port: 5432
13 | # socket: /tmp/postgresql.sock
14 |
15 | #
16 | # Development specific
17 | #
18 | development:
19 | adapter: postgresql
20 | encoding: unicode
21 | database: gitlabhq_development
22 | pool: 5
23 | username: postgres
24 | password:
25 | # socket: /tmp/postgresql.sock
26 |
27 | #
28 | # Staging specific
29 | #
30 | staging:
31 | adapter: postgresql
32 | encoding: unicode
33 | database: gitlabhq_staging
34 | pool: 5
35 | username: postgres
36 | password:
37 | # socket: /tmp/postgresql.sock
38 |
39 | # Warning: The database defined as "test" will be erased and
40 | # re-generated from your development database when you run "rake".
41 | # Do not set this db to the same as development or production.
42 | test: &test
43 | adapter: postgresql
44 | encoding: unicode
45 | database: gitlabhq_test
46 | pool: 5
47 | username: postgres
48 | password:
49 | # socket: /tmp/postgresql.sock
50 |
--------------------------------------------------------------------------------
/roles/docker-storage/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Perform storage copy operations
2 | become: yes
3 | copy:
4 | src: "{{ item.src | default(omit) }}"
5 | dest: "{{ item.dest | default(omit) }}"
6 | mode: "{{ item.mode | default(omit) }}"
7 | owner: "{{ item.owner | default(omit) }}"
8 | group: "{{ item.group | default(omit) }}"
9 | with_items: "{{ docker_storage_copy_ops }}"
10 | tags:
11 | - install
12 | - upgrade
13 | - settings
14 |
15 | - name: Perform storage file operations
16 | become: yes
17 | file:
18 | state: "{{ item.state | default(omit) }}"
19 | path: "{{ item.path | default(omit) }}"
20 | mode: "{{ item.mode | default(omit) }}"
21 | owner: "{{ item.owner | default(omit) }}"
22 | group: "{{ item.group | default(omit) }}"
23 | recurse: "{{ item.recurse | default(omit) }}"
24 | with_items: "{{ docker_storage_file_ops }}"
25 | tags:
26 | - install
27 | - upgrade
28 | - settings
29 |
30 |
31 | - name: Run docker storage container
32 | docker:
33 | image: busybox:latest
34 | name: "{{ docker_storage_name }}"
35 | volumes: "{{ docker_storage_volumes }}"
36 | tags:
37 | - install
38 | - upgrade
39 | - settings
40 |
--------------------------------------------------------------------------------
/roles/swap/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Set vm.swappiness in /etc/sysctl.conf
2 | sysctl:
3 | name: vm.swappiness
4 | value: "{{ swap_vm_swappiness }}"
5 | state: present
6 | tags:
7 | - install
8 |
9 |
10 | - name: Set vm.vfs_cache_pressure in /etc/sysctl.conf
11 | sysctl:
12 | name: vm.vfs_cache_pressure
13 | value: "{{ swap_vm_vfs_cache_pressure }}"
14 | state: present
15 | tags:
16 | - install
17 |
18 |
19 | - name: Create the file to be used for swap
20 | become: yes
21 | shell: fallocate -l {{ swap_swapfile_size }} /swapfile creates=/swapfile # creates= makes this idempotent; skip if swapfile already exists
22 | tags:
23 | - install
24 |
25 |
26 | - name: Secure swap file permissions
27 | become: yes
28 | shell: chmod 600 /swapfile
29 | tags:
30 | - install
31 |
32 |
33 | - name: Format the swapfile
34 | become: yes
35 | shell: mkswap /swapfile
36 | tags:
37 | - install
38 |
39 |
40 | - name: Add the file to the system as a swap file
41 | become: yes
42 | shell: swapon /swapfile
43 | tags:
44 | - install
45 |
46 |
47 | - name: Add the the swap file to /etc/fstab
48 | mount:
49 | src: /swapfile
50 | name: none
51 | fstype: swap
52 | opts: sw
53 | state: present
54 | tags:
55 | - install
56 |
--------------------------------------------------------------------------------
/roles/docker-mfr/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # variables
2 | docker_mfr_source_branch: master
3 | docker_mfr_source_repo: https://github.com/CenterForOpenScience/modular-file-renderer.git
4 |
5 |
6 | # servers
7 |
8 | docker_mfr_server: no
9 | docker_mfr_server_name: mfr_server
10 | docker_mfr_server_image: centerforopenscience/mfr:latest
11 | docker_mfr_server_source_conf_file: settings.json
12 | docker_mfr_server_conf_dir: /opt/mfr_server/conf/
13 | docker_mfr_server_conf_file: "{{ docker_mfr_server_conf_dir }}settings.json"
14 | docker_mfr_server_ssl_cert_file: ""
15 | docker_mfr_server_ssl_key_file: ""
16 | docker_mfr_server_cert_src: ""
17 | docker_mfr_server_instances: 2
18 | docker_mfr_server_start_port: 7780
19 | docker_mfr_server_env:
20 | ENV: "{{ docker_env | default('test') }}"
21 | SOURCE_BRANCH: "{{ docker_mfr_source_branch }}"
22 | SOURCE_REPO: "{{ docker_mfr_source_repo }}"
23 | docker_mfr_server_net: bridge
24 | docker_mfr_server_links: []
25 | docker_mfr_server_expose:
26 | - 7778
27 | docker_mfr_server_ports: []
28 | docker_mfr_server_volumes:
29 | - "{{ docker_mfr_server_conf_file }}:/home/.cos/mfr-{{ docker_env | default('test') }}.json"
30 | - "{{ docker_mfr_server_conf_dir }}ssl/:/home/.cos/ssl"
31 | docker_mfr_server_volumes_from: []
32 |
--------------------------------------------------------------------------------
/docker-logentries.yml:
--------------------------------------------------------------------------------
1 | - name: Setup docker logentries
2 | hosts: all
3 | pre_tasks:
4 | - fail: msg="One or more tags must be specified to run this playbook"
5 | vars:
6 | docker_fluentd: yes
7 | docker_fluentd_name: fluentd_1
8 | docker_fluentd_source_conf_dir: "{{ root_source_conf_dir }}logentries/fluentd/"
9 | docker_fluentd_conf_dir: /opt/fluentd/conf/
10 | docker_fluentd_conf_file: "{{ docker_fluentd_conf_dir }}fluent.conf"
11 | docker_fluentd_data_dir: /opt/fluentd/data/
12 | docker_fluentd_env:
13 | FLUENTD_GEMS: "fluent-plugin-order fluent-plugin-record-reformer fluent-plugin-jsonbucket" # fluent-plugin-rewrite-tag-filter (major version incompatible, use 1.6.0), fluent-plugin-docker-format (fixed but not released on rubygems.org), fluent-plugin-logentries removed (pr in for fix)
14 | docker_fluentd_volumes:
15 | - "{{ docker_fluentd_data_dir }}:/data"
16 | - "{{ docker_fluentd_conf_dir }}:/etc/fluent:ro"
17 | - "/var/log/:/var/log:ro"
18 | - "/var/lib/docker/containers/:/var/lib/docker/containers:ro"
19 | docker_fluentd_expose:
20 | - 24224
21 | docker_fluentd_ports: []
22 | roles:
23 | - role: rsyslog
24 |
25 | - role: docker-fluentd
26 | when: docker_fluentd
27 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-haproxy/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # variables
2 |
3 | docker_haproxy_name: haproxy_1
4 | docker_haproxy_env: {}
5 | docker_haproxy_conf_dir: /opt/haproxy/conf
6 | docker_haproxy_enable_https: false
7 | docker_haproxy_default_cert_src: ""
8 | docker_haproxy_certsd_dir_src: ""
9 | docker_haproxy_stats_user: User1
10 | docker_haproxy_stats_password: password
11 | docker_haproxy_backends: {}
12 | # - name: my_domain
13 | # domain: my.domain.com
14 | # servers:
15 | # - ip: "192.168.1.2"
16 | # port: 1111
17 | # - ip: "192.168.1.2"
18 | # port: 1112
19 | # - ip: "192.168.1.6"
20 | # port: 1111
21 | # - ip: "192.168.1.6"
22 | # port: 1112
23 |
24 |
25 | # support servers
26 |
27 | docker_rsyslog: no
28 | docker_rsyslog_name: haproxy_rsyslog_1
29 | docker_rsyslog_conf_dir: "{{ docker_haproxy_conf_dir }}/rsyslog"
30 | docker_rsyslog_log_dir: /var/log/haproxy
31 |
32 |
33 | # servers
34 |
35 | docker_haproxy_log_rotate_file: haproxy
36 | docker_haproxy_log_rotate_source_file: logrotate.j2
37 | docker_haproxy_hostname: "haproxy_1"
38 | # To ship logs to the rsyslog container, override this with:
39 | #   docker_haproxy_links: ["{{ docker_rsyslog_name }}:rsyslog"]
40 | docker_haproxy_links: []
41 | docker_haproxy_expose: []
42 | docker_haproxy_ports: []
43 |
--------------------------------------------------------------------------------
/roles/(legacy)/docker-rsyslog/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure docker rsyslog logrotate config
2 | template:
3 | src: "{{ docker_rsyslog_log_rotate_source_file }}"
4 | dest: "/etc/logrotate.d/{{ docker_rsyslog_log_rotate_file }}"
5 | owner: root
6 | group: root
7 | mode: 0644
8 | tags:
9 | - install
10 | - settings
11 |
12 |
13 | - name: Restart docker rsyslog container
14 | become: yes
15 | shell: "docker restart {{ docker_rsyslog_name }}"
16 | tags:
17 | - restart
18 |
19 |
20 | - name: Ensure docker rsyslog directories exist
21 | file:
22 | state: directory
23 | path: "{{ item }}"
24 | with_items:
25 | - "{{ docker_rsyslog_conf_dir }}"
26 | - "{{ docker_rsyslog_log_dir }}"
27 | tags:
28 | - install
29 | - upgrade
30 |
31 |
32 | - name: Run docker rsyslog container
33 | docker:
34 | name: "{{ docker_rsyslog_name }}"
35 | state: running
36 | image: "centerforopenscience/rsyslog:latest"
37 | restart_policy: always
38 | hostname: "{{ hostname_name }}"
39 | env: "{{ docker_rsyslog_env }}"
40 | volumes:
41 | - "{{ docker_rsyslog_conf_dir }}/:/etc/rsyslog.d/"
42 | - "{{ docker_rsyslog_log_dir }}:/log" # fixed typo: was docker_rsyslog_log_log_dir (undefined); defaults define docker_rsyslog_log_dir
43 | tags:
44 | - install
45 | - upgrade
--------------------------------------------------------------------------------
/roles/ssh/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # file: roles/ssh/defaults/main.yml
2 |
3 |
4 | # sshd_config settings
5 | ssh_port: "22"
6 | ssh_protocol: "2"
7 |
8 | ssh_hostkeys:
9 | - /etc/ssh/ssh_host_rsa_key
10 | - /etc/ssh/ssh_host_dsa_key
11 | - /etc/ssh/ssh_host_ecdsa_key
12 | - /etc/ssh/ssh_host_ed25519_key
13 |
14 | ssh_useprivilegeseparation: "yes"
15 | ssh_keyregenerationinterval: "3600"
16 | ssh_serverkeybits: "768"
17 | ssh_syslogfacility: "AUTH"
18 | ssh_loglevel: "INFO"
19 | ssh_logingracetime: "120"
20 | ssh_permitrootlogin: "no"
21 | ssh_strictmodes: "yes"
22 | ssh_rsaauthentication: "yes"
23 | ssh_pubkeyauthentication: "yes"
24 | ssh_rhostsrsaauthentication: "no"
25 | ssh_hostbasedauthentication: "no"
26 | ssh_challengeresponseauthentication: "no"
27 | ssh_passwordauthentication: "no"
28 | ssh_x11forwarding: "yes"
29 | ssh_x11displayoffset: "10"
30 | ssh_printmotd: "no"
31 | ssh_printlastlog: "yes"
32 | ssh_tcpkeepalive: "yes"
33 | ssh_acceptenv: "LANG LC_*"
34 | ssh_subsystem: "sftp /usr/lib/openssh/sftp-server"
35 | ssh_usepam: "yes"
36 | ssh_usedns: "no"
37 | ssh_clientaliveinterval: "1750"
38 | ssh_clientalivecountmax: "0"
39 | ssh_ignorerhosts: "no"
40 |
41 | # run tests
42 | ssh_test: yes
43 | # User for testing
44 | ssh_test_user: vagrant
45 | ssh_test_port: 2222
46 |
--------------------------------------------------------------------------------
/roles/generic-users/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # file: generic-users/tasks/main.yml
2 |
3 | - name: Make sure zsh is installed
4 | apt: name=zsh state=present
5 |
6 | - name: Make sure all groups are present
7 | group:
8 | name: "{{item.name}}"
9 | # gid: "{{item.gid}}"
10 | state: present
11 | with_items: "{{ genericusers_groups }}"
12 |
13 | - name: Make sure all removed groups are not present
14 | group:
15 | name: "{{item.name}}"
16 | state: absent
17 | with_items: "{{ genericusers_groups_removed }}"
18 |
19 | - name: Make sure the users are present
20 | user:
21 | name: "{{item.name}}"
22 | groups: "{{','.join(item.groups)}}"
23 | append: "{{item.append}}"
24 | password: "{{item.pass}}"
25 | comment: ""
26 | shell: "{{item.shell}}"
27 | state: present
28 | with_items: "{{ genericusers_users }}"
29 |
30 | - name: Make sure all removed users are not present
31 | user:
32 | name: "{{item.name}}"
33 | state: absent
34 | remove: yes
35 | with_items: "{{ genericusers_users_removed }}"
36 |
37 | - name: Install the ssh keys for the users
38 | authorized_key:
39 | user: "{{item.0.name}}"
40 | key: "{{item.1}}"
41 | key_options: "{{item.0.ssh_key_options | default(omit)}}"
42 | with_subelements:
43 | - "{{ genericusers_users }}"
44 | - ssh_keys
45 |
--------------------------------------------------------------------------------
/roles/docker-redis/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure docker redis directories exist
2 | become: yes
3 | file:
4 | state: directory
5 | path: "{{ item }}"
6 | with_items:
7 | - "{{ docker_redis_conf_dir }}"
8 | - "{{ docker_redis_data_dir }}"
9 | tags:
10 | - install
11 | - upgrade
12 |
13 |
14 | - name: Copy docker redis configuration settings
15 | copy:
16 | src: "{{ docker_redis_source_conf_file }}"
17 | dest: "{{ docker_redis_conf_file }}"
18 | mode: 0644
19 | tags:
20 | - install
21 | - settings
22 | - upgrade
23 |
24 |
25 | - name: Restart docker redis container
26 | become: yes
27 | shell: "docker restart {{ docker_redis_name }}"
28 | tags:
29 | - restart
30 |
31 |
32 | - name: Run docker redis container
33 | docker:
34 | command: "{{ docker_redis_command }}"
35 | env: "{{ docker_redis_env }}"
36 | expose: "{{ docker_redis_expose }}"
37 | name: "{{ docker_redis_name }}"
38 | hostname: "{{ docker_redis_hostname }}"
39 | net: "{{ docker_redis_net }}"
40 | ports: "{{ docker_redis_ports }}"
41 | pull: always
42 | restart_policy: always
43 | image: "{{ docker_redis_image }}"
44 | state: reloaded
45 | volumes: "{{ docker_redis_volumes }}"
46 | tags:
47 | - install
48 | - upgrade
49 |
--------------------------------------------------------------------------------
/roles/docker-waterbutler/tasks/celery.yml:
--------------------------------------------------------------------------------
1 | - name: Restart docker waterbutler celery container
2 | become: yes
3 | shell: "docker restart {{ docker_waterbutler_celery_name }}_{{ item }}"
4 | with_sequence: count={{ docker_waterbutler_celery_instances }}
5 | when: docker_waterbutler_celery_instances > 0
6 | tags:
7 | - restart
8 |
9 |
10 | - name: Run docker waterbutler celery container
11 | docker:
12 | command: "{{ docker_waterbutler_celery_command }}"
13 | env: "{{ docker_waterbutler_celery_env }}"
14 | expose: "{{ docker_waterbutler_celery_expose }}"
15 | hostname: "{{ docker_waterbutler_celery_hostname }}"
16 | image: "{{ docker_waterbutler_celery_image }}"
17 | links: "{{ docker_waterbutler_celery_links }}"
18 | name: "{{ docker_waterbutler_celery_name }}_{{ item }}"
19 | net: "{{ docker_waterbutler_celery_net }}"
20 | ports: "{{ docker_waterbutler_celery_ports }}"
21 | pull: always
22 | restart_policy: always
23 | state: reloaded
24 | tty: yes
25 | volumes: "{{ docker_waterbutler_celery_volumes }}"
26 | volumes_from: "{{ docker_waterbutler_celery_volumes_from }}"
27 | with_sequence: count={{ docker_waterbutler_celery_instances }}
28 | when: docker_waterbutler_celery_instances > 0
29 | tags:
30 | - install
31 | - upgrade
32 |
--------------------------------------------------------------------------------
/roles/docker-nginx/files/conf/conf.d/default.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name localhost;
4 |
5 | #charset koi8-r;
6 | #access_log /var/log/nginx/log/host.access.log main;
7 |
8 | location / {
9 | root /usr/share/nginx/html;
10 | index index.html index.htm;
11 | }
12 |
13 | #error_page 404 /404.html;
14 |
15 | # redirect server error pages to the static page /50x.html
16 | #
17 | error_page 500 502 503 504 /50x.html;
18 | location = /50x.html {
19 | root /usr/share/nginx/html;
20 | }
21 |
22 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80
23 | #
24 | #location ~ \.php$ {
25 | # proxy_pass http://127.0.0.1;
26 | #}
27 |
28 | # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
29 | #
30 | #location ~ \.php$ {
31 | # root html;
32 | # fastcgi_pass 127.0.0.1:9000;
33 | # fastcgi_index index.php;
34 | # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
35 | # include fastcgi_params;
36 | #}
37 |
38 | # deny access to .htaccess files, if Apache's document root
39 | # concurs with nginx's one
40 | #
41 | #location ~ /\.ht {
42 | # deny all;
43 | #}
44 | }
45 |
--------------------------------------------------------------------------------
/roles/docker-cos/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # variables
2 | docker_cos_source_branch: master
3 | docker_cos_source_repo: https://github.com/centerforopenscience/cos.io.git
4 |
5 | # servers
6 |
7 | docker_cos_server: no
8 | docker_cos_server_name: cos_server_1
9 | docker_cos_server_image: centerforopenscience/cos:latest
10 | docker_cos_server_command: "uwsgi --ini /etc/uwsgi/uwsgi.ini"
11 | docker_cos_server_source_conf_file: server/uwsgi.ini
12 | docker_cos_server_source_app_file: server/local.py
13 | docker_cos_server_conf_dir: /opt/cos_server/conf/
14 | docker_cos_server_conf_file: "{{ docker_cos_server_conf_dir }}uwsgi.ini"
15 | docker_cos_server_app_file: "{{ docker_cos_server_conf_dir }}local.py"
16 | docker_cos_server_data_dir: /opt/cos_server/data/
17 | docker_cos_server_env:
18 | SOURCE_BRANCH: "{{ docker_cos_source_branch }}"
19 | SOURCE_REPO: "{{ docker_cos_source_repo }}"
20 | docker_cos_server_net: bridge
21 | docker_cos_server_hostname: "{{ hostname_name }}"
22 | docker_cos_server_expose:
23 | - 8000
24 | docker_cos_server_ports: []
25 | docker_cos_server_links: []
26 | docker_cos_server_volumes:
27 | - "{{ docker_cos_server_conf_file }}:/etc/uwsgi/uwsgi.ini"
28 | - "{{ docker_cos_server_app_file }}:/code/mysite/local_settings.py"
29 | - "{{ docker_cos_server_data_dir }}:/data"
30 | docker_cos_server_volumes_from: []
31 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/metadataError.html:
--------------------------------------------------------------------------------
1 |
2 |
5 |
6 |
7 |
8 |
9 |
10 | Unknown Identity Provider
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | Unknown or Unusable Identity Provider
19 |
20 | The identity provider supplying your login credentials is not authorized
21 | for use with this service or does not support the necessary capabilities.
22 |
23 | To report this problem, please contact the site administrator at
24 | .
25 |
26 |
27 | Please include the following error message in any email:
28 | Identity provider lookup failed at ( )
29 |
30 | EntityID:
31 |
32 | :
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/roles/docker-fluentd/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure docker fluentd directories exist
2 | become: yes
3 | file:
4 | state: directory
5 | path: "{{ item }}"
6 | with_items:
7 | - "{{ docker_fluentd_conf_dir }}"
8 | - "{{ docker_fluentd_data_dir }}"
9 | tags:
10 | - install
11 | - upgrade
12 |
13 |
14 | - name: Copy docker fluentd configuration settings
15 | copy:
16 | src: "{{ docker_fluentd_source_conf_dir }}"
17 | dest: "{{ docker_fluentd_conf_dir }}"
18 | mode: 0644
19 | tags:
20 | - install
21 | - settings
22 | - upgrade
23 |
24 |
25 | - name: Restart docker fluentd container
26 | become: yes
27 | shell: "docker restart {{ docker_fluentd_name }}"
28 | tags:
29 | - restart
30 |
31 |
32 | - name: Run docker fluentd container
33 | docker:
34 | env: "{{ docker_fluentd_env }}"
35 | expose: "{{ docker_fluentd_expose }}"
36 | hostname: "{{ docker_fluentd_hostname }}"
37 | links: "{{ docker_fluentd_links }}"
38 | image: "{{ docker_fluentd_image }}"
39 | name: "{{ docker_fluentd_name }}"
40 | net: "{{ docker_fluentd_net }}"
41 | ports: "{{ docker_fluentd_ports }}"
42 | pull: always
43 | restart_policy: always
44 | state: reloaded
45 | volumes: "{{ docker_fluentd_volumes }}"
46 | tags:
47 | - install
48 | - upgrade
49 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/console.logger:
--------------------------------------------------------------------------------
1 | log4j.rootCategory=WARN, console
2 |
3 | # fairly verbose for DEBUG, so generally leave at INFO
4 | log4j.category.XMLTooling.XMLObject=INFO
5 | log4j.category.XMLTooling.KeyInfoResolver=INFO
6 | log4j.category.Shibboleth.IPRange=INFO
7 | log4j.category.Shibboleth.PropertySet=INFO
8 |
9 | # raise for low-level tracing of SOAP client HTTP/SSL behavior
10 | log4j.category.XMLTooling.libcurl=INFO
11 |
12 | # useful categories to tune independently:
13 | #
14 | # tracing of SAML messages and security policies
15 | #log4j.category.OpenSAML.MessageDecoder=DEBUG
16 | #log4j.category.OpenSAML.MessageEncoder=DEBUG
17 | #log4j.category.OpenSAML.SecurityPolicyRule=DEBUG
18 | # interprocess message remoting
19 | #log4j.category.Shibboleth.Listener=DEBUG
20 | # mapping of requests to applicationId
21 | #log4j.category.Shibboleth.RequestMapper=DEBUG
22 | # high level session cache operations
23 | #log4j.category.Shibboleth.SessionCache=DEBUG
24 | # persistent storage and caching
25 | #log4j.category.XMLTooling.StorageService=DEBUG
26 |
27 | # define the appender
28 |
29 | log4j.appender.console=org.apache.log4j.ConsoleAppender
30 | #log4j.appender.console.layout=org.apache.log4j.BasicLayout
31 | log4j.appender.console.layout=org.apache.log4j.PatternLayout
32 | log4j.appender.console.layout.ConversionPattern=%d{%Y-%m-%d %H:%M:%S} %p %c %x: %m%n
33 |
--------------------------------------------------------------------------------
/roles/docker-elasticsearch/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Deploy the dockerized Elasticsearch service: create host directories,
2 | # sync configuration, and (re)create the container.
3 | - name: Ensure docker elasticsearch directories exist
4 |   become: yes
5 |   file:
6 |     state: directory
7 |     path: "{{ item }}"
8 |   with_items:
9 |     - "{{ docker_elasticsearch_conf_dir }}"
10 |     - "{{ docker_elasticsearch_data_dir }}"
11 |   tags:
12 |     - install
13 |     - upgrade
14 | 
15 | 
16 | # become is required here: the destination directory is created root-owned
17 | # by the task above, so an unprivileged copy would fail (matches the
18 | # docker-postgres-repmgr role, which escalates for the analogous task).
19 | - name: Copy docker elasticsearch configuration settings
20 |   become: yes
21 |   copy:
22 |     src: "{{ docker_elasticsearch_source_conf_dir }}"
23 |     dest: "{{ docker_elasticsearch_conf_dir }}"
24 |     mode: 0644
25 |   tags:
26 |     - install
27 |     - settings
28 |     - upgrade
29 | 
30 | 
31 | # Restart-only path, selected via the "restart" tag.
32 | - name: Restart docker elasticsearch container
33 |   become: yes
34 |   shell: "docker restart {{ docker_elasticsearch_name }}"
35 |   tags:
36 |     - restart
37 | 
38 | 
39 | # NOTE(review): the legacy "docker" module is deprecated in newer Ansible;
40 | # migrate to docker_container when upgrading — verify target Ansible version.
41 | - name: Run docker elasticsearch container
42 |   docker:
43 |     env: "{{ docker_elasticsearch_env }}"
44 |     expose: "{{ docker_elasticsearch_expose }}"
45 |     hostname: "{{ docker_elasticsearch_hostname }}"
46 |     image: "{{ docker_elasticsearch_image }}"
47 |     name: "{{ docker_elasticsearch_name }}"
48 |     net: "{{ docker_elasticsearch_net }}"
49 |     ports: "{{ docker_elasticsearch_ports }}"
50 |     pull: always
51 |     restart_policy: always
52 |     state: reloaded
53 |     volumes: "{{ docker_elasticsearch_volumes }}"
54 |   tags:
55 |     - install
56 |     - upgrade
48 |
--------------------------------------------------------------------------------
/roles/docker-osf/files/elasticsearch/nginx.conf:
--------------------------------------------------------------------------------
1 | user nginx;
2 | worker_processes 1;
3 |
4 | error_log /var/log/nginx/error.log warn;
5 | pid /var/run/nginx.pid;
6 |
7 | events {
8 | worker_connections 1024;
9 | }
10 |
11 | http {
12 | include /etc/nginx/mime.types;
13 | default_type application/octet-stream;
14 |
15 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
16 | '$status $body_bytes_sent "$http_referer" '
17 | '"$http_user_agent" "$http_x_forwarded_for"';
18 |
19 | access_log /var/log/nginx/access.log main;
20 |
21 | sendfile on;
22 | #tcp_nopush on;
23 |
24 | keepalive_timeout 65;
25 |
26 | gzip on;
27 | gzip_disable "msie6";
28 | gzip_comp_level 6;
29 | gzip_min_length 1100;
30 | gzip_buffers 16 8k;
31 | gzip_proxied any;
32 | gzip_types text/plain text/css text/js text/xml text/javascript application/javascript application/x-javascript application/json application/xml application/xml+rss;
33 | gzip_vary on;
34 |
35 | #auth_basic "Restricted";
36 | #auth_basic_user_file /etc/nginx/.htpasswd;
37 |
38 | server {
39 | listen 9200 default_server;
40 |
41 | location / {
42 | proxy_pass http://elasticsearch:9200;
43 | proxy_redirect off;
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/roles/docker-prerender/files/conf/server.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 | var prerender = require('./lib');
3 |
4 | var server = prerender({
5 | workers: process.env.PRERENDER_NUM_WORKERS || 1,
6 | iterations: process.env.PRERENDER_NUM_ITERATIONS || 40,
7 | softIterations: process.env.PRERENDER_NUM_SOFT_ITERATIONS || 30
8 | });
9 |
10 |
11 | server.use(prerender.sendPrerenderHeader());
12 | // server.use(prerender.basicAuth());
13 | server.use(prerender.whitelist());
14 | // server.use(prerender.blacklist());
15 | // server.use(prerender.logger());
16 | server.use(prerender.removeScriptTags());
17 | server.use(prerender.httpHeaders());
18 |
19 | // Cache
20 | // server.use(require('prerender-redis-cache'));
21 | server.use(prerender.inMemoryHtmlCache());
22 | // server.use(prerender.s3HtmlCache());
23 |
24 | // Throttling header
25 | // server.use({
26 | // onPhantomPageCreate: function(phantom, req, res, next) {
27 | // req.prerender.page.run(function(resolve) {
28 | // var customHeaders = this.customHeaders;
29 | // customHeaders['X-THROTTLE-TOKEN'] = 'CHANGEME';
30 | // this.customHeaders = customHeaders;
31 | // resolve();
32 | // }).then(function() {
33 | // next();
34 | // }).catch(function() {
35 | // next();
36 | // });
37 | // }
38 | // });
39 |
40 | server.start();
41 |
--------------------------------------------------------------------------------
/group_vars/osf-benchmarking:
--------------------------------------------------------------------------------
1 | # vi: set ft=yaml :
2 |
3 |
4 | hostname_name: osf-benchmarking
5 |
6 | genericusers_users:
7 | - name: sloria
8 | pass: $5$rounds=110000$7urSbbWSvHpcvnjq$4RZ0qCwbl/1CYdcV/gBQTXPIXtcziXNC1PJmQjL5EaD
9 | uid: 3000
10 | shell: /usr/bin/zsh
11 | groups:
12 | - sudo
13 | append: yes
14 | ssh_keys:
15 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6D05yHt+UeRI6YAAjnU1yHLS02wVOAIAHnGE3U7axrJdDn49ft94/CNqMqxWo9lDS7VoaxtBQfVZioqRdN+PuCB5lc6hJ1M5oRC+8YN1g8javLUygL7U80VDcSBd/2L5wJI572tJBEUg2R1Syume5RgVIbN2ieEY82AU0TDZZvN1VM0Be7+wSXs1u6Czzq5qulH9GyuJfr83vPmvEW+FIGj1rbR/+t/ghbc/38GZnAvpxWZvi9i4mEV5GME2UqSwiNdE75yMLI89BC/hHymKP5BhQNg1i6MjWchx+PwWrnmyECtuuUbbrDFd6bHlJfKwXA5L8FxjMnHC0NmaQ+5AX sloria@stevens-air
16 | - name: jmcarp
17 | pass: $5$rounds=110000$PB1K0kI0ZbreZsTq$2HGwhAELlAKxU6ewhgVNtphhb0yLHX5Obd3t96xNFM8
18 | uid: 4000
19 | shell: /usr/bin/zsh
20 | groups:
21 | - sudo
22 | append: yes
23 | ssh_keys:
24 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBKbtgvnsfuvm719xE9FcX3599VKi7slCKH8KV0sCRY2o7PladpDgej8u2J5meTkTOhbOkQiREaWoot9pibvUZzfzR3fxg8hMc51bGXUqWssawD4wgPV6Ms/25DAm4iOkh7pyIRPiNy4I+VcabHb6XUql2g5QAVvEKWMNw0kTWSEG56tBsBPmFeDAxMw15PV+4N5oQH3SfjS3Oxb5Y6jij3moxd+VEKUQbY0bYxm5Yn2bE43o4km/3of1gdzmgmDLH3mljQFNdA9BFbrZ5dRdJKfC5KG5l7L9n1yUF4kNjyatkTqP1nbu0AGERJKNNk/mxG61tSOTcnFY7zD2GJYzv josh@centerforopenscience.org
25 |
--------------------------------------------------------------------------------
/roles/(legacy)/gitlab/templates/init_defaults.j2:
--------------------------------------------------------------------------------
1 |
2 | # Copy this lib/support/init.d/gitlab.default.example file to
3 | # /etc/default/gitlab in order for it to apply to your system.
4 |
5 | # RAILS_ENV defines the type of installation that is running.
6 | # Normal values are "production", "test" and "development".
7 | RAILS_ENV="production"
8 |
9 | # app_user defines the user that GitLab is run as.
10 | # The default is "git".
11 | app_user="{{gitlab_user}}"
12 |
13 | # app_root defines the folder in which gitlab and its components are installed.
14 | # The default is "/home/$app_user/gitlab"
15 | app_root="{{gitlab_repo_dir}}"
16 |
17 | # pid_path defines a folder in which the gitlab and its components place their pids.
18 | # This variable is also used below to define the relevant pids for the gitlab components.
19 | # The default is "$app_root/tmp/pids"
20 | pid_path="$app_root/tmp/pids"
21 |
22 | # socket_path defines the folder in which gitlab places the sockets
23 | # The default is "$app_root/tmp/sockets"
24 | socket_path="$app_root/tmp/sockets"
25 |
26 | # web_server_pid_path defines the path in which to create the pid file for the web_server
27 | # The default is "$pid_path/unicorn.pid"
28 | web_server_pid_path="$pid_path/unicorn.pid"
29 |
30 | # sidekiq_pid_path defines the path in which to create the pid file for sidekiq
31 | # The default is "$pid_path/sidekiq.pid"
32 | sidekiq_pid_path="$pid_path/sidekiq.pid"
33 |
--------------------------------------------------------------------------------
/roles/docker-postgres-repmgr/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ensure docker postgres repmgr directories exist
2 |   become: yes
3 |   file:
4 |     state: directory
5 |     path: "{{ item }}"
6 |   with_items:
7 |     - "{{ docker_postgres_repmgr_conf_dir }}"
8 |     - "{{ docker_postgres_repmgr_data_dir }}"
9 |   tags:
10 |     - install
11 |     - settings
12 | 
13 | 
14 | - name: Copy docker postgres repmgr repmgr.conf
15 |   become: yes
16 |   template:
17 |     src: repmgr.conf.j2
18 |     dest: "{{ docker_postgres_repmgr_conf_file }}"  # rendered into the root-owned conf dir created above
19 |     mode: 0644
20 |   tags:
21 |     - install
22 |     - settings
23 | 
24 | 
25 | - name: Restart docker postgres repmgr container  # restart-only path, selected via the "restart" tag
26 |   become: yes
27 |   shell: "docker restart {{ docker_postgres_repmgr_name }}"
28 |   tags:
29 |     - restart
30 | 
31 | 
32 | - name: Run docker postgres repmgr container  # NOTE(review): legacy "docker" module is deprecated in newer Ansible; migrate to docker_container when upgrading
33 |   docker:
34 |     env: "{{ docker_postgres_repmgr_env }}"
35 |     expose: "{{ docker_postgres_repmgr_expose }}"
36 |     hostname: "{{ docker_postgres_repmgr_hostname }}"
37 |     image: "{{ docker_postgres_repmgr_image }}"
38 |     links: "{{ docker_postgres_repmgr_links }}"
39 |     name: "{{ docker_postgres_repmgr_name }}"
40 |     net: "{{ docker_postgres_repmgr_net }}"
41 |     ports: "{{ docker_postgres_repmgr_ports }}"
42 |     pull: always
43 |     restart_policy: always
44 |     state: reloaded
45 |     volumes: "{{ docker_postgres_repmgr_volumes }}"
46 |   tags:
47 |     - install
48 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/syslog.logger:
--------------------------------------------------------------------------------
1 | log4j.rootCategory=INFO, syslog
2 |
3 | # fairly verbose for DEBUG, so generally leave at INFO
4 | log4j.category.XMLTooling.XMLObject=INFO
5 | log4j.category.XMLTooling.KeyInfoResolver=INFO
6 | log4j.category.Shibboleth.PropertySet=INFO
7 |
8 | # raise for low-level tracing of SOAP client HTTP/SSL behavior
9 | log4j.category.XMLTooling.libcurl=INFO
10 |
11 | # useful categories to tune independently:
12 | #
13 | # tracing of SAML messages and security policies
14 | #log4j.category.OpenSAML.MessageDecoder=DEBUG
15 | #log4j.category.OpenSAML.MessageEncoder=DEBUG
16 | #log4j.category.OpenSAML.SecurityPolicyRule=DEBUG
17 | # interprocess message remoting
18 | #log4j.category.Shibboleth.Listener=DEBUG
19 | # mapping of requests to applicationId
20 | #log4j.category.Shibboleth.RequestMapper=DEBUG
21 | # high level session cache operations
22 | #log4j.category.Shibboleth.SessionCache=DEBUG
23 | # persistent storage and caching
24 | #log4j.category.XMLTooling.StorageService=DEBUG
25 |
26 | # define the appender
27 |
28 | log4j.appender.syslog=org.apache.log4j.SyslogAppender
29 | log4j.appender.syslog.syslogName=shibboleth
30 | log4j.appender.syslog.syslogHost=localhost
31 | #log4j.appender.syslog.layout=org.apache.log4j.BasicLayout
32 | log4j.appender.syslog.layout=org.apache.log4j.PatternLayout
33 | log4j.appender.syslog.layout.ConversionPattern=%d{%Y-%m-%d %H:%M:%S} %p %c %x: %m%n
34 |
--------------------------------------------------------------------------------
/roles/docker-varnish/files/conf/default.vcl:
--------------------------------------------------------------------------------
1 | #
2 | # This is an example VCL file for Varnish.
3 | #
4 | # It does not do anything by default, delegating control to the
5 | # builtin VCL. The builtin VCL is called when there is no explicit
6 | # return statement.
7 | #
8 | # See the VCL chapters in the Users Guide at https://www.varnish-cache.org/docs/
9 | # and https://www.varnish-cache.org/trac/wiki/VCLExamples for more examples.
10 |
11 | # Marker to tell the VCL compiler that this VCL has been adapted to the
12 | # new 4.0 format.
13 | vcl 4.0;
14 |
15 | # Default backend definition. Set this to point to your content server.
16 | backend default {
17 | .host = "127.0.0.1";
18 | .port = "8080";
19 | }
20 |
21 | sub vcl_recv {
22 | # Happens before we check if we have this in cache already.
23 | #
24 | # Typically you clean up the request here, removing cookies you don't need,
25 | # rewriting the request, etc.
26 | }
27 |
28 | sub vcl_backend_response {
29 | # Happens after we have read the response headers from the backend.
30 | #
31 | # Here you clean the response headers, removing silly Set-Cookie headers
32 | # and other mistakes your backend does.
33 | }
34 |
35 | sub vcl_deliver {
36 | # Happens when we have all the pieces we need, and are about to send the
37 | # response to the client.
38 | #
39 | # You can do accounting or modifying the final object here.
40 | }
41 |
--------------------------------------------------------------------------------
/roles/docker-dor/files/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | user nginx;
2 | worker_processes 1;
3 |
4 | error_log /var/log/nginx/error.log warn;
5 | pid /var/run/nginx.pid;
6 |
7 | events {
8 | worker_connections 1024;
9 | }
10 |
11 | http {
12 | include /etc/nginx/mime.types;
13 | default_type application/octet-stream;
14 |
15 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
16 | '$status $body_bytes_sent "$http_referer" '
17 | '"$http_user_agent" "$http_x_forwarded_for"';
18 |
19 | access_log /var/log/nginx/access.log main;
20 |
21 | sendfile on;
22 | #tcp_nopush on;
23 |
24 | keepalive_timeout 65;
25 |
26 | gzip on;
27 | gzip_disable "msie6";
28 | gzip_comp_level 6;
29 | gzip_min_length 1100;
30 | gzip_buffers 16 8k;
31 | gzip_proxied any;
32 | gzip_types text/plain text/css text/js text/xml text/javascript application/javascript application/x-javascript application/json application/xml application/xml+rss;
33 | gzip_vary on;
34 |
35 | server {
36 | listen 80 default_server;
37 |
38 | location /static {
39 | alias /code/static;
40 | }
41 |
42 | location / {
43 | # Pass requests to uwsgi application
44 | include uwsgi_params;
45 | uwsgi_pass unix:///tmp/uwsgi.sock;
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/roles/docker-sentry/tasks/celery.yml:
--------------------------------------------------------------------------------
1 | - debug:
2 |     msg: "docker_sentry_celery_name: {{ docker_sentry_celery_name }}"  # surface the resolved container base name in play output
3 |   tags:
4 |     - install
5 |     - upgrade
6 |     - restart
7 |     - restart_sentry
8 | 
9 | 
10 | - name: Restart docker sentry celery container
11 |   become: yes
12 |   shell: "docker restart {{ docker_sentry_celery_name }}_{{ item }}"  # one container per instance, suffixed _1.._N
13 |   with_sequence: count={{ docker_sentry_celery_instances }}
14 |   when: docker_sentry_celery_instances > 0  # guard: with_sequence cannot iterate a zero count
15 |   tags:
16 |     - restart
17 |     - restart_sentry
18 | 
19 | 
20 | - name: Run docker sentry celery container  # NOTE(review): legacy "docker" module is deprecated in newer Ansible; migrate to docker_container when upgrading
21 |   docker:
22 |     command: "{{ docker_sentry_celery_command }}"
23 |     env: "{{ docker_sentry_celery_env }}"
24 |     expose: "{{ docker_sentry_celery_expose }}"
25 |     hostname: "{{ docker_sentry_celery_hostname }}"
26 |     image: "{{ docker_sentry_celery_image }}"
27 |     links: "{{ docker_sentry_celery_links }}"
28 |     name: "{{ docker_sentry_celery_name }}_{{ item }}"  # instance suffix must match the restart task above
29 |     net: "{{ docker_sentry_celery_net }}"
30 |     ports: "{{ docker_sentry_celery_ports }}"
31 |     pull: always
32 |     restart_policy: always
33 |     state: reloaded
34 |     tty: yes
35 |     volumes: "{{ docker_sentry_celery_volumes }}"
36 |     volumes_from: "{{ docker_sentry_celery_volumes_from }}"
37 |   with_sequence: count={{ docker_sentry_celery_instances }}
38 |   when: docker_sentry_celery_instances > 0  # guard: with_sequence cannot iterate a zero count
39 |   tags:
40 |     - install
41 |     - upgrade
42 |
--------------------------------------------------------------------------------
/roles/docker-share-reg/files/nginx.conf:
--------------------------------------------------------------------------------
1 | user nginx;
2 | worker_processes 1;
3 |
4 | error_log /var/log/nginx/error.log warn;
5 | pid /var/run/nginx.pid;
6 |
7 | events {
8 | worker_connections 1024;
9 | }
10 |
11 | http {
12 | include /etc/nginx/mime.types;
13 | default_type application/octet-stream;
14 |
15 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
16 | '$status $body_bytes_sent "$http_referer" '
17 | '"$http_user_agent" "$http_x_forwarded_for"';
18 |
19 | access_log /var/log/nginx/access.log main;
20 |
21 | sendfile on;
22 | #tcp_nopush on;
23 |
24 | keepalive_timeout 65;
25 |
26 | gzip on;
27 | gzip_disable "msie6";
28 | gzip_comp_level 6;
29 | gzip_min_length 1100;
30 | gzip_buffers 16 8k;
31 | gzip_proxied any;
32 | gzip_types text/plain text/css text/js text/xml text/javascript application/javascript application/x-javascript application/json application/xml application/xml+rss;
33 | gzip_vary on;
34 |
35 | server {
36 | listen 80 default_server;
37 |
38 | location /static {
39 | alias /code/static;
40 | }
41 |
42 | location / {
43 | # Pass requests to uwsgi application
44 | include uwsgi_params;
45 | uwsgi_pass unix:///tmp/uwsgi.sock;
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/roles/docker-shibboleth-sp/files/conf/security-policy.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------