├── ansible
├── roles
│ ├── prosody-egress
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── prosody
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── uninstall
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── disable_messaging.pfw
│ │ │ ├── mod_muc_hide_all.lua
│ │ │ ├── prosody-log-filter.systemd
│ │ │ ├── mod_websocket_auth_token.patch
│ │ │ ├── prosody-log-filter.sh
│ │ │ ├── prosody-jvb-log-filter.systemd
│ │ │ ├── mod_muc_filter_access.lua
│ │ │ ├── muc_owner_allow_kick-0.12.patch
│ │ │ └── setup-prosody-jvb-service.sh
│ │ ├── vars
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ ├── install-from-url.yml
│ │ │ └── install-from-apt.yml
│ │ └── templates
│ │ │ └── jvb_muc_presence_filter.pfw.j2
│ ├── wavefront
│ │ ├── tests
│ │ │ ├── inventory
│ │ │ └── test.yml
│ │ ├── README.md
│ │ ├── tasks
│ │ │ ├── proxy
│ │ │ │ ├── RedHat.yml
│ │ │ │ └── Debian.yml
│ │ │ └── telegraf
│ │ │ │ ├── RedHat.yml
│ │ │ │ └── Debian.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── 10-wavefront.conf.j2
│ ├── consul-jigasi
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── jigasi.json.j2
│ ├── jitsi-videobridge
│ │ ├── vars
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── aws_credentials.j2
│ │ │ ├── jvb-udp-buffers.conf.j2
│ │ │ ├── monitor-terminating-instance.conf.j2
│ │ │ ├── shards.json.j2
│ │ │ ├── environments.json.j2
│ │ │ ├── terminate_instance_oracle.j2
│ │ │ └── config.j2
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ └── upgrade.yml
│ │ ├── files
│ │ │ ├── monitor-terminating-instance.service
│ │ │ ├── reconfigure-jvb-oracle.sh
│ │ │ ├── reconfigure-jvb.sh
│ │ │ ├── configure-jvb-oracle.py
│ │ │ └── jvb-stats-oracle.sh
│ │ └── handlers
│ │ │ └── main.yml
│ ├── rsyslog
│ │ ├── templates
│ │ │ ├── main.yml
│ │ │ ├── apparmor-usr.sbin.rsyslogd.j2
│ │ │ └── rsyslog-programrouting.conf.j2
│ │ ├── tasks
│ │ │ ├── main_deb.yml
│ │ │ └── deb_packages.yml
│ │ ├── vars
│ │ │ ├── main.yml
│ │ │ ├── default.yml
│ │ │ ├── Debian.yml
│ │ │ └── Ubuntu.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── nodejs
│ │ ├── tests
│ │ │ └── localhosts
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── role.yml
│ │ ├── tasks
│ │ │ ├── install-url.yml
│ │ │ └── main.yml
│ │ ├── vars
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── etc
│ │ │ │ └── apt
│ │ │ │ └── preferences.d
│ │ │ │ └── deb_nodesource_com_node.pref.2
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── ntp
│ │ ├── .gitignore
│ │ ├── tests
│ │ │ └── role.yml
│ │ ├── vars
│ │ │ ├── RedHat.yml
│ │ │ └── Debian.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── .travis.yml
│ │ ├── README.md
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── consul-haproxy-jigasi
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── google-chrome
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jigasi-web
│ │ ├── templates
│ │ │ ├── jigasi_user.html.j2
│ │ │ └── nginx.site.j2
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── main.yml
│ ├── sshusers
│ │ ├── templates
│ │ │ ├── sudoers.d.rapid7.j2
│ │ │ └── sudoers.d.sshusers.j2
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── jitsi-meet
│ │ ├── files
│ │ │ ├── robots.txt
│ │ │ └── jidesha-0.1.1-fx.xpi
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── nginx.yml
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── vars
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── local.html.j2
│ ├── consul-selenium-grid-hub
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── selenium-grid-hub.json.j2
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── hcv-haproxy-rsyslog
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jiconop
│ │ ├── vars
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── config.j2
│ │ ├── tasks
│ │ │ ├── install.yml
│ │ │ ├── main.yml
│ │ │ └── configure.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── haproxy-configure
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jvb-ssl
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jibri-pjsua
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── jibri-pjsua.rsyslogd.conf
│ │ │ └── background.png
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ └── install.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── jibri-icewm2.systemd.j2
│ │ │ ├── jibri-xorg2.systemd.j2
│ │ │ ├── jibri-camera.systemd.j2
│ │ │ └── pjsua.config.j2
│ │ └── defaults
│ │ │ └── main.yml
│ ├── sshmfa
│ │ ├── handlers
│ │ │ └── main.yaml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── google_authenticator.j2
│ ├── jicofo
│ │ ├── uninstall
│ │ │ └── tasks
│ │ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── vars
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── jicofo-stats.sh.j2
│ │ │ └── config.j2
│ │ └── meta
│ │ │ └── main.yml
│ ├── jitsi-videobridge-ddns
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── aws_credentials.j2
│ │ │ └── cleanup_route53_dns.j2
│ │ └── tasks
│ │ │ └── main.yml
│ ├── sip-jibri-sidecar
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── rsyslog.config.j2
│ │ │ └── sidecar.systemd.j2
│ ├── consul-haproxy
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── haproxy.json.j2
│ ├── fail2ban
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── tcpdump-jigasi
│ │ └── files
│ │ │ ├── tcpdump-jigasi-cleanup.sh
│ │ │ └── tcpdump-jigasi.service
│ ├── coturn
│ │ ├── tasks
│ │ │ ├── install-apt.yml
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── check-files-limits-oracle.sh.j2
│ │ │ ├── check-files-limits.sh.j2
│ │ │ ├── coturn-mark-unhealthy-oracle.sh.j2
│ │ │ ├── coturn_set_alarms_systemd.j2
│ │ │ ├── coturn_set_alarms_service.j2
│ │ │ └── coturn-mark-unhealthy.sh.j2
│ ├── jitsi-upload-customizations
│ │ └── defaults
│ │ │ └── main.yml
│ ├── consul-install
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── rsyslog.config.j2
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── files
│ │ │ └── consul.service
│ ├── jitsi-repo
│ │ ├── templates
│ │ │ └── jitsi-repo.conf.j2
│ │ └── defaults
│ │ │ └── main.yml
│ ├── journald
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jvb-rtcstats-push
│ │ ├── templates
│ │ │ ├── service.config.j2
│ │ │ ├── rsyslog.config.j2
│ │ │ └── systemd.j2
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ └── configure.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── haproxy-tenant-pin
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── config.j2
│ │ │ ├── rsyslog.config.j2
│ │ │ └── tenant-pin.service.j2
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── main.yml
│ │ │ └── configure.yml
│ ├── tcpdump-prosody-jvb
│ │ └── files
│ │ │ ├── tcpdump-prosody-jvb-cleanup.sh
│ │ │ └── tcpdump-prosody-jvb.service
│ ├── autoscaler-sidecar
│ │ ├── templates
│ │ │ ├── aws_credentials.j2
│ │ │ ├── reconfigure_wrapper.sh.j2
│ │ │ ├── rsyslog.config.j2
│ │ │ └── sidecar.systemd.j2
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── files
│ │ │ ├── terminate_instance_aws.sh
│ │ │ └── terminate_instance_oracle.sh
│ ├── hcv-haproxy-configure
│ │ ├── files
│ │ │ ├── haproxy-fact.sh
│ │ │ └── hook-configure-haproxy.sh
│ │ ├── templates
│ │ │ ├── haproxy_default.j2
│ │ │ ├── environment.json.j2
│ │ │ └── haproxy.service.j2
│ │ └── handlers
│ │ │ └── main.yml
│ ├── signal-sidecar
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── rsyslog.config.j2
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ └── install.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── testrtc
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── testrtc.systemd.j2
│ │ │ ├── testrtc.upstart.j2
│ │ │ └── testrtc.credentials.sh.j2
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── jibri-java
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── aws_credentials.j2
│ │ │ ├── monitor-terminating-instance-systemd.j2
│ │ │ ├── graceful_shutdown_terminate_oracle.j2
│ │ │ ├── environments.json.j2
│ │ │ └── terminate_instance_oracle.j2
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── files
│ │ │ ├── jibri.rsyslogd.conf
│ │ │ ├── graceful_shutdown_terminate.sh
│ │ │ ├── reconfigure-jibri-oracle.sh
│ │ │ └── reconfigure-jibri.sh
│ ├── jigasi-rtcstats-push
│ │ ├── templates
│ │ │ ├── service.config.j2
│ │ │ ├── rsyslog.config.j2
│ │ │ └── systemd.j2
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ └── configure.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── jvb-colibri-proxy
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── consul-server-start
│ │ └── tasks
│ │ │ └── main.yml
│ ├── fluentd-jitsi
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── aws_credentials.j2
│ │ │ ├── fluentd_jitsi.conf.j2
│ │ │ ├── config.oci.j2
│ │ │ ├── bootstrap.conf.j2
│ │ │ ├── postinstall_ansible.conf.j2
│ │ │ ├── clouds.conf.j2
│ │ │ └── in_jvb.conf.j2
│ │ ├── files
│ │ │ ├── filters.conf
│ │ │ ├── jigasi.conf
│ │ │ ├── jicofo.conf
│ │ │ ├── haproxy-monitor.conf
│ │ │ ├── sip-jibri-selector.conf
│ │ │ ├── nginx.conf
│ │ │ ├── haproxy.conf
│ │ │ └── prosody.conf
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── flush-handlers
│ │ └── tasks
│ │ │ └── main.yml
│ ├── hcv-haproxy-status-lock
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── monit
│ │ └── files
│ │ │ ├── morun
│ │ │ └── modebug
│ ├── hcv-haproxy-set-stick-table
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jenkins-sshkey
│ │ └── tasks
│ │ │ └── main.yml
│ ├── selenium-grid
│ │ ├── files
│ │ │ ├── selenium-grid.datadog.conf
│ │ │ └── selenium-grid.rsyslogd.conf
│ │ ├── templates
│ │ │ ├── environments.json.j2
│ │ │ ├── selenium-xvfb.service.j2
│ │ │ ├── selenium-grid-hub.service.j2
│ │ │ ├── selenium-grid-extras-hub.service.j2
│ │ │ ├── selenium-grid-hub.json.j2
│ │ │ ├── hub_4444.json.j2
│ │ │ ├── selenium-grid-extras-node.service.j2
│ │ │ ├── selenium-grid-node.service.j2
│ │ │ ├── selenium-grid-node.json.j2
│ │ │ ├── node_5555.json.j2
│ │ │ └── selenium_grid_extras_config-hub.json.j2
│ │ └── meta
│ │ │ └── main.yml
│ ├── clean-system
│ │ └── templates
│ │ │ └── 20auto-upgrades.j2
│ ├── jigasi-haproxy-agent
│ │ ├── templates
│ │ │ ├── service.config.j2
│ │ │ ├── rsyslog.config.j2
│ │ │ └── systemd.j2
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ └── configure.yml
│ │ └── meta
│ │ │ └── main.yml
│ ├── jigasi
│ │ ├── files
│ │ │ ├── reconfigure-jigasi.sh
│ │ │ ├── jigasi-stats-oracle.sh
│ │ │ ├── monitor-terminating-instance.service
│ │ │ └── postinstall-jigasi-oracle.sh
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── monitor-terminating-instance.conf.j2
│ │ │ ├── terminate_instance_oracle.j2
│ │ │ ├── config.j2
│ │ │ └── environments.json.j2
│ │ └── meta
│ │ │ └── main.yml
│ ├── unattended-upgrades
│ │ ├── templates
│ │ │ └── 20auto-upgrades.j2
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── consul-signal
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── clear-shard-state-consul.sh
│ │ │ └── set-shard-state-consul.sh
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── signal.json.j2
│ ├── haproxy-jigasi
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── handlers
│ │ │ └── main.yml
│ ├── nginx
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── status_server
│ │ ├── templates
│ │ │ └── 47-nginx.conf.j2
│ │ ├── tasks
│ │ │ └── rsyslog.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── google-cloud
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ ├── install.yml
│ │ │ └── configure.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── jenkins
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── files
│ │ │ └── jenkins.service
│ ├── consul-standalone
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── standalone.json.j2
│ ├── jibri-kernel
│ │ └── defaults
│ │ │ └── main.yml
│ ├── jitsi-torture
│ │ ├── files
│ │ │ ├── auth0-authenticate
│ │ │ │ └── package.json
│ │ │ └── generate-jwt.js
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── check-long.j2
│ │ │ └── check.j2
│ ├── haproxy-lua
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── hcv-haproxy-status
│ │ └── defaults
│ │ │ └── main.yml
│ ├── consul-agent
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── consul.hcl.j2
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── consul-server
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── consul.hcl.j2
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── files
│ │ │ └── consul-server-config.sh
│ ├── jitsi-dumper
│ │ └── defaults
│ │ │ └── main.yml
│ ├── common
│ │ ├── files
│ │ │ └── download.sh
│ │ └── defaults
│ │ │ └── main.yml
│ ├── chromedriver
│ │ └── defaults
│ │ │ └── main.yml
│ ├── iptables-jenkins
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jitsi-videobridge-auth
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── jitsi-torture-checkout
│ │ └── defaults
│ │ │ └── main.yml
│ ├── jigasi-auth
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── defaults
│ │ │ └── main.yml
│ ├── geckodriver
│ │ └── defaults
│ │ │ └── main.yml
│ ├── pjsua
│ │ └── defaults
│ │ │ └── main.yml
│ ├── firefox
│ │ └── defaults
│ │ │ └── main.yml
│ ├── iptables-serf
│ │ └── tasks
│ │ │ └── main.yml
│ ├── iptables-coturn
│ │ └── tasks
│ │ │ └── main.yml
│ ├── openjdk-java
│ │ └── defaults
│ │ │ └── main.yml
│ ├── jitsi-upload-integrations
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── iptables-firezone
│ │ └── tasks
│ │ │ └── main.yml
│ ├── iptables
│ │ └── tasks
│ │ │ └── main.yml
│ └── iptables-selenium-grid
│ │ └── tasks
│ │ └── main.yml
├── tcpdump-jigasi.yml
├── haproxy-set-stick-table.yml
├── haproxy-status.yml
├── haproxy-status-lock.yml
├── configure-jitsi-repo.yml
├── stop-consul-services.yml
├── configure-users.yml
├── jenkins-server.yml
├── jvb-colibri-proxy-nginx.yaml
├── templates
│ └── torture_wrapper.j2.sh
├── clear-cloud-cache.yml
├── set-signal-state.yml
├── stop-shard-services.yml
├── build-coturn-oracle.yml
└── haproxy-health-value.yml
├── README.md
├── .ansible-lint
├── scripts
│ ├── configure-users.sh
│ ├── configure-jitsi-repo.sh
│ └── configure-firezone.sh
├── ansible.cfg
└── .gitmodules
/ansible/roles/prosody-egress/tasks/main.yml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/ansible/roles/prosody-egress/defaults/main.yml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ansible/roles/wavefront/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
--------------------------------------------------------------------------------
/ansible/roles/consul-jigasi/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/templates/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 |
--------------------------------------------------------------------------------
/ansible/roles/nodejs/tests/localhosts:
--------------------------------------------------------------------------------
1 | [local]
2 | localhost
--------------------------------------------------------------------------------
/ansible/roles/ntp/.gitignore:
--------------------------------------------------------------------------------
1 | vagrant*
2 | .vagrant
3 |
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy-jigasi/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/ansible/roles/nodejs/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for nodejs
3 |
--------------------------------------------------------------------------------
/ansible/roles/google-chrome/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | google_chrome_beta_flag: false
--------------------------------------------------------------------------------
/ansible/roles/jigasi-web/templates/jigasi_user.html.j2:
--------------------------------------------------------------------------------
1 | {{ jigasi_web_username }}
--------------------------------------------------------------------------------
/ansible/roles/sshusers/templates/sudoers.d.rapid7.j2:
--------------------------------------------------------------------------------
1 | rapid7 ALL=(ALL) NOPASSWD:ALL
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # infra-configuration
2 | Scripts for configuring Jitsi services
3 |
4 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/files/robots.txt:
--------------------------------------------------------------------------------
1 | User-agent: *
2 | Allow: /$
3 | Disallow: /
4 |
--------------------------------------------------------------------------------
/ansible/roles/consul-selenium-grid-hub/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | selenium_grid_name: 'default'
3 |
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-rsyslog/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | haproxy_reconfigure_rsyslog: false
3 |
--------------------------------------------------------------------------------
/ansible/roles/jiconop/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jiconop_xmpp_domain: "{{ prosody_domain_name }}"
3 |
--------------------------------------------------------------------------------
/ansible/roles/ntp/tests/role.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 | roles:
3 | - ansible-role-ntp
4 |
--------------------------------------------------------------------------------
/ansible/roles/sshusers/templates/sudoers.d.sshusers.j2:
--------------------------------------------------------------------------------
1 | %sshsudousers ALL=(ALL) NOPASSWD:ALL
2 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-configure/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | haproxy_configure_log_dest: ../../test-results
3 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-ssl/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - { role: nginx, when: jvb_ssl_install_flag}
--------------------------------------------------------------------------------
/ansible/roles/ntp/vars/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ntp_service_name: ntpd
3 | ntp_config_driftfile: /var/lib/ntp/drift
4 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - {"role":"pjsua", when: jibri_pjsua_install_flag}
--------------------------------------------------------------------------------
/ansible/roles/jigasi-web/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jigasi_web_install_flag: true
3 | jigasi_web_username: jigasi
4 |
--------------------------------------------------------------------------------
/ansible/roles/ntp/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ntp_service_name: ntp
3 | ntp_config_driftfile: /var/lib/ntp/ntp.drift
4 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: reload prosody
3 | service: name=prosody state=restarted
4 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-ssl/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart nginx
3 | service: name=nginx state=restarted enabled=yes
4 |
--------------------------------------------------------------------------------
/ansible/roles/sshmfa/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart sshd
3 | service: name=ssh state=restarted enabled=yes
4 |
--------------------------------------------------------------------------------
/ansible/roles/jicofo/uninstall/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - service: name=jicofo state=stopped enabled=no
3 | ignore_errors: true
4 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge-ddns/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - { role: nodejs, when: jvb_ddns_install_flag }
4 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/uninstall/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - service: name=prosody state=stopped enabled=no
3 | ignore_errors: true
4 |
--------------------------------------------------------------------------------
/ansible/roles/sip-jibri-sidecar/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - { role: nodejs, when: sip_jibri_install_flag }
4 |
5 |
--------------------------------------------------------------------------------
/ansible/roles/sshmfa/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mfa_users: "{{ ssh_users_jitsi }}"
3 | mfa_security_users: "{{ ssh_users_security }}"
4 |
--------------------------------------------------------------------------------
/.ansible-lint:
--------------------------------------------------------------------------------
1 | # Ansible-lint completely ignores rules or tags listed below
2 | skip_list:
3 | - run_once[play]
4 | - no-changed-when
5 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/disable_messaging.pfw:
--------------------------------------------------------------------------------
1 | KIND: message
2 | INSPECT: body
3 | LOG=[debug] Dropping message: $(stanza)
4 | DROP.
5 |
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_haproxy_private_ip: "{{ ansible_default_ipv4.address }}"
3 | consul_haproxy_public_ip:
4 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/files/jibri-pjsua.rsyslogd.conf:
--------------------------------------------------------------------------------
1 | if $programname == 'ffmpeg' then {
2 | /var/log/local/jibri-ffmpeg.log
3 | ~
4 | }
5 |
6 |
--------------------------------------------------------------------------------
/ansible/roles/jiconop/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart jiconop
3 | ansible.builtin.service:
4 | name: jiconop
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/templates/apparmor-usr.sbin.rsyslogd.j2:
--------------------------------------------------------------------------------
1 | network inet stream{% if rsyslog_inet6_input_enabled %},
2 | network inet6 stream{% endif %}
--------------------------------------------------------------------------------
/ansible/roles/fail2ban/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart fail2ban
3 | ansible.builtin.service:
4 | name: fail2ban
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/files/background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daimoc/infra-configuration/main/ansible/roles/jibri-pjsua/files/background.png
--------------------------------------------------------------------------------
/ansible/roles/nodejs/role.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Test the Node.js role
3 | hosts: all
4 | become: yes
5 | roles:
6 | - role: "ansible-nodejs-role"
7 |
--------------------------------------------------------------------------------
/ansible/roles/sshmfa/templates/google_authenticator.j2:
--------------------------------------------------------------------------------
1 | {{ item.mfa_key }}
2 | " RATE_LIMIT 3 30
3 | " WINDOW_SIZE 17
4 | " DISALLOW_REUSE
5 | " TOTP_AUTH
6 |
--------------------------------------------------------------------------------
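A hedged sketch of how a per-user template like this is typically written out; the task below is illustrative and not taken from the role (the username field and destination path are assumptions, while mfa_users comes from the sshmfa defaults shown earlier):

- name: Write per-user Google Authenticator state file (illustrative)
  ansible.builtin.template:
    src: google_authenticator.j2
    dest: "/home/{{ item.username }}/.google_authenticator"
    owner: "{{ item.username }}"
    mode: "0600"  # the secret file is conventionally kept private to the user
  loop: "{{ mfa_users }}"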
/ansible/roles/tcpdump-jigasi/files/tcpdump-jigasi-cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /usr/bin/find /var/lib/tcpdump-jigasi -type f -mmin +300 -exec rm {} \;
4 |
--------------------------------------------------------------------------------
/ansible/roles/coturn/tasks/install-apt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Install coturn package and dependencies from apt"
3 | ansible.builtin.apt:
4 | name: coturn
5 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-upload-customizations/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | upload_customizations_configure_flag: true
3 | upload_customizations_install_flag: true
4 |
--------------------------------------------------------------------------------
/ansible/roles/ntp/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart ntp
3 | ansible.builtin.service:
4 | name: "{{ ntp_service_name }}"
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/tasks/main_deb.yml:
--------------------------------------------------------------------------------
1 | # Debian related tasks
2 | ---
3 | - name: Install deb packages
4 | ansible.builtin.include_tasks: deb_packages.yml
5 |
--------------------------------------------------------------------------------
/ansible/roles/consul-install/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart rsyslog service
3 | ansible.builtin.service:
4 | name: rsyslog
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/files/jidesha-0.1.1-fx.xpi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daimoc/infra-configuration/main/ansible/roles/jitsi-meet/files/jidesha-0.1.1-fx.xpi
--------------------------------------------------------------------------------
/ansible/roles/jitsi-repo/templates/jitsi-repo.conf.j2:
--------------------------------------------------------------------------------
1 | machine {{ jitsi_repo_host }}
2 | login {{ jitsi_repo_username }}
3 | password {{ jitsi_repo_password }}
4 |
--------------------------------------------------------------------------------
/ansible/roles/journald/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart journald
3 | ansible.builtin.service:
4 | name: systemd-journald
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-rtcstats-push/templates/service.config.j2:
--------------------------------------------------------------------------------
1 | JVB_ADDRESS={{ jvb_rtcstats_push_jvb_address }}
2 | RTCSTATS_SERVER={{ jvb_rtcstats_push_rtcstats_server }}
--------------------------------------------------------------------------------
/ansible/roles/prosody/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | prosody_bosh_service_address: "{{ prosody_domain_name }}"
3 | prosody_xmpp_service_address: "{{ prosody_domain_name }}"
4 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart tenant-pin
3 | ansible.builtin.service:
4 | name: tenant-pin
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-rsyslog/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart haproxy rsyslog
3 | ansible.builtin.service:
4 | name: rsyslog
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/jiconop/templates/config.j2:
--------------------------------------------------------------------------------
1 | JICONOP_XMPP_DOMAIN={{ jiconop_xmpp_domain }}
2 | JICONOP_BOSH_URL={{ jiconop_bosh_url }}
3 | JICONOP_PORT={{ jiconop_port }}
4 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - { role: jitsi-repo, when: jitsi_meet_install_flag }
4 | - { role: nginx, when: jitsi_meet_install_flag}
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/tasks/nginx.yml:
--------------------------------------------------------------------------------
1 | - template: src=nginx.site.j2 dest=/etc/nginx/sites-available/{{ jitsi_meet_domain_name }} mode=0644
2 | notify: reload nginx
3 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id = {{ aws_access_key_id }}
3 | aws_secret_access_key = {{ aws_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/tcpdump-prosody-jvb/files/tcpdump-prosody-jvb-cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /usr/bin/find /var/lib/tcpdump-prosody-jvb -type f -mmin +300 -exec rm {} \;
4 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id = {{ aws_access_key_id }}
3 | aws_secret_access_key = {{ aws_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-configure/files/haproxy-fact.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | FACT_CACHE_FILE="/tmp/haproxy-facts.json"
4 | [ -f "$FACT_CACHE_FILE" ] && cat $FACT_CACHE_FILE
--------------------------------------------------------------------------------
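A hedged sketch of how a cached-fact script like this is commonly consumed; the facts.d install path and the resulting ansible_local key below are assumptions, not taken from this repo:

- name: Install the script as an executable local fact (illustrative path)
  ansible.builtin.copy:
    src: haproxy-fact.sh
    dest: /etc/ansible/facts.d/haproxy.fact
    mode: "0755"

- name: Re-read local facts
  ansible.builtin.setup:
    filter: ansible_local

- name: Print the cached haproxy data (illustrative key)
  ansible.builtin.debug:
    var: ansible_local.haproxy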
/ansible/roles/jitsi-videobridge-ddns/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id = {{ aws_access_key_id }}
3 | aws_secret_access_key = {{ aws_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - { role: jitsi-repo, when: jvb_install_flag }
4 | - { role: openjdk-java, when: jvb_install_flag }
5 |
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # items:
3 | # Logstash host for beta environment
4 | logstash_endpoint_domain: beta-us-east-1-logstash.meet-beta.hipchat.ninja
5 |
--------------------------------------------------------------------------------
/ansible/roles/signal-sidecar/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart signal-sidecar
3 | ansible.builtin.service:
4 | name: signal-sidecar
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/ansible/roles/testrtc/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | dependencies:
4 | - { role: "nodejs", when: testrtc_install_flag }
5 | - { role: "nginx", when: testrtc_install_flag }
6 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: install.yml
3 | when: jibri_install_flag
4 |
5 | - include_tasks: configure.yml
6 | when: jibri_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/jigasi-rtcstats-push/templates/service.config.j2:
--------------------------------------------------------------------------------
1 | JIGASI_ADDRESS={{ jigasi_rtcstats_push_jigasi_address }}
2 | RTCSTATS_SERVER={{ jigasi_rtcstats_push_rtcstats_server }}
--------------------------------------------------------------------------------
/ansible/roles/jvb-colibri-proxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart nginx
3 | ansible.builtin.service:
4 | name: nginx
5 | state: restarted
6 | enabled: true
7 |
--------------------------------------------------------------------------------
/ansible/roles/nodejs/tasks/install-url.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install nodejs from internal repo URL
3 | apt: deb="{{ nodejs_url }}" state=present
4 | retries: 3
5 | delay: 1
6 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: install.yml
3 | when: prosody_install_flag
4 |
5 | - include_tasks: configure.yml
6 | when: prosody_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/sshusers/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart sshd for sshusers
3 | ansible.builtin.service:
4 | name: ssh
5 | state: restarted
6 | enabled: true
7 |
--------------------------------------------------------------------------------
/ansible/roles/consul-server-start/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Start consul service
3 | ansible.builtin.service:
4 | name: consul
5 | enabled: true
6 | state: started
7 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart jitsi fluentd
3 | ansible.builtin.service:
4 | name: td-agent
5 | state: restarted
6 | enabled: true
7 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id = {{ aws_cloudwatch_access_key_id }}
3 | aws_secret_access_key = {{ aws_cloudwatch_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy-jigasi/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Enable consul service
3 | ansible.builtin.systemd:
4 | name: consul
5 | state: started
6 | enabled: true
7 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | aws_access_key_id = {{ aws_cloudwatch_access_key_id }}
3 | aws_secret_access_key = {{ aws_cloudwatch_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/flush-handlers/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Force all notified handlers to run at this point, not waiting for normal sync points
3 | ansible.builtin.meta: flush_handlers
4 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: install.yml
3 | when: jibri_pjsua_install_flag
4 |
5 | - include_tasks: configure.yml
6 | when: jibri_pjsua_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-status-lock/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hcv_environment: all
3 | hcv_haproxy_status_lock_file: '/tmp/haproxy-status.lock'
4 | hcv_haproxy_status_lock_action: 'unlock'
5 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart jibri cameras
3 | service: name="{{ item }}" state=restarted
4 | with_items:
5 | - jibri-camera-0
6 | - jibri-camera-1
--------------------------------------------------------------------------------
/ansible/roles/monit/files/morun:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | {
3 | echo "MONIT-WRAPPER $@"
4 | $@
5 | R=$?
6 | echo "MONIT-WRAPPER exit code $R"
7 | } 2>&1 >> /var/log/monit-debug.log
--------------------------------------------------------------------------------
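For illustration only (not in the repo): with this redirection order the braced group's stdout is appended to /var/log/monit-debug.log while its stderr stays on the caller's stdout, so a successful run such as `morun systemctl restart nginx` would append roughly:

    MONIT-WRAPPER systemctl restart nginx
    MONIT-WRAPPER exit code 0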
/ansible/roles/nodejs/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for nodejs
3 | debian_repo_version: "{{ nodejs_version if nodejs_version.split('.')[1] == 'x' else nodejs_version.split('.')[0]+'.x' }}"
4 |
--------------------------------------------------------------------------------
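A worked illustration of the expression above, using hypothetical nodejs_version values (the variable itself is defined in the role defaults, which are not shown in this excerpt):

# nodejs_version: "16.x"    -> debian_repo_version: "16.x"  (second dotted field is already "x")
# nodejs_version: "18.12.1" -> debian_repo_version: "18.x"  (falls back to major version + ".x")
- name: Show the derived NodeSource channel (illustrative)
  ansible.builtin.debug:
    msg: "NodeSource repo channel: {{ debian_repo_version }}"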
/ansible/roles/sip-jibri-sidecar/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: install.yml
3 | when: sip_jibri_install_flag
4 |
5 | - include_tasks: configure.yml
6 | when: sip_jibri_configure_flag
7 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/fluentd_jitsi.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | <filter **>
3 |   @type record_transformer
4 |   <record>
5 |     host ${hostname}
6 |   </record>
7 | </filter>
8 |
--------------------------------------------------------------------------------
/ansible/tcpdump-jigasi.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: all
3 | gather_facts: false
4 | become_user: root
5 | become: true
6 | roles:
7 | - { role: "tcpdump-jigasi", "tags": "tcpdump-jigasi"}
8 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/filters.conf:
--------------------------------------------------------------------------------
1 | <filter **>
2 |   @type record_transformer
3 |   <record>
4 |     host ${hostname}
5 |     process ${tag_parts[0]}
6 |     tag ${tag}
7 |   </record>
8 | </filter>
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-set-stick-table/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | backend_name: 'nodes'
3 | stick_table_entries: false
4 | stick_table_entries_file: false
5 | stick_table_filename: stick-table-entries.json
6 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins-sshkey/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Ubuntu jenkins service key
2 | ansible.posix.authorized_key:
3 | user: "ubuntu"
4 | key: "{{ item }}"
5 | with_items: "{{ jenkins_deploy_keys }}"
6 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/jvb-udp-buffers.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | net.core.rmem_max={{ jvb_udp_buffer_size }}
4 | net.core.netdev_max_backlog={{ jvb_udp_buffer_max_backlog }}
5 |
--------------------------------------------------------------------------------
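The role's template task is not shown in this excerpt; a minimal, assumed sketch of how a sysctl drop-in like this is typically applied (destination path and handler name are illustrative):

- name: Install JVB UDP buffer sysctl settings (illustrative)
  ansible.builtin.template:
    src: jvb-udp-buffers.conf.j2
    dest: /etc/sysctl.d/99-jvb-udp-buffers.conf
    mode: "0644"
  notify: Reload sysctl  # an assumed handler that would run "sysctl --system"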
/ansible/roles/rsyslog/vars/default.yml:
--------------------------------------------------------------------------------
1 | # Fallback OS defaults
2 | ---
3 | rsyslog_package_names:
4 | - rsyslog
5 | rsyslog_service_name: rsyslog
6 | rsyslog_file_owner: root
7 | rsyslog_file_group: root
8 |
--------------------------------------------------------------------------------
/ansible/roles/jicofo/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart jicofo
3 | ansible.builtin.service:
4 | name: jicofo
5 | state: restarted
6 | when: not jicofoservice.changed and jicofo_configure_flag
7 |
--------------------------------------------------------------------------------
/ansible/roles/nodejs/templates/etc/apt/preferences.d/deb_nodesource_com_node.pref.2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | Package: *
4 | Pin: release o=Node Source
5 | Pin-Priority: {{ nodejs_nodesource_pin_priority }}
6 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/files/selenium-grid.datadog.conf:
--------------------------------------------------------------------------------
1 | init_config:
2 | default_timeout: 10
3 | default_url: http://localhost:5555/wd/hub/sessions
4 | default_slots: 1
5 |
6 | instances:
7 | [{}]
--------------------------------------------------------------------------------
/scripts/configure-users.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # e.g. ../all/bin/terraform/standalone
4 |
5 | ansible-playbook -v -i "127.0.0.1," -c local ansible/configure-users.yml --vault-password-file .vault-password.txt
6 |
--------------------------------------------------------------------------------
/ansible/haproxy-set-stick-table.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: all
3 | become: true
4 | become_user: root
5 | gather_facts: false
6 | roles:
7 | - { role: "hcv-haproxy-set-stick-table", tags: "set-stick-table" }
8 |
--------------------------------------------------------------------------------
/ansible/haproxy-status.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: tag_shard_role_haproxy
3 | become_user: root
4 | become: true
5 | strategy: free
6 | roles:
7 | - { role: "hcv-haproxy-status", tags: "hcv-haproxy-status"}
8 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/mod_muc_hide_all.lua:
--------------------------------------------------------------------------------
1 | -- This module makes all MUCs in Prosody unavailable in disco#items queries
2 |
3 | module:hook("muc-room-pre-create", function(event)
4 | event.room:set_hidden(true);
5 | end, -1);
--------------------------------------------------------------------------------
/ansible/roles/wavefront/README.md:
--------------------------------------------------------------------------------
1 | # Wavefront Ansible Role
2 |
3 | **Note**: I've copied this from wavefrontHQ's ansible role instead of using a
4 | submodule so I could adjust it to meet the requirements set by the COP team
5 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/templates/reconfigure_wrapper.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #first wait for cloud init to finish
3 | cloud-init status --wait
4 | #run reconfiguration script
5 | sudo {{ autoscaler_reconfigure_script }}
6 | exit $?
--------------------------------------------------------------------------------
/ansible/roles/clean-system/templates/20auto-upgrades.j2:
--------------------------------------------------------------------------------
1 | APT::Periodic::Update-Package-Lists "0";
2 | APT::Periodic::Download-Upgradeable-Packages "0";
3 | APT::Periodic::AutocleanInterval "0";
4 | APT::Periodic::Unattended-Upgrade "0";
--------------------------------------------------------------------------------
/ansible/roles/consul-install/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="outfmt" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == 'consul' then {{ consul_log_dir }}/consul.log
3 | & stop
4 |
--------------------------------------------------------------------------------
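A hedged sketch of deploying this rsyslog drop-in; the destination filename is an assumption, while the "Restart rsyslog service" handler does exist in consul-install/handlers shown earlier:

- name: Install consul rsyslog routing rule (illustrative destination)
  ansible.builtin.template:
    src: rsyslog.config.j2
    dest: /etc/rsyslog.d/55-consul.conf
    mode: "0644"
  notify: Restart rsyslog service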
/ansible/roles/jigasi-haproxy-agent/templates/service.config.j2:
--------------------------------------------------------------------------------
1 | API_PORT={{ jigasi_haproxy_agent_listen_port }}
2 | MAX_PARTICIPANTS={{ jigasi_haproxy_agent_max_participants }}
3 | HEALTH_INTERVAL={{ jigasi_haproxy_agent_health_interval }}
--------------------------------------------------------------------------------
/ansible/roles/jigasi/files/reconfigure-jigasi.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #rebuild the configuration files and signal new shards to jigasi
4 | CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jigasi" /usr/local/bin/configure-jigasi-local.sh
5 | exit $?
--------------------------------------------------------------------------------
/scripts/configure-jitsi-repo.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # e.g. ../all/bin/terraform/standalone
4 |
5 | ansible-playbook -v -i "127.0.0.1," -c local ansible/configure-jitsi-repo.yml --vault-password-file .vault-password.txt
6 |
7 | exit $?
--------------------------------------------------------------------------------
/ansible/haproxy-status-lock.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: tag_shard_role_haproxy
3 | become_user: root
4 | become: true
5 | strategy: free
6 | roles:
7 | - { role: "hcv-haproxy-status-lock", tags: "hcv-haproxy-status-lock"}
8 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi/files/jigasi-stats-oracle.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #pull our own instance and environment
4 | . /usr/local/bin/oracle_cache.sh
5 |
6 | #now run the python that pushes stats to statsd
7 | /usr/local/bin/jigasi-stats.py
8 |
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | # Debian family OS defaults
2 | ---
3 | rsyslog_package_names:
4 | - rsyslog
5 | rsyslog_service_name: rsyslog
6 | rsyslog_file_owner: syslog
7 | rsyslog_file_group: adm
8 | rsyslog_os_supported: yes
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/vars/Ubuntu.yml:
--------------------------------------------------------------------------------
1 | # Ubuntu Family OS defaults
2 | ---
3 | rsyslog_package_names:
4 | - rsyslog
5 | rsyslog_service_name: rsyslog
6 | rsyslog_file_owner: syslog
7 | rsyslog_file_group: adm
8 | rsyslog_os_supported: yes
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/files/selenium-grid.rsyslogd.conf:
--------------------------------------------------------------------------------
1 | if $programname == 'java' then {
2 | /var/log/local/selenium-grid.log
3 | ~
4 | }
5 |
6 | if $programname == 'Xvfb' then {
7 | /var/log/local/selenium-xvfb.log
8 | ~
9 | }
--------------------------------------------------------------------------------
/ansible/roles/jiconop/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install jiconop package
3 | ansible.builtin.apt:
4 | name: "{{ jiconop_deb_pkg_name }}={{ jiconop_deb_pkg_version }}"
5 | state: present
6 | notify:
7 | - Restart jiconop
8 |
--------------------------------------------------------------------------------
/ansible/roles/nodejs/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Install Node.js using packages crafted by NodeSource
2 | ---
3 | - include_tasks: install-apt.yml
4 | when: nodejs_from_apt
5 |
6 | - include_tasks: install-url.yml
7 | when: not nodejs_from_apt
8 |
9 |
--------------------------------------------------------------------------------
/ansible/roles/sip-jibri-sidecar/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart sip-jibri sidecar service
3 | systemd:
4 | state: restarted
5 | daemon_reload: yes
6 | enabled: yes
7 | name: "{{ sip_jibri_sidecar_service_name }}"
8 |
--------------------------------------------------------------------------------
/ansible/roles/unattended-upgrades/templates/20auto-upgrades.j2:
--------------------------------------------------------------------------------
1 | APT::Periodic::Update-Package-Lists "{{ unattended_upgrades_update_package_lists_interval }}";
2 | APT::Periodic::Unattended-Upgrade "{{ unattended_upgrades_upgrade_interval }}";
3 |
--------------------------------------------------------------------------------
/ansible/roles/wavefront/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | gather_facts: true
5 | become: true
6 | roles:
7 | - { role: wavefront, wavefront_install_collector: "true", proxy_address: "localhost" }
8 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: install.yml
3 | when: jvb_install_flag
4 |
5 | - include_tasks: upgrade.yml
6 | when: jvb_upgrade_flag
7 |
8 | - include_tasks: configure.yml
9 | when: jvb_configure_flag
--------------------------------------------------------------------------------
/ansible/configure-jitsi-repo.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Main
3 | hosts: all
4 | become_user: root
5 | become: true
6 | gather_facts: true
7 | vars_files:
8 | - secrets/repo.yml
9 | roles:
10 | - { role: "jitsi-repo", tags: "repo" }
11 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart autoscaler sidecar service
3 | ansible.builtin.systemd:
4 | state: restarted
5 | daemon_reload: true
6 | enabled: true
7 | name: "{{ autoscaler_sidecar_service_name }}"
8 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: install.yml
3 | when: jitsi_meet_install_flag
4 |
5 | - include_tasks: configure.yml
6 | when: jitsi_meet_configure_flag
7 |
8 | - include_tasks: nginx.yml
9 | when: jitsi_meet_nginx_only_flag
--------------------------------------------------------------------------------
/ansible/roles/consul-signal/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_signal_cloud_provider: "{{ cloud_provider | default('aws') }}"
3 | consul_signal_private_ip: "{{ ansible_default_ipv4.address }}"
4 | consul_signal_public_ip: "{{ ansible_ec2_public_ipv4 | default('') }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/jicofo/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jicofo_hostname: "{{ prosody_domain_name }}"
3 | jicofo_auth_domain: "auth.{{ prosody_domain_name }}"
4 | jicofo_auth_user: focus
5 | jicofo_auth_password: "{{ prosody_focus_user_secret }}"
6 | jicofo_make_jvb_checks: true
7 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/environments.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | {% if selenium_grid_enable_consul %}
3 | "consul_server": "{{ selenium_grid_consul_server_url }}",
4 | {% endif %}
5 | "grid": "{{ selenium_grid_name }}",
6 | "grid_role": "{{ selenium_grid_role }}"
7 | }
--------------------------------------------------------------------------------
/ansible/stop-consul-services.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: all
3 | become: true
4 | become_user: root
5 | gather_facts: false
6 | tasks:
7 | - name: Stop consul
8 | ansible.builtin.service:
9 | name: consul
10 | state: stopped
11 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/config.oci.j2:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | user={{ oci_logging_user_id }}
3 | fingerprint={{ oci_logging_private_key_fingerprint }}
4 | tenancy={{ oci_logging_tenancy }}
5 | region={{ oci_logging_region }}
6 | key_file=/var/lib/td-agent/.oci/private.pem
7 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-jigasi/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | haproxy_conf_path: /etc/haproxy
3 | haproxy_jigasi_agent_enabled: false
4 | haproxy_jigasi_max_servers: 500
5 | haproxy_jigasi_path_to_config_script: /usr/local/bin/configure-haproxy-jigasi.sh
6 | haproxy_jigasi_servers: []
7 |
--------------------------------------------------------------------------------
/ansible/roles/jiconop/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jiconop_bosh_url: "http://localhost/http-bind"
3 | jiconop_configure_flag: true
4 | jiconop_deb_pkg_name: "jiconop"
5 | jiconop_deb_pkg_version: "*"
6 | jiconop_enabled: true
7 | jiconop_install_flag: true
8 | jiconop_port: 9615
9 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart nginx
3 | ansible.builtin.service:
4 | name: nginx
5 | state: restarted
6 | enabled: true
7 |
8 | - name: Stop nginx
9 | ansible.builtin.service:
10 | name: nginx
11 | state: stopped
12 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - { role: jitsi-repo, when: jibri_install_flag }
4 | - { role: chromedriver, when: jibri_install_flag }
5 |   - { role: google-chrome, when: jibri_install_flag }
6 | - { role: openjdk-java, when: jibri_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/jicofo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: jicofo_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: jicofo_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/jiconop/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: jiconop_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: jiconop_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: jigasi_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: jigasi_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install steps
3 | ansible.builtin.include_tasks: install.yml
4 | when: fluentd_install_flag
5 |
6 | - name: Include configure steps
7 | ansible.builtin.include_tasks: configure.yml
8 | when: fluentd_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/templates/config.j2:
--------------------------------------------------------------------------------
1 | TENANT_MAP_PATH={{ haproxy_tenant_pin_map_path }}
2 | CONSUL_URL=http://localhost:8500
3 | ENVIRONMENT={{ hcv_environment }}
4 | DAEMON_MODE=True
5 | DAEMON_TICK_DURATION={{ haproxy_tenant_pin_tick_duration }}
6 | STATSD_ENABLED=True
7 |
--------------------------------------------------------------------------------
/ansible/roles/jiconop/tasks/configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # eventually build a config file for jiconop
3 | - name: Install jiconop configuration file
4 | ansible.builtin.template:
5 | mode: 0644
6 | src: "config.j2"
7 | dest: "/etc/jitsi/jiconop/config"
8 | notify: Restart jiconop
9 |
--------------------------------------------------------------------------------
/ansible/roles/consul-selenium-grid-hub/templates/selenium-grid-hub.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "service": {
3 | "name": "selenium-grid-hub",
4 | "tags":["{{ selenium_grid_name }}"],
5 | "meta": {
6 | "grid":"{{ selenium_grid_name }}"
7 | },
8 | "port": 5555
9 | }
10 | }
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart nginx
3 |   ansible.builtin.service:
4 |     name: nginx
5 |     state: restarted
6 |     enabled: true
7 |
8 | - name: reload nginx
9 |   ansible.builtin.service:
10 |     name: nginx
11 |     state: reloaded
12 |
13 | - name: reload prosody plugins
14 |   ansible.builtin.service:
15 |     name: prosody
16 |     state: restarted
17 |
--------------------------------------------------------------------------------
/ansible/roles/testrtc/templates/testrtc.systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=TestRTC service at port {{ testrtc_port }}
3 |
4 | [Service]
5 | WorkingDirectory={{ testrtc_base_path }}
6 | Type=idle
7 | ExecStart=dev_appserver.py --port {{ testrtc_port }} --enable_host_checking false out/app.yaml
8 |
--------------------------------------------------------------------------------
/ansible/roles/google-cloud/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: google_cloud_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: google_cloud_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: Restart jigasi
2 | ansible.builtin.service:
3 | name: jigasi
4 | state: restarted
5 | when: not jigasi_configure_only_flag
6 |
7 | - name: Perform systemctl daemon-reload
8 | ansible.builtin.systemd:
9 | daemon_reload: true
10 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-rtcstats-push/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == '{{ jvb_rtcstats_push_service_name }}' then {{ jvb_rtcstats_push_log_dir }}/agent.log;SimpleJSONFormat
3 | & stop
--------------------------------------------------------------------------------
/ansible/roles/sip-jibri-sidecar/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == '{{ sip_jibri_sidecar_service_name }}' then {{ sip_jibri_log_dir }}/sidecar.log;SimpleJSONFormat
3 | & stop
4 |
5 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == '{{ autoscaler_sidecar_service_name }}' then {{ autoscaler_log_dir }}/sidecar.log;SimpleJSONFormat
3 | & stop
4 |
5 |
--------------------------------------------------------------------------------
/ansible/roles/coturn/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart coturn
3 | ansible.builtin.service:
4 | name: coturn
5 | state: restarted
6 |
7 | - name: Restart coturn systemd
8 | ansible.builtin.systemd:
9 | name: coturn
10 | state: restarted
11 | daemon_reload: true
12 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-haproxy-agent/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == '{{ jigasi_haproxy_agent_service_name }}' then {{ jigasi_haproxy_agent_log_dir }}/agent.log;SimpleJSONFormat
3 | & stop
--------------------------------------------------------------------------------
/ansible/roles/jigasi-rtcstats-push/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == '{{ jigasi_rtcstats_push_service_name }}' then {{ jigasi_rtcstats_push_log_dir }}/agent.log;SimpleJSONFormat
3 | & stop
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge-ddns/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: jvb_ddns_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: jvb_ddns_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/monit/files/modebug:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | {
3 | echo "MONIT-WRAPPER date"
4 | date
5 | echo "MONIT-WRAPPER env"
6 | env
7 | echo "MONIT-WRAPPER $@"
8 | "$@"
9 | R=$?
10 | echo "MONIT-WRAPPER exit code $R"
11 | } >> /var/log/monit-debug.log 2>&1
--------------------------------------------------------------------------------
/ansible/roles/nginx/files/status_server:
--------------------------------------------------------------------------------
1 | server {
2 | listen 127.0.0.1:888;
3 | listen [::1]:888;
4 | server_name localhost;
5 | location /nginx_status {
6 | stub_status on;
7 | access_log off;
8 | allow 127.0.0.1;
9 | deny all;
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/ansible/roles/signal-sidecar/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == '{{ signal_sidecar_service_name }}' then {{ signal_sidecar_log_dir }}/signal-sidecar.log;SimpleJSONFormat
3 | & stop
4 |
--------------------------------------------------------------------------------
/ansible/configure-users.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Main
3 | hosts: all
4 | become_user: root
5 | become: true
6 | gather_facts: true
7 | vars_files:
8 | - secrets/ssh-users.yml
9 | roles:
10 | - { role: "sshusers", tags: "users", ssh_users_accounts_flag: true, ssh_users_config_flag: false }
11 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jenkins_sitename: "jenkins-opsdev.jitsi.net"
3 | jenkins_ssl_certificate: "{{ jitsi_net_ssl_certificate }}{{ jitsi_net_ssl_extras }}"
4 | jenkins_ssl_dest_dir: /etc/nginx/ssl
5 | jenkins_ssl_key_name: "{{ jitsi_net_ssl_key_name }}"
6 | jenkins_install_flag: true
7 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | shard_region: "{{ ansible_ec2_placement_region if ansible_ec2_placement_region is defined and ansible_ec2_placement_region
3 | else oracle_to_aws_region_map[oracle_region] if oracle_region is defined and oracle_region in oracle_to_aws_region_map else 'default' }}"
4 |
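Editor's note: the chained Jinja conditional above is hard to scan. A minimal sketch of the same selection logic split over two vars (shard_region_fallback is a made-up helper name, not part of the role; the second argument to default() makes an empty placement region fall through as well):

    # sketch only, equivalent selection written in two steps
    shard_region_fallback: "{{ oracle_to_aws_region_map[oracle_region] if oracle_region is defined and oracle_region in oracle_to_aws_region_map else 'default' }}"
    shard_region: "{{ ansible_ec2_placement_region | default(shard_region_fallback, true) }}"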
--------------------------------------------------------------------------------
/ansible/roles/jvb-rtcstats-push/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: jvb_rtcstats_push_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: jvb_rtcstats_push_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/wavefront/tasks/proxy/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Wavefront Proxy (RedHat)
3 | ansible.builtin.yum:
4 | name: "{{ wavefront_proxy_pkg }}"
5 | state: present
6 | register: result
7 | until: result.rc == 0
8 | tags:
9 | - install
10 | - redhat
11 | - proxy
12 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
1 | template(name="SimpleJSONFormat" type="string" string= "%timegenerated% %msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
2 | if $programname == '{{ haproxy_tenant_pin_service_name }}' then {{ haproxy_tenant_pin_log_dir }}/tenant-pin.log;SimpleJSONFormat
3 | & stop
4 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-haproxy-agent/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: jigasi_haproxy_agent_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: jigasi_haproxy_agent_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-rtcstats-push/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: jigasi_rtcstats_push_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: jigasi_rtcstats_push_configure_flag
9 |
--------------------------------------------------------------------------------
/ansible/roles/consul-standalone/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_standalone_cloud_provider: "{{ cloud_provider | default('aws') }}"
3 | consul_standalone_private_ip: "{{ ansible_default_ipv4.address }}"
4 | consul_standalone_public_ip: "{{ oracle_public_ip if consul_standalone_cloud_provider == 'oracle' else ansible_ec2_public_ipv4 }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/jicofo/templates/jicofo-stats.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # pull in environment vars needed for reporting
3 | {% if cloud_provider == 'oracle' -%}
4 | . /usr/local/bin/oracle_cache.sh
5 | {% else -%}
6 | . /usr/local/bin/aws_cache.sh
7 | {% endif -%}
8 |
9 | # pull and report stats
10 | /usr/local/bin/jicofo-stats.py
11 |
--------------------------------------------------------------------------------
/ansible/roles/testrtc/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate testRTC credentials # noqa ignore-errors
3 | ansible.builtin.command: /usr/local/bin/testrtc_credentials
4 | ignore_errors: true
5 |
6 | - name: Reload nginx
7 | ansible.builtin.service:
8 | name: nginx
9 | enabled: true
10 | state: reloaded
11 |
--------------------------------------------------------------------------------
/ansible/roles/wavefront/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart wavefront-proxy
3 |   ansible.builtin.service:
4 |     name: wavefront-proxy
5 |     state: restarted
6 |     enabled: true
7 |   become: true
8 |
9 | - name: restart telegraf
10 |   ansible.builtin.service:
11 |     name: telegraf
12 |     state: restarted
13 |     enabled: true
14 |   become: true
15 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-kernel/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jibri_kernel_cloud_provider: "{{ cloud_provider | default('aws') }}"
3 | jibri_kernel_clean_aws: true
4 | jibri_kernel_headers_package: "linux-headers-{{ jibri_kernel_package_type }}"
5 | jibri_kernel_package: "linux-image-{{ jibri_kernel_package_type }}"
6 | jibri_kernel_package_type: virtual
7 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-torture/files/auth0-authenticate/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "auth0-authenticate",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "auth0-authenticate.js",
6 | "keywords": [],
7 | "author": "",
8 | "license": "ISC",
9 | "dependencies": {
10 | "set-cookie-parser": "^2.4.7"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/ansible/roles/wavefront/tasks/proxy/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Wavefront Proxy (Ubuntu)
3 | ansible.builtin.apt:
4 | name: "{{ wavefront_proxy_pkg }}={{ wavefront_proxy_version }}"
5 | state: present
6 | register: result
7 | until: result is succeeded
8 | tags:
9 | - install
10 | - debian
11 | - proxy
12 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install autoscaler
3 | ansible.builtin.include_tasks: install.yml
4 | when: autoscaler_install_flag and jitsi_autoscaler_sidecar
5 |
6 | - name: Configure autoscaler
7 | ansible.builtin.include_tasks: configure.yml
8 | when: autoscaler_configure_flag and jitsi_autoscaler_sidecar
9 |
--------------------------------------------------------------------------------
/ansible/roles/testrtc/templates/testrtc.upstart.j2:
--------------------------------------------------------------------------------
1 | description "TestRTC service at port {{ testrtc_port }}"
2 |
3 | stop on runlevel [06]
4 |
5 | respawn
6 | script
7 | chdir {{ testrtc_base_path }}
8 | echo $$ > /var/run/testrtc.pid
9 | exec dev_appserver.py --port {{ testrtc_port }} --enable_host_checking false out/app.yaml
10 | end script
--------------------------------------------------------------------------------
/ansible/roles/wavefront/tasks/telegraf/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install telegraf agent/collector (RedHat)
3 | ansible.builtin.yum:
4 | name: "{{ wavefront_collector }}"
5 | state: present
6 | update_cache: true
7 | register: result
8 | until: result.rc == 0
9 | tags:
10 | - install
11 | - redhat
12 | - collector
13 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-lua/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | haproxy_lua_lua_version: 5.3
3 | haproxy_lua_rocks_checksum: eb20cd9814df05535d9aae98da532217c590fc07d48d90ca237e2a7cdcf284fe
4 | haproxy_lua_rocks_url: "https://luarocks.org/releases/luarocks-{{ haproxy_lua_rocks_version }}.tar.gz"
5 | haproxy_lua_rocks_version: 3.3.1
6 | haproxy_lua_src_dir: /tmp/luarocks
7 |
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-status/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hcv_environment: all
3 | hcv_haproxy_status_snapshot: false
4 | hcv_haproxy_status_path: "../../haproxy-status/{{ hcv_environment }}{{ '_snapshot' if hcv_haproxy_status_snapshot | bool else '' }}"
5 | hcv_haproxy_status_lock_file: '/tmp/haproxy-status.lock'
6 | hcv_haproxy_status_ignore_lock: false
7 |
--------------------------------------------------------------------------------
/ansible/roles/consul-agent/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install consul config file
3 | ansible.builtin.template:
4 | mode: 0640
5 | src: "consul.hcl.j2"
6 | dest: "/etc/consul.d/consul.hcl"
7 |
8 | - name: Install consul env file
9 | ansible.builtin.copy:
10 | mode: 0640
11 | content: ""
12 | dest: "/etc/consul.d/consul.env"
13 |
--------------------------------------------------------------------------------
/ansible/roles/consul-agent/templates/consul.hcl.j2:
--------------------------------------------------------------------------------
1 | datacenter = "{{ consul_datacenter }}"
2 | data_dir = "/opt/consul"
3 | bind_addr = "{{ ansible_default_ipv4.address }}"
4 | advertise_addr = "{{ ansible_default_ipv4.address }}"
5 | {% if consul_encryption_key %}
6 | encrypt = "{{ consul_encryption_key }}"
7 | {% endif %}
8 | retry_join = {{ consul_retry_join_lan }}
9 |
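Editor's note: for the rendered file to be valid HCL, retry_join has to come out as a list literal. A sketch of how consul_retry_join_lan might be defined so the line renders as retry_join = ["10.0.1.10", "10.0.1.11"] (placeholder addresses, not taken from this repo):

    # hypothetical group_vars entry
    consul_retry_join_lan: '["10.0.1.10", "10.0.1.11"]'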
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/defaults/main.yml:
--------------------------------------------------------------------------------
1 | haproxy_tenant_pin_configure_flag: false
2 | haproxy_tenant_pin_enabled: false
3 | haproxy_tenant_pin_install_flag: false
4 | haproxy_tenant_pin_log_dir: "/var/log"
5 | haproxy_tenant_pin_map_path: /etc/haproxy/maps/tenant.map
6 | haproxy_tenant_pin_service_name: "tenant-pin"
7 | haproxy_tenant_pin_tick_duration: 5
8 |
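Editor's note: all three gates default to false, so the role is a no-op until an environment opts in. A minimal sketch of the overrides that would switch it on (variable names come from the defaults above; values are illustrative):

    haproxy_tenant_pin_enabled: true
    haproxy_tenant_pin_install_flag: true
    haproxy_tenant_pin_configure_flag: true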
--------------------------------------------------------------------------------
/ansible/roles/jigasi/templates/monitor-terminating-instance.conf.j2:
--------------------------------------------------------------------------------
1 | description "Terminating Instance Monitor"
2 |
3 | start on filesystem and net-device-up IFACE={{ ansible_default_ipv4.interface }}
4 |
5 | stop on runlevel [016]
6 |
7 | respawn
8 | chdir /usr/local/bin
9 |
10 | exec /usr/local/bin/monitor-terminating-instance.sh
11 | respawn limit 10 90
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/selenium-xvfb.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Selenium Xvfb
3 | After=network.target
4 |
5 | [Service]
6 | User={{ selenium_grid_username }}
7 | Group={{ selenium_grid_groupname }}
8 | ExecStart=/usr/bin/Xvfb :99 -screen 0 1024x768x24
9 | Restart=on-failure
10 | RestartPreventExitStatus=255
11 | Type=simple
12 |
13 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/files/monitor-terminating-instance.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=JVB Terminating Instance Monitor
3 | After=network.target
4 |
5 | [Service]
6 | WorkingDirectory=/usr/local/bin
7 | ExecStart=/usr/local/bin/monitor-terminating-instance.sh
8 | Restart=on-failure
9 | Type=simple
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/monitor-terminating-instance.conf.j2:
--------------------------------------------------------------------------------
1 | description "Terminating Instance Monitor"
2 |
3 | start on filesystem and net-device-up IFACE={{ ansible_default_ipv4.interface }}
4 |
5 | stop on runlevel [016]
6 |
7 | respawn
8 | chdir /usr/local/bin
9 |
10 | exec /usr/local/bin/monitor-terminating-instance.sh
11 | respawn limit 10 90
--------------------------------------------------------------------------------
/ansible/roles/coturn/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Aaron van Meerten
4 | description: configure coturn
5 | license: Apache License Version 2.0
6 | min_ansible_version: '6.6.0'
7 | platforms:
8 | - name: Ubuntu
9 | releases:
10 | - focal
11 | - jammy
12 | dependencies:
13 | - {role: jitsi-repo}
14 | - {role: monit}
15 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/files/reconfigure-jvb-oracle.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #first rebuild the configuration files
4 | CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jitsi-videobridge" /usr/local/bin/configure-jvb-local-oracle.sh
5 |
6 | echo "JVB configuration signaling"
7 | /usr/local/bin/configure-jvb-shards.sh
8 | RET=$?
9 | echo "JVB reconfiguration completed"
10 | exit $RET
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/templates/monitor-terminating-instance-systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jibri Terminating Instance Monitor
3 | After=network.target
4 |
5 | [Service]
6 | WorkingDirectory={{ jibri_scripts_dir }}
7 | ExecStart={{ jibri_scripts_dir }}/monitor-terminating-instance.sh
8 | Restart=on-failure
9 | Type=simple
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/ansible/roles/consul-jigasi/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Consul service file
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: "jigasi.json.j2"
6 | dest: "/etc/consul.d/jigasi.json"
7 |
8 | - name: Enable consul service # noqa ignore-errors
9 | ansible.builtin.systemd:
10 | name: consul
11 | state: started
12 | enabled: true
13 | ignore_errors: true
14 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/templates/graceful_shutdown_terminate_oracle.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | GRACEFUL_SHUTDOWN="/opt/jitsi/jibri/wait_graceful_shutdown.sh"
3 | TERMINATE_INSTANCE="{{ jibri_path_to_terminate_instance_script }}"
4 |
5 | # run the graceful shutdown and wait for it to finish
6 | sudo "$GRACEFUL_SHUTDOWN"
7 |
8 | # now terminate the instance
9 | sudo "$TERMINATE_INSTANCE"
10 |
11 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/files/reconfigure-jvb.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #first rebuild the configuration files
4 | CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jitsi-videobridge" /usr/local/bin/configure-jvb-local.sh
5 |
6 | echo "JVB configuration signaling"
7 | #now signal the shards to pick up the new JVB configuration
8 | /usr/local/bin/configure-jvb-shards.sh
9 | RET=$?
10 | echo "JVB reload completed"
11 | exit $RET
--------------------------------------------------------------------------------
/ansible/roles/signal-sidecar/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when:
5 | - signal_sidecar_install_flag
6 | - signal_sidecar_enabled
7 |
8 | - name: Include configure tasks
9 | ansible.builtin.include_tasks: configure.yml
10 | when:
11 | - signal_sidecar_configure_flag
12 | - signal_sidecar_enabled
13 |
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install consul service file
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: "haproxy.json.j2"
6 | dest: "/etc/consul.d/haproxy.json"
7 |
8 | - name: Enable consul service # noqa ignore-errors
9 | ansible.builtin.systemd:
10 | name: consul
11 | state: started
12 | enabled: true
13 | ignore_errors: true
14 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-repo/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jitsi_repo_host: internal-repo.jitsi.net
3 | jitsi_repo_password: "default"
4 | jitsi_repo_url: "https://{{ jitsi_repo_host }}/debian"
5 | jitsi_auth_url: "https://{{ jitsi_repo_username }}:{{ jitsi_repo_password }}@{{ jitsi_repo_host }}/debian"
6 | jitsi_auth_url_old: "https://jitsi:j1ts1r3p0@{{ jitsi_repo_host }}/debian"
7 | jitsi_repo_username: "repo"
8 |
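Editor's note: jitsi_auth_url is assembled from the repo username and password, which configure-jitsi-repo.yml loads from secrets/repo.yml. A sketch of the shape that vars file is assumed to have (keys inferred from the defaults above; values are placeholders):

    # secrets/repo.yml (assumed shape, placeholder values)
    jitsi_repo_username: "repo"
    jitsi_repo_password: "CHANGE_ME"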
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart rsyslog
3 | tags:
4 | - rsyslog
5 | - configuration
6 | ansible.builtin.service:
7 | name: "{{ rsyslog_service_name }}"
8 | state: restarted
9 |
10 | - name: Reload apparmor
11 | tags:
12 | - rsyslog
13 | - configuration
14 | ansible.builtin.service:
15 | name: apparmor
16 | state: reloaded
17 |
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_haproxy
4 | author: Aaron van Meerten
5 | description: configure consul for haproxy
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - {role: consul-agent}
15 |
--------------------------------------------------------------------------------
/ansible/jenkins-server.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Main
3 | hosts: all
4 | gather_facts: true
5 | become_user: root
6 | become: true
7 | vars_files:
8 | - secrets/ssh-users.yml
9 | - secrets/ssl-certificates.yml
10 |
11 | roles:
12 | - { role: "iptables-jenkins", tags: "iptables"}
13 | - { role: "jenkins", tags: "jenkins"}
14 | - { role: "jenkins-sshkey", tags: "jenkins-sshkey"}
15 |
--------------------------------------------------------------------------------
/ansible/roles/consul-standalone/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install consul service file
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: "standalone.json.j2"
6 | dest: "/etc/consul.d/standalone.json"
7 |
8 | - name: Enable consul service # noqa ignore-errors
9 | ansible.builtin.systemd:
10 | name: consul
11 | state: started
12 | enabled: true
13 | ignore_errors: true
14 |
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-rsyslog/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Suppress recurring drain log message from agent
3 | ansible.builtin.lineinfile:
4 | dest: /etc/rsyslog.d/49-haproxy.conf
5 |     insertafter: '^\$AddUnixListenSocket /var/lib/haproxy/dev/log$'
6 | line: ":msg, contains, \"remains in forced drain mode.\" stop"
7 | when: haproxy_reconfigure_rsyslog
8 | notify: Restart haproxy rsyslog
9 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | host_key_checking = False
3 | gathering = smart
4 | fact_caching = jsonfile
5 | fact_caching_connection = .facts
6 | fact_caching_timeout = 86400
7 | timeout = 90
8 | vault_password_file = .vault-password.txt
9 |
10 | [ssh_connection]
11 | control_path = %(directory)s/%%h-%%r
12 | ssh_args = -o ControlPersist=15m -F config/ssh.config -q
13 | scp_if_ssh = True
14 | pipelining = True
15 |
--------------------------------------------------------------------------------
/ansible/roles/consul-server/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_server
4 | author: Aaron van Meerten
5 | description: install and configure consul server
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: consul-install }
15 |
--------------------------------------------------------------------------------
/ansible/roles/coturn/templates/check-files-limits-oracle.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | open_files_limit=$(cat /etc/default/coturn|grep ulimit|cut -d' ' -f3)
4 | currently_opened_files_count=$(lsof -p $(cat /var/run/turnserver.pid)|wc -l)
5 |
6 | if [[ $currently_opened_files_count -ge $open_files_limit ]]; then
7 | # we do not set alarms for oracle, just return error
8 | exit 1
9 | else
10 | exit 0
11 | fi
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when:
5 | - haproxy_tenant_pin_install_flag
6 | - haproxy_tenant_pin_enabled
7 |
8 | - name: Include configure tasks
9 | ansible.builtin.include_tasks: configure.yml
10 | when:
11 | - haproxy_tenant_pin_configure_flag
12 | - haproxy_tenant_pin_enabled
13 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/tasks/configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate config file
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: config.j2
6 | dest: '/etc/{{ haproxy_tenant_pin_service_name }}.conf'
7 | notify:
8 | - Restart tenant-pin
9 |
10 | - name: Start tenant-pin service
11 | ansible.builtin.service:
12 | name: tenant-pin
13 | state: started
14 | enabled: true
15 |
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy-jigasi/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_haproxy_jigasi
4 | author: Aaron van Meerten
5 | description: configure consul on jigasi haproxy
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - {role: consul-agent}
15 |
--------------------------------------------------------------------------------
/ansible/roles/unattended-upgrades/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | unattended_upgrades_auto_reboot: false
3 | unattended_upgrades_auto_remove: false
4 | unattended_upgrades_email_address: root@localhost
5 | unattended_upgrades_origins:
6 | - '${distro_id} ${distro_codename}-security'
7 | unattended_upgrades_send_email: false
8 | unattended_upgrades_update_package_lists_interval: 1
9 | unattended_upgrades_upgrade_interval: 1
10 |
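Editor's note: with these defaults, the 20auto-upgrades template near the top of this listing renders both APT::Periodic intervals as "1". A sketch of site-level overrides that turn on mail and automatic reboots (names from the defaults above; values are illustrative):

    unattended_upgrades_send_email: true
    unattended_upgrades_email_address: ops@example.com
    unattended_upgrades_auto_reboot: true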
--------------------------------------------------------------------------------
/ansible/roles/consul-jigasi/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_jigasi
4 | author: Aaron van Meerten
5 | description: configure consul for jigasi
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - {role: consul-agent, consul_install_flag: false}
15 |
--------------------------------------------------------------------------------
/ansible/roles/consul-selenium-grid-hub/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install consul service file
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: "selenium-grid-hub.json.j2"
6 | dest: "/etc/consul.d/selenium-grid-hub.json"
7 |
8 | - name: Enable consul service # noqa ignore-errors
9 | ansible.builtin.systemd:
10 | name: consul
11 | state: started
12 | enabled: true
13 | ignore_errors: true
14 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-dumper/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jitsi_dump_cloud_provider: "{{ cloud_provider | default('aws') }}"
3 | jitsi_dump_jibri_sns_topic: arn:aws:sns:us-west-2:103425057857:Jibri-Dumps
4 | jitsi_dump_s3_bucket: jitsi-infra-dumps
5 | jitsi_dump_sns_region: us-west-2
6 | jitsi_dump_sns_topic: arn:aws:sns:us-west-2:103425057857:JVB-Dumps
7 | # disable dump jvb script by default
8 | jitsi_dumper_prosody_dump_jvb_mode: 0644
9 |
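Editor's note: mode 0644 leaves the prosody dump-jvb script non-executable, which is how the "disable by default" in the comment is achieved. A hedged example of the override that would presumably re-enable it:

    # assumption: making the script executable turns the dump back on
    jitsi_dumper_prosody_dump_jvb_mode: '0755'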
--------------------------------------------------------------------------------
/ansible/roles/coturn/templates/check-files-limits.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | open_files_limit=$(cat /etc/default/coturn|grep ulimit|cut -d' ' -f3)
4 | currently_opened_files_count=$(lsof -p $(cat /var/run/turnserver.pid)|wc -l)
5 |
6 | if [[ $currently_opened_files_count -ge $open_files_limit ]]; then
7 |     {{ coturn_scripts_path }}/coturn-set-alarms.sh alarm CoturnOpenedFilesLimitFailed
8 | exit 1
9 | else
10 | exit 0
11 | fi
--------------------------------------------------------------------------------
/ansible/roles/prosody/templates/jvb_muc_presence_filter.pfw.j2:
--------------------------------------------------------------------------------
1 | # Drop all presence from a jvb in a MUC to a jvb
2 | FROM: {{ prosody_jvb_brewery_muc }}
3 | TO: {{ prosody_jvb_auth_user }}@{{ prosody_jvb_auth_domain_name }}
4 | KIND: presence
5 | # Seems safer to allow all "unavailable" to pass
6 | TYPE: available
7 | # Allow self-presence (code=110)
8 | NOT INSPECT: {http://jabber.org/protocol/muc#user}x/status@code=110
9 | DROP.
10 |
--------------------------------------------------------------------------------
/ansible/jvb-colibri-proxy-nginx.yaml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: all
3 | gather_facts: true
4 | become_user: root
5 | become: true
6 | force_handlers: true
7 | gather_timeout: 180
8 | vars_files:
9 | - config/vars.yml
10 | - sites/{{ hcv_environment }}/vars.yml
11 | vars:
12 | cloud_provider: oracle
13 | shard_role: haproxy
14 | roles:
15 |     - { role: "jvb-colibri-proxy", tags: "jvb-colibri-proxy" }
16 |
--------------------------------------------------------------------------------
/ansible/roles/consul-agent/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_agent
4 | author: Aaron van Meerten
5 | description: install and configure consul agent
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: consul-install, when: consul_install_flag }
15 |
--------------------------------------------------------------------------------
/ansible/roles/consul-signal/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_server_signal
4 | author: Aaron van Meerten
5 |   description: install and configure consul server for signal nodes
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: consul-agent }
15 |
--------------------------------------------------------------------------------
/ansible/roles/consul-standalone/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_standalone
4 | author: Aaron van Meerten
5 | description: install and configure consul server for standalone nodes
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - {role: consul-agent}
15 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/templates/jibri-icewm2.systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jibri Window Manager
3 | Requires=jibri-xorg2.service
4 | After=jibri-xorg2.service
5 |
6 | [Service]
7 | User={{ jibri_username }}
8 | Group={{ jibri_groupname }}
9 | Environment=DISPLAY=:1
10 | ExecStart=/usr/bin/icewm-session
11 | Restart=on-failure
12 | RestartPreventExitStatus=255
13 | Type=simple
14 |
15 | [Install]
16 | WantedBy=jibri.service
17 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-web/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jigasi_web
4 | author: Aaron van Meerten
5 | description: configure web server for jigasi user output
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nginx, when: jigasi_web_install_flag }
15 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Aaron van Meerten
4 | description: configure jigasi
5 | license: Apache License Version 2.0
6 | min_ansible_version: '6.6.0'
7 | platforms:
8 | - name: Ubuntu
9 | releases:
10 | - focal
11 | - jammy
12 | dependencies:
13 | - { role: jitsi-repo, when: jigasi_install_flag }
14 | - { role: openjdk-java, when: jigasi_install_flag }
15 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-torture/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jitsi_torture
4 | author: Aaron van Meerten
5 | description: install and run jitsi torture
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: google-chrome }
15 | - { role: chromedriver }
16 |
--------------------------------------------------------------------------------
/ansible/roles/ntp/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: "René Moser"
4 | license: BSD
5 | description: NTP role
6 | min_ansible_version: '1.4'
7 | platforms:
8 | - name: Ubuntu
9 | versions:
10 | - precise
11 | - quantal
12 | - raring
13 | - saucy
14 | - name: Debian
15 | versions:
16 | - wheezy
17 | galaxy_tags:
18 | - system
19 | dependencies: []
20 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/selenium-grid-hub.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Selenium Grid
3 | After=network.target
4 |
5 | [Service]
6 | User={{ selenium_grid_username }}
7 | Group={{ selenium_grid_groupname }}
8 | ExecStart={{ selenium_grid_java_path }} -jar {{ selenium_grid_file }} -role hub -hubConfig {{ selenium_grid_hub_config_file }}
9 | Restart=on-failure
10 | RestartPreventExitStatus=255
11 | Type=simple
12 |
13 |
--------------------------------------------------------------------------------
/ansible/roles/consul-selenium-grid-hub/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: consul_selenium_grid_hub
4 | author: Aaron van Meerten
5 | description: install and configure consul for selenium grid
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: consul-agent }
15 |
--------------------------------------------------------------------------------
/ansible/roles/nodejs/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Mark Wolfe
4 | description: Installs the NodeSource Node.js binary packages
5 | company: NodeSource
6 | license: MIT
7 | min_ansible_version: 1.2
8 | platforms:
9 | - name: Ubuntu
10 | versions:
11 | - precise
12 | - trusty
13 | categories:
14 | - development
15 | - networking
16 | - packaging
17 | - web
18 | dependencies: []
19 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/selenium-grid-extras-hub.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Selenium Grid
3 | After=network.target
4 |
5 | [Service]
6 | User={{ selenium_grid_username }}
7 | Group={{ selenium_grid_groupname }}
8 | ExecStart={{ selenium_grid_java_path }} -jar {{ selenium_grid_extras_file }}
9 | Restart=on-failure
10 | RestartPreventExitStatus=255
11 | Type=simple
12 | WorkingDirectory={{ selenium_grid_extras_path }}
13 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/selenium-grid-hub.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "port": 4444,
3 | "newSessionWaitTimeout": -1,
4 | "servlets" : [],
5 | "withoutServlets": [],
6 | "custom": {},
7 | "capabilityMatcher": "org.openqa.grid.internal.utils.DefaultCapabilityMatcher",
8 | "throwOnCapabilityNotPresent": true,
9 | "cleanUpCycle": 5000,
10 | "role": "hub",
11 | "debug": false,
12 | "browserTimeout": 60,
13 | "timeout": 60
14 | }
--------------------------------------------------------------------------------
/ansible/roles/jigasi-haproxy-agent/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jigasi_haproxy_agent
4 | author: Aaron van Meerten
5 | description: configure haproxy agent for jigasi
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nodejs, when: jigasi_haproxy_agent_install_flag }
15 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-rtcstats-push/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jigasi_rtcstats_push
4 | author: Aaron van Meerten
5 | description: configure rtcstats push for jigasi
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nodejs, when: jigasi_rtcstats_push_install_flag }
15 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi/files/monitor-terminating-instance.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jigasi Terminating Instance Monitor
3 | After=network.target
4 |
5 | [Service]
6 | WorkingDirectory=/usr/local/bin
7 | ExecStart=/usr/local/bin/monitor-terminating-instance.sh
8 | Restart=on-failure
9 | Type=simple
10 | StandardOutput=syslog
11 | StandardError=syslog
12 | SyslogIdentifier=monitor-terminating-instance
13 |
14 | [Install]
15 | WantedBy=multi-user.target
16 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/files/configure-jvb-oracle.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 |
4 | import json
5 |
6 | # Oracle does not use multiple shards
7 | # Create dummy shard details for now
8 | def main():
9 | local_shard = 'standalone'
10 | facts = {
11 | 'shard': local_shard
12 | }
13 | facts['shards'] = {local_shard: dict(facts)}
14 |
15 | print(json.dumps(facts))
16 |
17 | if __name__ == '__main__':
18 | main()
19 |
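Editor's note: tracing the script, dict(facts) copies the single-key dict before it is nested, so the printed JSON carries the shard name at both levels. The structure it emits, shown here as YAML for readability:

    shard: standalone
    shards:
      standalone:
        shard: standalone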
--------------------------------------------------------------------------------
/ansible/roles/common/files/download.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if [ $# -lt 1 ]; then
3 | echo "Usage: download.sh file ..."
4 | exit 1
5 | fi
6 | for fn in "$@"
7 | do
8 | if [ -r "$fn" ] ; then
9 | printf '\033]1337;File=name='`echo -n "$fn" | base64`";"
10 | wc -c "$fn" | awk '{printf "size=%d",$1}'
11 | printf ":"
12 | base64 < "$fn"
13 | printf '\a'
14 | else
15 | echo File $fn does not exist or is not readable.
16 | fi
17 | done
18 |
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-configure/templates/haproxy_default.j2:
--------------------------------------------------------------------------------
1 | # Defaults file for HAProxy
2 | #
3 | # This is sourced by both the initscript and the systemd unit file, so do not
4 | # treat it as a shell script fragment.
5 |
6 | # Change the config file location if needed
7 | #CONFIG="/etc/haproxy/haproxy.cfg"
8 |
9 | # Add extra flags here, see haproxy(1) for a few options
10 | #EXTRAOPTS="-de -m 16"
11 | EXTRAOPTS="-L {{ ansible_hostname.split('.')[0] }}"
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge-ddns/templates/cleanup_route53_dns.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash -v
2 | set -x
3 | #make sure we exit early if we fail any step
4 | set -e
5 |
6 | # clean up the Route53 DNS
7 | cd /usr/share/jitsi-ddns-lambda
8 | node index.js update_by_info --action remove --instance_name {{ jvb_ddns_hostname }} --zone_id {{ jvb_ddns_zone }} --ipv4_addr "{{ jvb_ddns_ipv4_addr }}" {% if ipv6_addr %} --ipv6_addr "{{ ipv6_addr }}"{% endif %} || true
9 | cd -
10 |
--------------------------------------------------------------------------------
/ansible/roles/nodejs/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | nodejs_architecture: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
3 | nodejs_from_apt: false
4 | # Pin-Priority of NodeSource repository
5 | nodejs_nodesource_pin_priority: 500
6 | nodejs_url: "https://{{ jitsi_repo_username }}:{{ jitsi_repo_password }}@{{ jitsi_repo_host }}/debian/misc/nodejs_12.22.9-deb-1nodesource1_{{ nodejs_architecture }}.deb"
7 | # node major version pattern, e.g. "12.*"
8 | nodejs_version: "12.*"
9 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/hub_4444.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "port": 4444,
3 | "newSessionWaitTimeout": -1,
4 | "servlets": [
5 | "com.groupon.seleniumgridextras.grid.servlets.ProxyStatusJsonServlet"
6 | ],
7 | "capabilityMatcher": "org.openqa.grid.internal.utils.DefaultCapabilityMatcher",
8 | "throwOnCapabilityNotPresent": true,
9 | "nodePolling": 5000,
10 | "cleanUpCycle": 5000,
11 | "browserTimeout": 60,
12 | "timeout": 60
13 | }
14 |
--------------------------------------------------------------------------------
/ansible/templates/torture_wrapper.j2.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export TORTURE_EXCLUDE_TESTS="{{ jitsi_torture_exclude_tests }}"
3 |
4 | cd {{ jitsi_torture_path }}
5 |
6 | {% if torture_longtest_only == 'long' %}
7 | ./test-runner-long.sh {{ jitsi_torture_domain }} {{ torture_longtest_duration }}
8 | {% elif torture_longtest_only == 'all' %}
9 | ./test-runner-all.sh {{ jitsi_torture_domain }}
10 | {% else %}
11 | ./test-runner.sh {{ jitsi_torture_domain }}
12 | {% endif %}
--------------------------------------------------------------------------------
/ansible/roles/jicofo/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jicofo
4 | author: Aaron van Meerten
5 | description: install and configure jicofo
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: jitsi-repo, when: jicofo_install_flag }
15 | - { role: openjdk-java, when: jicofo_install_flag }
16 |
--------------------------------------------------------------------------------
/ansible/roles/jiconop/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jiconop
4 | author: Aaron van Meerten
5 | description: install and configure jiconop
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nodejs, when: jiconop_install_flag }
15 | - { role: jitsi-repo, when: jiconop_install_flag }
16 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-rtcstats-push/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jvb_rtcstats_push
4 | author: Aaron van Meerten
5 | description: install and configure rtc stats pusher for videobridge
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nodejs, when: jvb_rtcstats_push_install_flag }
15 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-ssl/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jvb_ssl_certificate: "{{ jitsi_net_ssl_certificate }}{{ jitsi_net_ssl_extras }}"
3 | jvb_ssl_dest_dir: /etc/nginx/ssl
4 | jvb_ssl_domain_name: "{{ jvb_ssl_server_prefix }}.{{ jvb_ssl_domain_suffix }}"
5 | jvb_ssl_domain_suffix: "jitsi.net"
6 | jvb_ssl_install_flag: true
7 | jvb_ssl_key_name: "{{ jitsi_net_ssl_key_name }}"
8 | jvb_ssl_server_prefix: "{{ ansible_hostname.split('.')[0] }}"
9 | jvb_ssl_websockets_port: 9090
10 |
--------------------------------------------------------------------------------
/ansible/roles/chromedriver/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | chromedriver_latest_release_url: https://chromedriver.storage.googleapis.com/LATEST_RELEASE
3 | chromedriver_path: /usr/bin/chromedriver
4 | chromedriver_tmp_dir: /tmp/chromedriver_linux64
5 | chromedriver_tmp_path: /tmp/chromedriver_linux64.zip
6 | chromedriver_url: https://chromedriver.storage.googleapis.com/{{ chromedriver_version }}/chromedriver_linux64.zip
7 | chromedriver_use_latest: true
8 | chromedriver_version: 85.0.4183.87
9 |
--------------------------------------------------------------------------------
/ansible/roles/iptables-jenkins/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Open TCP port 443 via iptables
3 | ansible.builtin.iptables:
4 | chain: INPUT
5 | ctstate: NEW
6 | protocol: tcp
7 | destination_port: "443"
8 | jump: ACCEPT
9 | action: insert
10 | comment: Added via ansible post-launch configuration script
11 |
12 | - name: Save newly added iptable rules # noqa no-changed-when
13 | ansible.builtin.shell: iptables-save > /etc/iptables/rules.v4
14 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jenkins
4 | author: Aaron van Meerten
5 | description: install and configure jenkins for jitsi
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nginx, when: jenkins_install_flag }
15 | - { role: docker, when: jenkins_install_flag }
16 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #v4l2loopback dependency
3 | # Install v4l2 loopback directly from the internet,
4 | # as apt v4l2loopback-dkms results in errors on Oracle Ubuntu version https://github.com/umlaeute/v4l2loopback/issues/247,
5 | # such as this error at modprobe: ERROR: could not insert 'v4l2loopback': Bad address
6 | - name: Install v4l2loopback package from the internet
7 | apt:
8 | deb: "{{ jibri_pjsua_v4l2_download_url }}"
9 |
10 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: fluentd_jitsi
4 | author: Aaron van Meerten
5 | description: configure fluentd for jitsi
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: rsyslog, when: fluentd_install_flag }
15 | - { role: fluentd, when: fluentd_install_flag }
16 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/templates/tenant-pin.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=haproxy tenant pin updater service
3 | After=multi-user.target
4 |
5 | [Service]
6 | EnvironmentFile=/etc/{{ haproxy_tenant_pin_service_name }}.conf
7 | ExecStart=/usr/bin/python /usr/local/bin/haproxy_tenant_sync.py
8 | Type=simple
9 | User=root
10 | Group=root
11 | Restart=always
12 | SyslogIdentifier={{ haproxy_tenant_pin_service_name }}
13 |
14 | [Install]
15 | WantedBy=multi-user.target
16 |
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/tasks/deb_packages.yml:
--------------------------------------------------------------------------------
1 | # Install packages for Debian
2 | ---
3 | - name: Add rsyslog apt repo
4 | ansible.builtin.apt_repository:
5 | repo: "ppa:adiscon/v8-stable"
6 | state: present
7 | update_cache: true
8 | when: ansible_distribution == "Ubuntu"
9 |
10 | - name: Install packages for Debian
11 | tags:
12 | - rsyslog
13 | - packages
14 | ansible.builtin.apt:
15 | pkg: "{{ rsyslog_package_names }}"
16 | state: present
17 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: autoscaler_sidecar
4 | author: Aaron van Meerten
5 | description: install and configure jitsi autoscaler sidecar service
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nodejs, when: autoscaler_install_flag and jitsi_autoscaler_sidecar }
15 |
--------------------------------------------------------------------------------
/ansible/roles/google-cloud/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # broke for missing cloud-sdk-focal, so using the generic 'cloud-sdk' release for now
3 | # google_cloud_apt_release_name: "cloud-sdk-{{ ansible_distribution_release }}"
4 | google_cloud_apt_release_name: "cloud-sdk"
5 | google_cloud_conf_dir: /etc/google-cloud
6 | google_cloud_configure_flag: true
7 | google_cloud_install_flag: true
8 | google_cloud_service_key_json: "{{ google_cloud_service_key_json_default }}"
9 | google_cloud_service_key_json_default:
10 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-colibri-proxy/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: jvb_colibri_proxy
4 | author: Aaron van Meerten
5 | description: install and configure nginx to proxy jvb colibri
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 | dependencies:
14 | - { role: nginx, nginx_start_service: false, when: jvb_colibri_proxy_install_flag}
15 |
--------------------------------------------------------------------------------
/ansible/roles/coturn/templates/coturn-mark-unhealthy-oracle.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | INSTANCE_METADATA=$(curl -s http://169.254.169.254/opc/v1/instance/)
3 | INSTANCE_ID=$(echo "$INSTANCE_METADATA" | jq .id -r)
4 | OCI_BIN="/usr/local/bin/oci"
5 |
6 | #Metric
7 | {% if coturn_copy_dumps_to_s3 %}
8 | {{coturn_scripts_path}}/dump-coturn.sh copy_and_send
9 | $OCI_BIN compute instance terminate --debug --instance-id "$INSTANCE_ID" --preserve-boot-volume false --force --auth instance_principal
10 | {% endif %}
--------------------------------------------------------------------------------
/ansible/roles/consul-jigasi/templates/jigasi.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "service": {
3 | "name": "jigasi",
4 | "tags":["{{ hcv_environment }}"],
5 | "meta": {
6 | "environment":"{{ hcv_environment }}"
7 | },
8 | "port": 80,
9 | "checks": [
10 | {
11 | "name": "Jigasi REST Health",
12 | "http": "http://localhost:8788/about/health",
13 | "method": "GET",
14 | "interval": "10s",
15 | "timeout": "1s"
16 | }
17 | ]
18 | }
19 | }
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-status-lock/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Lock haproxy to freeze proxymonitor scan
3 | ansible.builtin.file:
4 | mode: 0644
5 | path: '{{ hcv_haproxy_status_lock_file }}'
6 | state: touch
7 | when: hcv_haproxy_status_lock_action == 'lock'
8 |
9 | - name: Unlock haproxy to allow proxymonitor scan
10 | ansible.builtin.file:
11 | path: '{{ hcv_haproxy_status_lock_file }}'
12 | state: absent
13 | when: hcv_haproxy_status_lock_action == 'unlock'
14 |
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/templates/rsyslog-programrouting.conf.j2:
--------------------------------------------------------------------------------
1 | ## {{ ansible_managed }}
2 |
3 | $PrivDropToGroup adm
4 |
5 | {% for program in rsyslog_known_programs %}
6 | template(name="{{ program }}LocalLog" type="string" string="{{ rsyslog_local_log_directory }}%programname%.log")
7 | if $programname startswith '{{ program }}' then {
8 |   action(type="omfile" DynaFile="{{ program }}LocalLog")
9 | stop
10 | }
11 |
12 | {% endfor %}
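13 | {# Illustration with hypothetical values: if rsyslog_known_programs contains 'jicofo' and
14 |    rsyslog_local_log_directory is '/var/log/local/', one loop iteration renders roughly as:
15 |      template(name="jicofoLocalLog" type="string" string="/var/log/local/%programname%.log")
16 |      if $programname startswith 'jicofo' then {
17 |        action(type="omfile" DynaFile="jicofoLocalLog")
18 |        stop
19 |      }
20 | #}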
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - { role: openjdk-java, when: selenium_grid_install_flag }
4 | # not used if extras is used
5 | - { role: chromedriver, when: selenium_grid_install_flag }
6 | - { role: google-chrome, google_chrome_beta_flag: true }
7 | - { role: firefox, firefox_beta_flag: true, when: selenium_grid_install_flag }
8 | # not used if extras is used
9 | - { role: geckodriver, when: selenium_grid_install_flag }
10 | - { role: jitsi-torture-checkout }
11 |
--------------------------------------------------------------------------------
/ansible/roles/sshusers/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | security_additional_groups: []
3 | security_users: "{{ ssh_users_security }}"
4 | # How to manage users
5 | # ssh_users:
6 | # - username: vfedorov
7 | # groups: ["sshsudousers"]
8 | # real_name: Vadym Fedorov
9 | # state: absent
10 | # ssh_key:
11 | # - "ssh-rsa AAAA....Q== vfedorov@spider"
12 | ssh_users: "{{ ssh_users_jitsi }}"
13 | ssh_users_config_flag: true
14 | ssh_users_accounts_flag: true
15 | ssh_users_system_flag: true
16 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/selenium-grid-extras-node.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Selenium Grid
3 | After=selenium-xvfb.service
4 | Requires=selenium-xvfb.service
5 |
6 | [Service]
7 | User={{ selenium_grid_username }}
8 | Group={{ selenium_grid_groupname }}
9 | Environment=DISPLAY=:99
10 | ExecStart={{ selenium_grid_java_path }} -jar {{ selenium_grid_extras_file }}
11 | Restart=on-failure
12 | RestartPreventExitStatus=255
13 | Type=simple
14 | WorkingDirectory={{ selenium_grid_extras_path }}
15 |
--------------------------------------------------------------------------------
/ansible/roles/signal-sidecar/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | role_name: signal_sidecar
4 | author: Aaron van Meerten
5 | description: install and configure signal sidecar
6 | license: Apache License Version 2.0
7 | min_ansible_version: '6.6.0'
8 | platforms:
9 | - name: Ubuntu
10 | releases:
11 | - focal
12 | - jammy
13 |
14 | dependencies:
15 | - { role: nodejs, when: signal_sidecar_install_flag }
16 | - { role: jitsi-repo, when: signal_sidecar_install_flag }
17 |
--------------------------------------------------------------------------------
/ansible/roles/wavefront/templates/10-wavefront.conf.j2:
--------------------------------------------------------------------------------
1 | # # Configuration for Wavefront proxy to send metrics to
2 | [[outputs.wavefront]]
3 | {% if wavefront_tcp_mode %}
4 | host = "{{ wavefront_proxy_address }}"
5 | port = {{ wavefront_proxy_port }}
6 | {% else %}
7 | url = "http://{{ wavefront_proxy_address }}:{{ wavefront_proxy_json_port }}"
8 | {% endif %}
9 | metric_separator = "."
10 | source_override = ["hostname", "snmp_host", "node_host"]
11 | convert_paths = true
12 | use_regex = false
13 |
--------------------------------------------------------------------------------
/ansible/roles/testrtc/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install_testrtc.yml
4 | when: testrtc_install_flag
5 |
6 | - name: Check that testrtc folder exists
7 | ansible.builtin.stat:
8 | path: "{{ testrtc_base_path }}"
9 | register: testrtc_path
10 |
11 | - name: Include configure tasks
12 | ansible.builtin.include_tasks: configure_testrtc.yml
13 | when: testrtc_configure_flag and testrtc_path.stat.isdir is defined and testrtc_path.stat.isdir
14 |
--------------------------------------------------------------------------------
/ansible/clear-cloud-cache.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: all
3 | gather_facts: false
4 | strategy: free
5 | become_user: root
6 | become: true
7 |
8 | tasks:
9 | - name: Find cache files
10 | ansible.builtin.find:
11 | paths: /tmp
12 | patterns: "*_cache-*"
13 | register: find_results
14 |
15 | - name: Delete cache files
16 | ansible.builtin.file:
17 | path: "{{ item['path'] }}"
18 | state: absent
19 | with_items: "{{ find_results['files'] }}"
20 |
--------------------------------------------------------------------------------
/ansible/roles/consul-server/templates/consul.hcl.j2:
--------------------------------------------------------------------------------
1 | datacenter = "{{ consul_datacenter }}"
2 | data_dir = "/opt/consul"
3 | encrypt = "{{ consul_encryption_key }}"
4 | retry_join = {{ consul_retry_join_lan }}
5 | retry_join_wan = {{ consul_retry_join_wan }}
6 | server = true
7 | bootstrap_expect = 3
8 | addresses {
9 | http = "0.0.0.0"
10 | }
11 | connect {
12 | enabled = true
13 | }
14 | telemetry {
15 | disable_hostname = true
16 | dogstatsd_addr = "localhost:8125"
17 | prometheus_retention_time = "5m"
18 | }
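19 | # Note: retry_join and retry_join_wan above are expected to render as HCL lists,
20 | # e.g. retry_join = ["10.0.0.1", "10.0.0.2"] (illustrative addresses only).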
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/prosody-log-filter.systemd:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Prosody Log Filter
3 | After=network.target
4 | Before=prosody.service
5 |
6 | [Service]
7 | User=root
8 | Group=prosody
9 | WorkingDirectory=/var/log/prosody
10 | ExecStart=/usr/local/bin/prosody-log-filter.sh
11 | Restart=always
12 | KillSignal=SIGQUIT
13 | Type=simple
14 | StandardOutput=syslog
15 | StandardError=syslog
16 | NotifyAccess=all
17 | SyslogIdentifier=prosody-log-filter
18 |
19 | [Install]
20 | WantedBy=multi-user.target prosody.service
--------------------------------------------------------------------------------
/ansible/roles/consul-install/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_apt_key: https://apt.releases.hashicorp.com/gpg
3 | consul_apt_repo: https://apt.releases.hashicorp.com
4 | consul_architecture: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
5 | consul_download_url: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_{{ consul_architecture }}.zip"
6 | consul_log_dir: /var/log/
7 | consul_version: 1.7.2
8 | consul_zip_hash: 5ab689cad175c08a226a5c41d16392bc7dd30ceaaf90788411542a756773e698
9 |
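10 | # with the defaults above, amd64 hosts download e.g.
11 | # https://releases.hashicorp.com/consul/1.7.2/consul_1.7.2_linux_amd64.zip (arm64 hosts substitute 'arm64')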
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart jibri
3 | ansible.builtin.service:
4 | name: jibri
5 | state: restarted
6 | when: not jibri_configure_only_flag
7 |
8 | - name: Restart jibri systemd
9 | ansible.builtin.systemd:
10 | name: jibri
11 | state: restarted
12 | daemon_reload: true
13 | when: not jibri_configure_only_flag
14 |
15 | - name: Restart Monitor Terminating Instance
16 | ansible.builtin.service:
17 | name: monitor-terminating-instance
18 | state: restarted
19 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/files/terminate_instance_aws.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CURL_BIN="/usr/bin/curl"
4 | AWS_BIN="/usr/local/bin/aws"
5 | JQ_BIN="/usr/bin/jq"
6 |
7 | if [ -z "$INSTANCE_ID" ]; then
8 | INSTANCE_ID=$(/usr/bin/ec2metadata --instance-id)
9 | fi
10 |
11 | EC2_REGION=$($CURL_BIN -s http://169.254.169.254/latest/dynamic/instance-identity/document | $JQ_BIN .region -r)
12 | export AWS_DEFAULT_REGION=$EC2_REGION
13 |
14 | # terminate our instance
15 | $AWS_BIN ec2 terminate-instances --instance-ids "$INSTANCE_ID"
16 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/bootstrap.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | <source>
4 | type tail
5 | path /var/log/bootstrap.log
6 | pos_file /var/spool/td-agent/bootstrap.pos
7 | tag bootstrap
8 |
9 | format multiline
10 | format_firstline /^(?[\w]+\s?[\w]+) (?\[.*:.*\])\s?\**\s/
11 | format1 /^(?[\w]+\s?[\w]+) (?\[[\w\-:].*\])\s?\**\s?(?[^ ]+) (?[^ ]+) => (?[\{|\(].*[\}|\)]+)$/
12 |
13 | # Date and time format
14 | time_format %d/%B/%Y:%H:%M:%S
15 |
16 | </source>
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart jitsi-videobridge
2 | service: name={{ jvb_service_name }} state=restarted
3 | when: ((jvb_image_build_flag == false) and (jvb_configure_from_template_flag == true) and (jvb_reconfigure_on_changes_flag == true))
4 |
5 | - name: restart jitsi-videobridge systemd
6 | systemd: name={{ jvb_systemd_service_name }} state=restarted daemon_reload=yes
7 | when: ((jvb_image_build_flag == false) and (jvb_configure_from_template_flag == true) and (jvb_reconfigure_on_changes_flag == true))
--------------------------------------------------------------------------------
/ansible/roles/haproxy-jigasi/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Reload haproxy
3 | ansible.builtin.service:
4 | name: haproxy
5 | state: reloaded
6 |
7 | - name: Reload haproxy systemd
8 | ansible.builtin.systemd:
9 | name: haproxy
10 | state: reloaded
11 | daemon_reload: true
12 | when: ansible_service_mgr == "systemd"
13 |
14 | - name: Restart haproxy systemd
15 | ansible.builtin.systemd:
16 | name: haproxy
17 | state: restarted
18 | daemon_reload: true
19 | when: ansible_service_mgr == "systemd"
20 |
--------------------------------------------------------------------------------
/ansible/roles/consul-server/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install consul config file
3 | ansible.builtin.template:
4 | src: "consul.hcl.j2"
5 | dest: "/etc/consul.d/consul.hcl"
6 | mode: 0640
7 |
8 | - name: Install consul env file
9 | ansible.builtin.copy:
10 | content: ""
11 | dest: "/etc/consul.d/consul.env"
12 | mode: 0644
13 |
14 | - name: Install consul config script
15 | ansible.builtin.copy:
16 | src: "consul-server-config.sh"
17 | dest: "/usr/local/bin/consul-server-config.sh"
18 | mode: 0755
19 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge-auth/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jvb_auth_domain: "auth.{{ prosody_domain_name }}"
3 | jvb_auth_domain_path: "/var/lib/prosody/{{ jvb_auth_domain|regex_replace('\\.','%2e')|regex_replace('-','%2d') }}/accounts/{{ jvb_auth_user }}.dat"
4 | jvb_auth_password: "{{ jvb_xmpp_password | default('replaceme') }}"
5 | # prosody-jvb
6 | jvb_auth_prosody_jvb_domain_path: "/var/lib/prosody-jvb/{{ prosody_jvb_auth_domain_name|regex_replace('\\.','%2e')|regex_replace('-','%2d') }}/accounts/{{ jvb_auth_user }}.dat"
7 | jvb_auth_user: "jvb"
8 |
--------------------------------------------------------------------------------
/ansible/roles/common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | common_cloud_provider: "{{ cloud_provider | default('aws') }}"
3 | common_install_pip3_flag: "{{ true
4 | if (ansible_distribution_release == 'jammy') or (ansible_distribution_release == 'focal')
5 | or (common_cloud_provider == 'oracle') else false }}"
6 | common_install_pip_flag: "{{
7 | false if (ansible_distribution_release == 'jammy') or (ansible_distribution_release == 'focal')
8 | or (common_cloud_provider == 'oracle') else true }}"
9 | gai_ipv6_resolve_disabled: true
10 | locale: en_US.UTF-8
11 |
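12 | # net effect of the two flags above: focal/jammy releases and Oracle hosts install pip3 only,
13 | # while other releases/clouds install legacy pip only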
--------------------------------------------------------------------------------
/ansible/roles/jitsi-torture-checkout/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jitsi_torture_checkout_git_branch: 'master'
3 | jitsi_torture_checkout_git_repo: https://github.com/jitsi/jitsi-meet-torture.git
4 | jitsi_torture_checkout_path: /usr/share/jitsi-meet-torture
5 | jitsi_torture_cloud_provider: "{{ cloud_provider | default('aws') }}"
6 | jitsi_torture_oracle_bucket_name: "jvb-images-{{ jitsi_torture_oracle_environment }}"
7 | jitsi_torture_oracle_environment: "{{ hcv_environment | default('all') }}"
8 | jitsi_torture_oracle_region: "{{ oracle_region | default('') }}"
9 |
--------------------------------------------------------------------------------
/ansible/roles/google-cloud/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Google Cloud repo signing key
3 | ansible.builtin.apt_key:
4 | url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
5 | state: present
6 |
7 | - name: Google Cloud repo
8 | ansible.builtin.apt_repository:
9 | repo: "deb http://packages.cloud.google.com/apt {{ google_cloud_apt_release_name }} main"
10 | state: present
11 | update_cache: true
12 |
13 | - name: Install google cloud SDK
14 | ansible.builtin.apt:
15 | name: google-cloud-sdk
16 | state: present
17 |
--------------------------------------------------------------------------------
/ansible/roles/coturn/templates/coturn_set_alarms_systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Check instance termination state
3 | Before=shutdown.target reboot.target halt.target
4 | Requires=network-online.target network.target
5 |
6 | [Service]
7 | ExecStart={{coturn_scripts_path}}/coturn-set-alarms.sh ok
8 | ExecStart={{coturn_scripts_path}}/coturn-set-alarms.sh ok CoturnOpenedFilesLimitFailed
9 | ExecStop={{coturn_scripts_path}}/coturn-set-alarms.sh alarm
10 | Type=oneshot
11 | RemainAfterExit=yes
12 |
13 | [Install]
14 | WantedBy=reboot.target shutdown.target halt.target
--------------------------------------------------------------------------------
/ansible/roles/jigasi-auth/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add jigasi XMPP control authentication
3 | ansible.builtin.command: prosodyctl register {{ jigasi_auth_user }} {{ jigasi_auth_domain }} {{ jigasi_auth_password }}
4 | args:
5 | creates: "{{ jigasi_auth_domain_path }}"
6 |
7 | - name: Add jigasi transcriber authentication
8 | ansible.builtin.command: prosodyctl register {{ jigasi_transcriber_auth_user }} {{ jigasi_transcriber_auth_domain }} {{ jigasi_transcriber_auth_password }}
9 | args:
10 | creates: "{{ jigasi_transcriber_auth_domain_path }}"
11 |
--------------------------------------------------------------------------------
/ansible/roles/tcpdump-jigasi/files/tcpdump-jigasi.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=tcpdump for jigasi
3 | After=network.target
4 |
5 | [Service]
6 | User=root
7 | Group=root
8 | WorkingDirectory=/var/lib/tcpdump-jigasi
9 | ExecStart=/usr/bin/tcpdump -ni ens3 -G 1800 -w /var/lib/tcpdump-jigasi/trace-%%Y-%%m-%%d_%%H.%%M.%%S.pcap port 5061
10 | Restart=on-failure
11 | KillSignal=SIGQUIT
12 | Type=simple
13 | StandardOutput=syslog
14 | StandardError=syslog
15 | NotifyAccess=all
16 | SyslogIdentifier=tcpdump-jigasi
17 |
18 | [Install]
19 | WantedBy=multi-user.target
20 |
21 |
--------------------------------------------------------------------------------
/ansible/roles/unattended-upgrades/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install unattended-upgrades
3 | apt: name=unattended-upgrades state=present
4 |
5 | - name: create unattended-upgrades configuration (20auto-upgrades)
6 | template: >
7 | src=20auto-upgrades.j2 dest=/etc/apt/apt.conf.d/20auto-upgrades
8 | owner=root group=root mode=0644
9 |
10 | - name: create unattended-upgrades configuration (50unattended-upgrades)
11 | template: >
12 | src=50unattended-upgrades.j2 dest=/etc/apt/apt.conf.d/50unattended-upgrades
13 | owner=root group=root mode=0644
14 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/files/terminate_instance_oracle.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export OCI_BIN="/usr/local/bin/oci"
4 | export CURL_BIN="/usr/bin/curl"
5 | export JQ_BIN="/usr/bin/jq"
6 |
7 | if [ -z "$INSTANCE_ID" ]; then
8 | INSTANCE_ID=$($CURL_BIN -s http://169.254.169.254/opc/v1/instance/ | $JQ_BIN .id -r)
9 | fi
10 |
11 | # terminate our instance; we enable debug to have more details in case of oci cli failures
12 | $OCI_BIN compute instance terminate --debug --instance-id "$INSTANCE_ID" --preserve-boot-volume false --auth instance_principal --force
13 |
--------------------------------------------------------------------------------
/ansible/roles/consul-install/files/consul.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description="HashiCorp Consul - A service mesh solution"
3 | Documentation=https://www.consul.io/
4 | Requires=network-online.target
5 | After=network-online.target
6 | ConditionFileNotEmpty=/etc/consul.d/consul.hcl
7 |
8 | [Service]
9 | Type=notify
10 | User=consul
11 | Group=consul
12 | ExecStart=/usr/local/bin/consul agent -config-dir=/etc/consul.d/
13 | ExecReload=/usr/local/bin/consul reload
14 | KillMode=process
15 | Restart=on-failure
16 | LimitNOFILE=65536
17 |
18 | [Install]
19 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/ansible/roles/fail2ban/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install fail2ban
3 | ansible.builtin.apt:
4 | name: fail2ban
5 | install_recommends: true
6 | state: present
7 | notify: Restart fail2ban
8 |
9 | - name: Make fail2ban sshd aggressive
10 | ansible.builtin.lineinfile:
11 | path: /etc/fail2ban/jail.conf
12 | regexp: '^#mode = normal'
13 | line: 'mode = aggressive'
14 | notify: Restart fail2ban
15 |
16 | - name: Start fail2ban at boot
17 | ansible.builtin.service:
18 | name: fail2ban
19 | state: started
20 | enabled: true
21 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/templates/47-nginx.conf.j2:
--------------------------------------------------------------------------------
1 | $ModLoad imfile
2 | $PrivDropToGroup adm
3 |
4 | # Nginx access file:
5 | $InputFileName /var/log/nginx/access.log
6 | $InputFileTag nginx-access:
7 | $InputFileStateFile stat-nginx-access
8 | $InputFileSeverity info
9 | $InputFilePersistStateInterval 20000
10 | $InputRunFileMonitor
11 |
12 | #Nginx Error file:
13 | $InputFileName /var/log/nginx/error.log
14 | $InputFileTag nginx-error:
15 | $InputFileStateFile stat-nginx-error
16 | $InputFileSeverity error
17 | $InputFilePersistStateInterval 20000
18 | $InputRunFileMonitor
19 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/mod_websocket_auth_token.patch:
--------------------------------------------------------------------------------
1 | --- mod_websocket.lua Thu Jan 23 21:59:13 2020 +0000
2 | +++ mod_websocket.lua Fri Jan 24 16:21:30 2020 +0000
3 | @@ -305,6 +305,8 @@
4 | response.headers.sec_webSocket_accept = base64(sha1(request.headers.sec_websocket_key .. "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"));
5 | response.headers.sec_webSocket_protocol = "xmpp";
6 |
7 | + module:fire_event("websocket-session", { session = session, request = request });
8 | +
9 | session.log("debug", "Sending WebSocket handshake");
10 |
11 | return "";
12 |
13 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/files/jibri.rsyslogd.conf:
--------------------------------------------------------------------------------
1 | if $programname == 'launch_jibri.sh' then {
2 | /var/log/local/jibri.log
3 | ~
4 | }
5 |
6 | if $programname == 'monitor-terminating-instance.sh' then {
7 | /var/log/local/jibri-monitor.log
8 | ~
9 | }
10 |
11 | if $programname == 'monitor-reconfigure-instance.sh' then {
12 | /var/log/local/jibri-monitor.log
13 | ~
14 | }
15 |
16 | if $programname == 'icewm-session' then {
17 | /var/log/local/jibri-extras.log
18 | ~
19 | }
20 |
21 | if $programname == 'Xorg' then {
22 | /var/log/local/jibri-extras.log
23 | ~
24 | }
--------------------------------------------------------------------------------
/ansible/roles/consul-standalone/templates/standalone.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "service": {
3 | "name": "all",
4 | "tags":["{{ hcv_environment }}"],
5 | "meta": {
6 | "environment":"{{ hcv_environment }}",
7 | "domain":"{{ prosody_domain_name }}"
8 | },
9 | "tagged_addresses": {
10 | "lan": {
11 | "address": "{{ consul_standalone_private_ip }}",
12 | "port": 5280
13 | },
14 | "wan": {
15 | "address": "{{ consul_standalone_public_ip }}",
16 | "port": 5280
17 | }
18 | },
19 | "port": 5280
20 | }
21 | }
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/jigasi.conf:
--------------------------------------------------------------------------------
1 | # Jigasi
2 |
3 | @type tail
4 | path /var/log/jitsi/jigasi.log
5 | pos_file /var/spool/td-agent/jigasi.pos
6 |
7 | #java with possible multi-line
8 | format multiline
9 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/
10 | format1 /^(?
18 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/postinstall_ansible.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | <source>
4 | type tail
5 | path /var/log/postinstall-ansible.log
6 | pos_file /var/spool/td-agent/postinstall-ansible.pos
7 | tag postinstall-ansible
8 |
9 | format multiline
10 | format_firstline /^(?[\w]+\s?[\w]+) (?\[.*:.*\])\s?\**\s/
11 | format1 /^(?[\w]+\s?[\w]+) (?\[.*:.*\])\s?\**\s?task path(?:.*:)(?\d+)\s?(?[\W|\s\S]*)$/
12 |
13 |
14 | # Date and time format
15 | time_format %d/%B/%Y:%H:%M:%S
16 |
17 | </source>
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jibri_pjsua_camera_framerate: 30
3 | jibri_pjsua_camera_resolution: "1280x720"
4 | jibri_pjsua_cameras:
5 | - desktop: ":1.0+0,0"
6 | device: "/dev/video0"
7 | number: 0
8 | - desktop: ":0.0+0,0"
9 | device: "/dev/video1"
10 | number: 1
11 | jibri_pjsua_configure_flag: true
12 | jibri_pjsua_dns_server: false
13 | jibri_pjsua_ffmpeg_path: "/usr/bin/ffmpeg"
14 | jibri_pjsua_install_flag: true
15 | jibri_pjsua_v4l2_download_url: http://deb.debian.org/debian/pool/main/v/v4l2loopback/v4l2loopback-dkms_0.12.5-1_all.deb
16 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/jicofo.conf:
--------------------------------------------------------------------------------
1 | # Jicofo
2 |
3 | @type tail
4 | path /var/log/jitsi/jicofo.log
5 | pos_file /var/spool/td-agent/jicofo.pos
6 |
7 | #java with possible multi-line
8 | format multiline
9 | format_firstline /Jicofo /
10 | format1 /^(?[^\s]+) (?
18 |
--------------------------------------------------------------------------------
/ansible/roles/signal-sidecar/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | signal_sidecar_configure_flag: true
3 | signal_sidecar_census_enabled: true
4 | signal_sidecar_deb_pkg_name: "signal-sidecar"
5 | signal_sidecar_deb_pkg_version: "*"
6 | signal_sidecar_domain_name: "{{ prosody_domain_name | default('localhost') }}"
7 | signal_sidecar_enabled: true
8 | signal_sidecar_install_flag: true
9 | signal_sidecar_log_dir: "/var/log/jitsi"
10 | signal_sidecar_max_participants: 5000
11 | signal_sidecar_service_name: "signal-sidecar"
12 | signal_sidecar_weight: false
13 | signal_sidecar_user_name: "signal-sidecar"
14 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/templates/jibri-xorg2.systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jibri Xorg Process
3 | After=network.target
4 |
5 | [Service]
6 | User={{ jibri_username }}
7 | Group={{ jibri_groupname }}
8 | Environment=DISPLAY=:1
9 | ExecStart=/usr/bin/Xorg -nocursor -noreset +extension RANDR +extension RENDER -logfile /var/log/jitsi/jibri/xorg2.log -config /etc/jitsi/jibri/xorg-video-dummy.conf :1
10 | ExecReload=/bin/kill -HUP $MAINPID
11 | KillMode=process
12 | Restart=on-failure
13 | RestartPreventExitStatus=255
14 | Type=simple
15 |
16 | [Install]
17 | WantedBy=jibri-icewm2.service
18 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi/templates/terminate_instance_oracle.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 |
4 | export OCI_BIN="/usr/local/bin/oci"
5 |
6 | INSTANCE_METADATA=$(curl -s http://169.254.169.254/opc/v1/instance/)
7 | INSTANCE_ID=$(echo "$INSTANCE_METADATA" | jq .id -r)
8 |
9 | # stop consul gracefully
10 | service consul stop
11 |
12 | # now terminate our instance
13 | echo "Terminate the instance; we enable debug to have more details in case of oci cli failures"
14 | $OCI_BIN compute instance terminate --debug --instance-id "$INSTANCE_ID" --preserve-boot-volume false --auth instance_principal --force
15 |
--------------------------------------------------------------------------------
/ansible/roles/sip-jibri-sidecar/templates/sidecar.systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jitsi Sip Jibri Sidecar Service
3 | After=network.target
4 |
5 | [Service]
6 | User={{ sip_jibri_username }}
7 | Group={{ sip_jibri_groupname }}
8 | EnvironmentFile={{ sip_jibri_env_path }}
9 | WorkingDirectory={{ sip_jibri_base_path }}
10 | ExecStart=/usr/bin/npm start
11 | Restart=on-failure
12 | KillSignal=SIGQUIT
13 | Type=simple
14 | StandardOutput=syslog
15 | StandardError=syslog
16 | NotifyAccess=all
17 | SyslogIdentifier={{ sip_jibri_sidecar_service_name }}
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
22 |
--------------------------------------------------------------------------------
/ansible/roles/tcpdump-prosody-jvb/files/tcpdump-prosody-jvb.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=tcpdump for prosody-jvb
3 | After=network.target
4 |
5 | [Service]
6 | User=root
7 | Group=root
8 | WorkingDirectory=/var/lib/tcpdump-prosody-jvb
9 | ExecStart=/usr/bin/tcpdump -ni ens3 -G 1800 -w /var/lib/tcpdump-prosody-jvb/trace-%%Y-%%m-%%d_%%H.%%M.%%S.pcap port 6222 or icmp
10 | Restart=on-failure
11 | KillSignal=SIGQUIT
12 | Type=simple
13 | StandardOutput=syslog
14 | StandardError=syslog
15 | NotifyAccess=all
16 | SyslogIdentifier=tcpdump-prosody-jvb
17 |
18 | [Install]
19 | WantedBy=multi-user.target
20 |
21 |
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/templates/sidecar.systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jitsi Autoscaler Sidecar Service
3 | After=network.target
4 |
5 | [Service]
6 | User={{ autoscaler_username }}
7 | Group={{ autoscaler_groupname }}
8 | EnvironmentFile={{ autoscaler_env_path }}
9 | WorkingDirectory={{ autoscaler_base_path }}
10 | ExecStart=/usr/bin/npm start
11 | Restart=on-failure
12 | KillSignal=SIGQUIT
13 | Type=simple
14 | StandardOutput=syslog
15 | StandardError=syslog
16 | NotifyAccess=all
17 | SyslogIdentifier={{ autoscaler_sidecar_service_name }}
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
22 |
--------------------------------------------------------------------------------
/ansible/roles/geckodriver/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | geckodriver_latest_release_url: https://api.github.com/repos/mozilla/geckodriver/releases/latest
3 | geckodriver_password: "{{ infra_github_password | default('defaultpw') }}"
4 | geckodriver_path: /usr/bin/geckodriver
5 | geckodriver_tmp_dir: /tmp
6 | geckodriver_tmp_path: /tmp/geckodriver.tar.gz
7 | geckodriver_url: https://github.com/mozilla/geckodriver/releases/download/{{ geckodriver_version }}/geckodriver-{{ geckodriver_version }}-linux64.tar.gz
8 | geckodriver_username: "{{ infra_github_username | default('defaultuser') }}"
9 | geckodriver_version: v0.19.1
10 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge-auth/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: add jvb XMPP control authentication
3 | command: prosodyctl register {{ jvb_auth_user }} {{ jvb_auth_domain }} {{ jvb_auth_password }}
4 | args:
5 | creates: "{{ jvb_auth_domain_path }}"
6 |
7 | # prosody-jvb
8 | - name: add jvb XMPP control authentication for prosody-jvb
9 | command: prosodyctl --config /etc/prosody-jvb/prosody.cfg.lua register {{ jvb_auth_user }} {{ prosody_jvb_auth_domain_name }} {{ jvb_auth_password }}
10 | args:
11 | creates: "{{ jvb_auth_prosody_jvb_domain_path }}"
12 | when: prosody_jvb_configure_flag
13 |
14 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-colibri-proxy/defaults/main.yml:
--------------------------------------------------------------------------------
1 | jvb_colibri_proxy_install_flag: true
2 | jvb_colibri_proxy_configure_flag: true
3 | # match only 10.52 and 10.53 by default (a deliberately narrow set of CIDRs); overridden for each environment
4 | jvb_colibri_proxy_second_octet_regexp: '5[2-3]'
5 | # match 64-127
6 | jvb_colibri_proxy_third_octet_regexp: '6[4-9]|[7-9][0-9]|1[0-1][0-9]|12[0-7]'
7 | # match 0-255
8 | jvb_colibri_proxy_fourth_octet_regexp: '25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?'
9 | jvb_colibri_proxy_domain_name: "{{ environment_domain_name }}"
10 | jvb_colibri_proxy_nginx_port: "{{ jvb_colibri_proxy_port | default(8088) }}"
11 |
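12 | # Illustration (assuming a 10.x address space, per the comment above): with these defaults
13 | # the second octet must be 52-53, the third 64-127 and the fourth 0-255, so e.g.
14 | # 10.52.64.1 and 10.53.127.255 match while 10.52.130.1 does not.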
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/selenium-grid-node.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Selenium Grid
3 | After=selenium-xvfb.service
4 | Requires=selenium-xvfb.service
5 |
6 | [Service]
7 | User={{ selenium_grid_username }}
8 | Group={{ selenium_grid_groupname }}
9 | Environment=DISPLAY=:99
10 | Environment=DBUS_SESSION_BUS_ADDRESS=/dev/null
11 | ExecStart={{ selenium_grid_java_path }} -jar {{ selenium_grid_file }} -role node -nodeConfig {{ selenium_grid_node_config_file }}
12 | Restart=on-failure
13 | RestartPreventExitStatus=255
14 | Type=simple
15 | WorkingDirectory={{ jitsi_torture_checkout_path }}
16 |
17 |
--------------------------------------------------------------------------------
/ansible/roles/jvb-rtcstats-push/templates/systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=JVB RTC Stats Pusher
3 | After=network.target
4 |
5 | [Service]
6 | User={{ jvb_rtcstats_push_username }}
7 | Group={{ jvb_rtcstats_push_groupname }}
8 | EnvironmentFile={{ jvb_rtcstats_push_config_path }}
9 | WorkingDirectory={{ jvb_rtcstats_push_base_path }}
10 | ExecStart=/usr/bin/node app.js
11 | Restart=on-failure
12 | KillSignal=SIGQUIT
13 | Type=simple
14 | StandardOutput=syslog
15 | StandardError=syslog
16 | NotifyAccess=all
17 | SyslogIdentifier={{ jvb_rtcstats_push_service_name }}
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------
/ansible/roles/consul-signal/files/clear-shard-state-consul.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SHARD_DATA_TYPE="$1"
4 | [ -z "$SHARD_DATA_TYPE" ] && SHARD_DATA_TYPE="shard-states"
5 |
6 | AWS_CACHE_BIN="/usr/local/bin/aws_cache.sh"
7 | ORACLE_CACHE_BIN="/usr/local/bin/oracle_cache.sh"
8 | if [ -e "$ORACLE_CACHE_BIN" ]; then
9 | . $ORACLE_CACHE_BIN
10 | else
11 | . $AWS_CACHE_BIN
12 | fi
13 | if [ "$DOMAIN" == "null" ]; then
14 | DOMAIN=$(hostname)
15 | fi
16 |
17 | if [ "$SHARD" == "null" ]; then
18 | SHARD="$DOMAIN"
19 | fi
20 | SHARD_KEY="$SHARD_DATA_TYPE/$ENVIRONMENT/$SHARD"
21 | consul kv delete "$SHARD_KEY"
22 | exit $?
--------------------------------------------------------------------------------
/ansible/roles/consul-signal/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install consul service file
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: "signal.json.j2"
6 | dest: "/etc/consul.d/signal.json"
7 |
8 | - name: Enable and start consul service # noqa ignore-errors
9 | ansible.builtin.systemd:
10 | name: consul
11 | state: started
12 | enabled: true
13 | ignore_errors: true
14 |
15 | - name: Install consul signal state script
16 | ansible.builtin.copy:
17 | src: "set-shard-state-consul.sh"
18 | dest: "/usr/local/bin/set-shard-state-consul.sh"
19 | mode: 0755
20 | owner: root
21 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/files/graceful_shutdown_terminate.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | GRACEFUL_SHUTDOWN="/opt/jitsi/jibri/wait_graceful_shutdown.sh"
3 |
4 | AWS_BIN="/usr/local/bin/aws"
5 | CURL_BIN="/usr/bin/curl"
6 |
7 | EC2_INSTANCE_ID=$(/usr/bin/ec2metadata --instance-id)
8 | EC2_REGION=$($CURL_BIN -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq .region -r)
9 | export AWS_DEFAULT_REGION=$EC2_REGION
10 |
11 | # run the graceful shutdown and wait for it to finish
12 | sudo $GRACEFUL_SHUTDOWN
13 |
14 | # now terminate our instance
15 | $AWS_BIN ec2 terminate-instances --instance-ids "$EC2_INSTANCE_ID"
16 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-haproxy-agent/tasks/configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Service configuration
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: service.config.j2
6 | dest: "{{ jigasi_haproxy_agent_config_path }}"
7 |
8 | - name: Configure systemd script for service
9 | ansible.builtin.template:
10 | mode: 0644
11 | src: systemd.j2
12 | dest: "/lib/systemd/system/{{ jigasi_haproxy_agent_service_name }}.service"
13 |
14 | - name: Start service and set it enabled
15 | ansible.builtin.service:
16 | name: "{{ jigasi_haproxy_agent_service_name }}"
17 | state: started
18 | enabled: true
19 |
--------------------------------------------------------------------------------
/ansible/roles/signal-sidecar/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install signal-sidecar package
3 | ansible.builtin.apt:
4 | name: "{{ signal_sidecar_deb_pkg_name }}={{ signal_sidecar_deb_pkg_version }}"
5 | state: present
6 | notify:
7 | - Restart signal-sidecar
8 |
9 | - name: Create application log directory
10 | ansible.builtin.file:
11 | mode: 0755
12 | path: "{{ signal_sidecar_log_dir }}"
13 | state: directory
14 |
15 | - name: Install rsyslog configuration
16 | ansible.builtin.template:
17 | mode: 0644
18 | src: rsyslog.config.j2
19 | dest: /etc/rsyslog.d/25-signal-sidecar.conf
20 |
--------------------------------------------------------------------------------
/ansible/roles/jenkins/files/jenkins.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jenkins
3 |
4 | [Service]
5 | SyslogIdentifier=docker-jenkins
6 | ExecStartPre=-/usr/bin/docker create -m 0b -p 8080:8080 -p 50000:50000 --restart=always --name jenkins --add-host=host.docker.internal:host-gateway --privileged --env DOCKER_TLS_CERTDIR=/certs --volume jenkins-docker-certs:/certs/client --volume jenkins-data:/var/jenkins_home --volume /var/run/docker.sock:/var/run/docker.sock aaronkvanmeerten/ops-jenkins:latest
7 | ExecStart=/usr/bin/docker start -a jenkins
8 | ExecStop=-/usr/bin/docker stop --time=0 jenkins
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/ansible/roles/jigasi-haproxy-agent/templates/systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jigasi HAProxy Agent
3 | After=network.target
4 |
5 | [Service]
6 | User={{ jigasi_haproxy_agent_username }}
7 | Group={{ jigasi_haproxy_agent_groupname }}
8 | EnvironmentFile={{ jigasi_haproxy_agent_config_path }}
9 | WorkingDirectory={{ jigasi_haproxy_agent_base_path }}
10 | ExecStart=/usr/bin/npm start
11 | Restart=on-failure
12 | KillSignal=SIGQUIT
13 | Type=simple
14 | StandardOutput=syslog
15 | StandardError=syslog
16 | NotifyAccess=all
17 | SyslogIdentifier={{ jigasi_haproxy_agent_service_name }}
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/tasks/rsyslog.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install syslog for nginx
3 | ansible.builtin.template:
4 | src: 47-nginx.conf.j2
5 | dest: /etc/rsyslog.d/47-nginx.conf
6 | mode: 0644
7 | register: syslog_template
8 | when: nginx_log_syslog_enabled
9 |
10 | - name: Remove syslog for nginx
11 | ansible.builtin.file:
12 | path: /etc/rsyslog.d/47-nginx.conf
13 | state: absent
14 | register: syslog_template
15 | when: not nginx_log_syslog_enabled
16 |
17 | - name: Restart syslog after nginx rules applied
18 | ansible.builtin.service: name=rsyslog state=restarted
19 | when: syslog_template.changed
20 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-rtcstats-push/templates/systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=JIGASI RTC Stats Pusher
3 | After=network.target
4 |
5 | [Service]
6 | User={{ jigasi_rtcstats_push_username }}
7 | Group={{ jigasi_rtcstats_push_groupname }}
8 | EnvironmentFile={{ jigasi_rtcstats_push_config_path }}
9 | WorkingDirectory={{ jigasi_rtcstats_push_base_path }}
10 | ExecStart=/usr/bin/node app.js
11 | Restart=on-failure
12 | KillSignal=SIGQUIT
13 | Type=simple
14 | StandardOutput=syslog
15 | StandardError=syslog
16 | NotifyAccess=all
17 | SyslogIdentifier={{ jigasi_rtcstats_push_service_name }}
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/shards.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "shards":{{ jvb_shards|to_json }},
3 | "drain_mode":{{ jvb_initial_drain_mode|to_json }},
4 | "port": {{ jvb_xmpp_port }},
5 | {% if prosody_jvb_configure_flag %}
6 | "domain":"{{ jvb_prosody_jvb_auth_domain_name }}",
7 | "muc_jids":"jvbbrewery@{{ jvb_prosody_jvb_muc_name }}",
8 | {% else %}
9 | "domain":"{{ jvb_auth_domain }}",
10 | "muc_jids":"{{ jvb_brewery_muc }}",
11 | {% endif %}
12 | "username":"{{ jvb_auth_user }}",
13 | "password":"{{ jvb_auth_password }}",
14 | "muc_nickname":"{{ ansible_hostname }}",
15 | "iq_handler_mode":"{{ jvb_iq_handler_mode }}"
16 | }
--------------------------------------------------------------------------------
/ansible/roles/journald/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Vacuum any existing journal with journalctl # noqa no-changed-when
3 | ansible.builtin.command: journalctl --vacuum-size=200M
4 |
5 | - name: Config to limit journald size
6 | ansible.builtin.lineinfile:
7 | path: /etc/systemd/journald.conf
8 | regexp: '^#SystemMaxUse='
9 | line: 'SystemMaxUse=200M'
10 | notify: Restart journald
11 |
12 | - name: Config so journald will always leave free space
13 | ansible.builtin.lineinfile:
14 | path: /etc/systemd/journald.conf
15 | regexp: '^#SystemKeepFree'
16 | line: 'SystemKeepFree=500M'
17 | notify: Restart journald
18 |
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/selenium-grid-node.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "capabilities": {{ selenium_grid_node_capabilities|to_json }},
3 | "proxy": "org.openqa.grid.selenium.proxy.DefaultRemoteProxy",
4 | "maxSession": {{ selenium_grid_node_max_session }},
5 | "port": 5555,
6 | "register": true,
7 | "registerCycle": 5000,
8 | "hub": "http://{{ selenium_grid_hub_host }}:4444",
9 | "nodeStatusCheckTimeout": 5000,
10 | "nodePolling": 5000,
11 | "role": "node",
12 | "unregisterIfStillDownAfter": 60000,
13 | "downPollingLimit": 2,
14 | "debug": false,
15 | "servlets" : [],
16 | "withoutServlets": [],
17 | "custom": {}
18 | }
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/files/reconfigure-jibri-oracle.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | LOGPATH="/var/log/jitsi/reconfigure-jibri.log"
4 | #first rebuild the configuration files
5 | echo "Reconfiguring jibri, check logs in $LOGPATH"
6 | CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jibri" /usr/local/bin/configure-jibri-local.sh >> $LOGPATH 2>&1
7 | if [ $? -eq 0 ]; then
8 | echo "Jibri reconfiguration successful"
9 | echo "Running service jibri reload"
10 | #now gracefully reload jibri
11 | service jibri reload >> $LOGPATH
12 | echo "Jibri reload completed"
13 | exit 0
14 | else
15 | echo "Jibri reload failed, check logs in $LOGPATH"
16 | fi
--------------------------------------------------------------------------------
/ansible/roles/jvb-rtcstats-push/tasks/configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Service configuration
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: service.config.j2
6 | dest: "{{ jvb_rtcstats_push_config_path }}"
7 |
8 | - name: Configure systemd script for service
9 | ansible.builtin.template:
10 | mode: 0644
11 | src: systemd.j2
12 | dest: "/lib/systemd/system/{{ jvb_rtcstats_push_service_name }}.service"
13 |
14 | - name: Start service and set it enabled
15 | ansible.builtin.service:
16 | name: "{{ jvb_rtcstats_push_service_name }}"
17 | state: started
18 | enabled: true
19 | when: jvb_rtcstats_enabled
20 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/files/reconfigure-jibri.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | LOGPATH="/var/log/jitsi/reconfigure-jibri.log"
4 | #first rebuild the configuration files
5 | echo "Reconfiguring jibri, check logs in $LOGPATH"
6 | CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jibri" /usr/local/bin/configure-jibri-local.sh >> $LOGPATH 2>&1
7 | if [ $? -eq 0 ]; then
8 | echo "Jibri reconfiguration successful"
9 | echo "Running service jibri reload"
10 | #now gracefully reload jibri
11 | service jibri reload >> $LOGPATH
12 | echo "Jibri reload completed"
13 | exit 0
14 | else
15 | echo "Jibri reload failed, check logs in $LOGPATH"
16 | exit 2
17 | fi
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/node_5555.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "capabilities": {{ selenium_grid_node_capabilities|to_json }},
3 | "loadedFromFile": "node_5555.json",
4 | "proxy": "com.groupon.seleniumgridextras.grid.proxies.SetupTeardownProxy",
5 | "servlets": [],
6 | "maxSession": {{ selenium_grid_node_max_session }},
7 | "port": 5555,
8 | "register": true,
9 | "unregisterIfStillDownAfter": 60000,
10 | "hubPort": 4444,
11 | "hubHost": "{{ selenium_grid_hub_host }}",
12 | "registerCycle": 5000,
13 | "nodeStatusCheckTimeout": 5000,
14 | "custom": {},
15 | "downPollingLimit": 0,
16 | "browserTimeout": 60,
17 | "timeout": 60
18 | }
19 |
--------------------------------------------------------------------------------
/ansible/roles/consul-signal/templates/signal.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "service": {
3 | "name": "signal",
4 | "tags":["{{ hcv_environment }}"],
5 | "meta": {
6 | "environment":"{{ hcv_environment }}",
7 | "domain":"{{ prosody_domain_name }}",
8 | "release_number":"{{ jitsi_release_number }}",
9 | "shard":"{{ shard_name }}"
10 | },
11 | "tagged_addresses": {
12 | "lan": {
13 | "address": "{{ consul_signal_private_ip }}",
14 | "port": 5280
15 | },
16 | "wan": {
17 | "address": "{{ consul_signal_public_ip }}",
18 | "port": 5280
19 | }
20 | },
21 | "port": 5280
22 | }
23 | }
--------------------------------------------------------------------------------
/ansible/roles/jigasi-rtcstats-push/tasks/configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install service configuration
3 | ansible.builtin.template:
4 | mode: 0644
5 | src: service.config.j2
6 | dest: "{{ jigasi_rtcstats_push_config_path }}"
7 |
8 | - name: Configure systemd script for service
9 | ansible.builtin.template:
10 | mode: 0644
11 | src: systemd.j2
12 | dest: "/lib/systemd/system/{{ jigasi_rtcstats_push_service_name }}.service"
13 |
14 | - name: Start service and set it enabled
15 | ansible.builtin.service:
16 | name: "{{ jigasi_rtcstats_push_service_name }}"
17 | state: started
18 | enabled: true
19 | when: jigasi_rtcstats_enabled
20 |
--------------------------------------------------------------------------------
/ansible/roles/haproxy-configure/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Configure local haproxy # noqa ignore-errors no-changed-when
2 | ansible.builtin.shell: /usr/local/bin/configure-haproxy.sh > /tmp/configure-haproxy.log 2>&1
3 | ignore_errors: true
4 | register: configure_result
5 |
6 | - name: Fetch configure logs
7 | ansible.builtin.fetch:
8 | src: /tmp/configure-haproxy.log
9 | dest: "{{ haproxy_configure_log_dest }}/{{ ansible_hostname }}.log"
10 | flat: true
11 |
12 | - name: Fail on reconfigure failure
13 | ansible.builtin.fail:
14 | msg: "Reconfiguration step failed on {{ ansible_hostname }}, check logs for details"
15 | when: configure_result.rc != 0
16 |
--------------------------------------------------------------------------------
/ansible/roles/pjsua/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | pjsua_apt_dependencies:
3 | - build-essential
4 | - libv4l-dev
5 | - libsdl2-dev
6 | - libavcodec-dev
7 | - libavdevice-dev
8 | - libavfilter-dev
9 | - libavformat-dev
10 | - libavutil-dev
11 | - libswresample-dev
12 | - libswscale-dev
13 | - libasound2-dev
14 | - libopus-dev
15 | - libvpx-dev
16 | pjsua_repo_url: "git@github.com:jitsi/pjproject.git"
17 | pjsua_repo_version: "jibri-2.10-dev1"
18 | pjsua_src_path: "/usr/src/pjsua"
19 | pjsua_ssh_path: "/root/.ssh"
20 | pjsua_ssh_private_key: "{{ pjsua_deployment_certificate }}"
21 | pjsua_ssh_private_key_path: "{{ pjsua_ssh_path }}/id_rsa_pjsua_deployment"
22 |
--------------------------------------------------------------------------------
/ansible/roles/coturn/templates/coturn_set_alarms_service.j2:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 | ### BEGIN INIT INFO
3 | # Required-Start: $network
4 | # Required-Stop: $remote_fs $syslog
5 | # Default-Start: 2 3 4 5
6 | # Default-Stop: 0 1 6
7 | # Description: Check instance termination state
8 | #
9 | ### END INIT INFO
10 |
11 | case "$1" in
12 | start)
13 | {{coturn_scripts_path}}/coturn-set-alarms.sh ok
14 | {{coturn_scripts_path}}/coturn-set-alarms.sh ok CoturnOpenedFilesLimitFailed
15 | ;;
16 | stop)
17 | {{coturn_scripts_path}}/coturn-set-alarms.sh alarm
18 | sleep 10
19 | ;;
20 | *)
21 | echo "Usage: coturn-set-alarms {start|stop}" >&2
22 | exit 3
23 | ;;
24 | esac
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/templates/environments.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | {% if jibri_enable_consul %}
3 | "datacenters":{{ jibri_datacenters|to_json }},
4 | "local_environment":"{{ hcv_environment }}",
5 | "consul_extra_urls":{{ jibri_consul_extra_urls|to_json }},
6 | "local_domain":"{{ jibri_local_domain }}",
7 | "consul_server":"{{ jibri_consul_server }}",
8 | {% endif %}
9 | "jid_username":"{{ jibri_xmpp_username }}",
10 | "brewery_muc_room":"{{ jibri_xmpp_room_name }}",
11 | "auth_prefix":"{{ jibri_jid_prefix }}",
12 | "internal_muc_prefix": "{{ jibri_brewery_prefix }}",
13 | "environments":{{ jibri_environments|to_json }},
14 | "regions":{{ jibri_regions|to_json }}
15 | }
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/environments.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | {% if jvb_enable_consul %}
3 | "datacenters": {{ jvb_consul_datacenters | to_json }},
4 | "local_datacenters": {{ jvb_consul_local_datacenters | to_json }},
5 | "pool_mode":"{{ jvb_pool_mode }}",
6 | "environment": "{{ hcv_environment }}",
7 | "domain": "{{ jvb_domain_name }}",
8 | "shard": "{{ shard_name }}",
9 | "release_number": "{{ jitsi_release_number }}",
10 | "consul_server": "{{ jvb_consul_server }}",
11 | "consul_extra_urls": {{ jvb_consul_extra_urls | to_json }},
12 | {% endif %}
13 | "enable_cross_region": {{ jvb_enable_cross_region|to_json }},
14 | "regions":{{ jvb_regions|to_json }}
15 | }
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/haproxy-monitor.conf:
--------------------------------------------------------------------------------
1 | # HAProxy
2 |
3 | @type tail
4 | path /var/log/proxy_monitor/*.log
5 | pos_file /var/spool/td-agent/haproxy-monitor.pos
6 | format syslog
7 | tag proxymonitor
8 | # Regex fields
9 | # Messages look like: "DATE TIME - PROCESSNAME - LEVEL - ENVIRONMENT - MESSAGE"
10 | # EXAMPLE: "2016-11-01 16:26:46,455 - haproxy_status - INFO - hcv-meetjitsi - No directory found: /opt/jitsi/haproxy_status/hcv-meetjitsi"
11 |
12 | format /^(?
17 |
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-configure/templates/environment.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "environment":"{{ hcv_environment }}",
3 | "consul_extra_urls":{{ haproxy_consul_extra_urls|to_json }},
4 | "enable_cross_region":{{ haproxy_enable_cross_region|to_json }},
5 | "region":"{{ haproxy_region }}",
6 | "consul_enabled":{{ haproxy_consul_enabled|to_json }},
7 | "include_standalone":{{ haproxy_consul_include_standalone|to_json }},
8 | "private_ip":"{{ haproxy_private_ip }}"{% if haproxy_public_ip %},
9 | "public_ip":"{{ haproxy_public_ip }}"{% endif %},
10 | "aliased_regions":{{ aliased_regions|to_json }},
11 | "aws_to_oracle_region_map":{{ aws_to_oracle_region_map|to_json }}
12 | }
--------------------------------------------------------------------------------
/ansible/roles/firefox/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | firefox_beta_flag: false
3 | firefox_dir: /opt/mozilla
4 | firefox_latest_beta_dir: /opt/mozilla/firefox-beta
5 | firefox_latest_beta_path: /usr/bin/firefox-beta
6 | firefox_latest_beta_url: https://download.mozilla.org/?product=firefox-beta-latest&os=linux64&lang=en-US
7 | firefox_latest_stable_dir: /opt/mozilla/firefox-stable
8 | firefox_latest_stable_path: /usr/bin/firefox
9 | firefox_latest_stable_url: https://download.mozilla.org/?product=firefox-latest&os=linux64&lang=en-US
10 | # More info at: https://wiki.mozilla.org/Release_Management/Product_details
11 | firefox_latest_versions_info_url: https://product-details.mozilla.org/1.0/firefox_versions.json
12 |
--------------------------------------------------------------------------------
/ansible/roles/ntp/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 | before_install:
5 | - sudo apt-get update -qq
6 | - sudo apt-get install -qq python-apt python-pycurl
7 | install:
8 | - pip install ansible
9 | - echo -e 'localhost ansible_connection=local' > tests/inventory
10 | - echo -e '[defaults]\nroles_path = ../\nhostfile = ./tests/inventory' > ansible.cfg
11 | script:
12 | - ansible-playbook --syntax-check tests/role.yml
13 | - ansible-playbook --sudo -v --diff tests/role.yml
14 | - >
15 | ansible-playbook --sudo tests/role.yml
16 | | grep -q 'changed=0.*failed=0'
17 | && (echo 'Idempotence test: pass' && exit 0)
18 | || (echo 'Idempotence test: fail' && exit 1)
19 |
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/templates/jibri-camera.systemd.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Jibri Camera {{ item.device }}
3 | Requires=jibri-xorg.service
4 | Requires=jibri-xorg2.service
5 | After=jibri-xorg.service
6 | After=jibri-xorg2.service
7 |
8 | [Service]
9 | User={{ jibri_username }}
10 | Group={{ jibri_groupname }}
11 | WorkingDirectory={{ jibri_homedir }}
12 | ExecStart={{ jibri_pjsua_ffmpeg_path }} -f x11grab -draw_mouse 0 -r {{ jibri_pjsua_camera_framerate }} -s {{ jibri_pjsua_camera_resolution }} -i {{ item.desktop }} -vcodec rawvideo -pix_fmt yuv420p -threads 0 -f v4l2 {{ item.device }}
13 | Restart=on-failure
14 | RestartPreventExitStatus=255
15 | Type=simple
16 |
17 | [Install]
18 | WantedBy=jibri.service
19 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/clouds.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 |
4 | type tail
5 | path /var/log/cloud-init.log
6 | pos_file /var/spool/td-agent/cloud-init.pos
7 | tag cloud-init
8 | format /^(?[^ ]*) (?[^ ]*) (?[^ ]*) (?[^ ]*) (?.*)$/
9 |
10 | # Date and time format
11 | time_format %d/%B/%Y:%H:%M:%S
12 |
13 |
14 |
15 | # Cloud-init-output
16 |
17 | type tail
18 | path /var/log/cloud-init-output.log
19 | pos_file /var/spool/td-agent/cloud-init-out.pos
20 | tag cloud-init-output
21 | format /^(?.*)?$/
22 |
23 | # Date and time format
24 | time_format %d/%B/%Y:%H:%M:%S
25 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-torture/files/generate-jwt.js:
--------------------------------------------------------------------------------
1 | const path = require("path");
2 |
3 | const private_key_file = process.argv[2];
4 | const tenant = process.argv[3];
5 |
6 | const headers = {
7 | algorithm: 'RS256',
8 | noTimestamp: true,
9 | expiresIn: '1h',
10 | keyid: path.basename(private_key_file)
11 | }
12 |
13 | const privateKey = require('fs').readFileSync(private_key_file);
14 |
15 | const payload = {
16 | "iss": "jitsi",
17 | "aud": "jitsi",
18 | "sub": tenant,
19 | "context": {
20 | "group": tenant
21 | },
22 | "room": "*"
23 | }
24 |
25 | const jwt = require('jsonwebtoken');
26 | const token = jwt.sign(payload, privateKey, headers);
27 | console.log(encodeURI(token))
28 |
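As the argv handling above suggests, the script takes a private key file path and a tenant name as its two positional arguments and prints a URL-encoded signed token. A hypothetical invocation (the key path and tenant below are placeholders, assuming the jsonwebtoken module is installed next to the script):

node generate-jwt.js /path/to/jwt-private-key.pem example-tenant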
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/prosody-log-filter.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | [ -z "$PROSODY_FIFO" ] && PROSODY_FIFO="/var/log/prosody/prosody.fifo"
4 | [ -z "$LOG_DIR" ] && LOG_DIR="/var/log/prosody-filtered"
5 | [ -z "$LOG_OUTPUT" ] && LOG_OUTPUT="$LOG_DIR/prosody-filtered.log"
6 |
7 | EGREP_PATTERN="c2s|epoll|[0-9] conn|runner|changed state from"
8 | EXCLUDE_GREP_PATTERN="Missing listener 'ondrain'"
9 |
10 | if [ ! -d "$LOG_DIR" ]; then
11 | mkdir "$LOG_DIR"
12 | touch "$LOG_OUTPUT"
13 | fi
14 |
15 | if [ ! -e "$PROSODY_FIFO" ]; then
16 | mkfifo "$PROSODY_FIFO"
17 | chown prosody:prosody "$PROSODY_FIFO"
18 | fi
19 |
20 | cat "$PROSODY_FIFO" | egrep "$EGREP_PATTERN" | grep -v "$EXCLUDE_GREP_PATTERN" >> "$LOG_OUTPUT"
21 |
--------------------------------------------------------------------------------
/ansible/roles/coturn/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include install tasks
3 | ansible.builtin.include_tasks: install.yml
4 | when: coturn_install_flag
5 |
6 | - name: Include configure tasks
7 | ansible.builtin.include_tasks: configure.yml
8 | when: coturn_configure_flag
9 |
10 | - name: Include monit tasks
11 | ansible.builtin.include_tasks: monit.yml
12 | when: coturn_configure_flag and coturn_monit_flag
13 |
14 | - name: Include cloudwatch alarm tasks
15 | ansible.builtin.include_tasks: coturn_cloudwatch_alarms.yml
16 | when: coturn_configure_flag and coturn_cloudwatch_flag
17 |
18 | - name: Include ipv6 tasks
19 | ansible.builtin.include_tasks: coturn_ipv6_ddns.yml
20 | when: coturn_ipv6_ddns and ipv6_enabled
21 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | nginx_apt_key_url: http://nginx.org/keys/nginx_signing.key
3 | nginx_apt_repo_url: http://nginx.org/packages/ubuntu/
4 | nginx_log_syslog_enabled: false
5 | nginx_log_syslog_hostname: localhost
6 | nginx_log_syslog_port: 514
7 | nginx_package: "nginx"
8 | nginx_start_service: true
9 | nginx_user: www-data
10 | nginx_version: "*"
11 | # each worker can handle 50000 connections
12 | nginx_worker_connections: 50000
13 | # run twice as many workers as virtual CPUs
14 | nginx_worker_processes: "{{ ansible_processor_vcpus * 2 }}"
15 | # the max files all workers combined can have open
16 | # must still be lower than the ulimit -n value for the www-data user
17 | nginx_worker_rlimit_nofile: 150000
18 |
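Taken together, these defaults scale with the host: on a hypothetical 4-vCPU machine, nginx would run 4 * 2 = 8 worker processes, for a theoretical ceiling of 8 * 50000 = 400000 concurrent connections, subject to the nginx_worker_rlimit_nofile value of 150000 configured above.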
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/prosody-jvb-log-filter.systemd:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Prosody JVB Log Filter
3 | After=network.target
4 | Before=prosody-jvb.service
5 |
6 | [Service]
7 | User=root
8 | Group=prosody
9 | Environment=PROSODY_FIFO=/var/log/prosody-jvb/prosody.fifo
10 | Environment=LOG_DIR=/var/log/prosody-filtered
11 | Environment=LOG_OUTPUT=/var/log/prosody-filtered/prosody-jvb-filtered.log
12 | WorkingDirectory=/var/log/prosody-jvb
13 | ExecStart=/usr/local/bin/prosody-log-filter.sh
14 | Restart=always
15 | KillSignal=SIGQUIT
16 | Type=simple
17 | StandardOutput=syslog
18 | StandardError=syslog
19 | NotifyAccess=all
20 | SyslogIdentifier=prosody-log-jvb-filter
21 |
22 | [Install]
23 | WantedBy=multi-user.target prosody-jvb.service
--------------------------------------------------------------------------------
/ansible/roles/coturn/templates/coturn-mark-unhealthy.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | aws="/usr/local/bin/aws"
3 | instance_id=$(curl http://169.254.169.254/latest/meta-data/instance-id)
4 | region=$(curl http://169.254.169.254/latest/dynamic/instance-identity/document|grep region|awk -F\" '/region/ {print $4}')
5 | instance_name=$($aws ec2 describe-tags --region $region --filters Name=resource-id,Values=$instance_id Name=key,Values=Name --query Tags[].Value --output text)
6 |
7 | #Metric
8 | #{{coturn_scripts_path}}/coturn-set-alarms.sh
9 | {% if coturn_copy_dumps_to_s3 %}
10 | {{coturn_scripts_path}}/dump-coturn.sh copy_and_send
11 | $aws autoscaling set-instance-health --region $region --instance-id $instance_id --health-status Unhealthy
12 | {% endif %}
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/templates/pjsua.config.j2:
--------------------------------------------------------------------------------
1 | --capture-dev=11
2 | --playback-dev=14
3 | --video
4 | --vcapture-dev=1
5 | --no-color
6 | --log-level=5
7 | --app-log-level=5
8 | {% if jibri_pjsua_dns_server %}
9 | --nameserver {{ jibri_pjsua_dns_server }}
10 | {% endif %}
11 | --auto-update-nat 0
12 | --disable-stun
13 | --use-tls
14 | --dis-codec GSM
15 | --dis-codec H263
16 | --dis-codec iLBC
17 | --dis-codec G722
18 | --dis-codec speex
19 | --dis-codec pcmu
20 | --dis-codec pcma
21 | --dis-codec opus
22 | --add-codec pcmu
23 | --add-codec pcma
24 | --add-codec G722
25 | --add-codec opus
26 | --no-vad
27 | --ec-tail 0
28 | --quality 10
29 | --max-calls=1
30 | --auto-keyframe=30
31 | --no-stderr
32 | --log-file=/var/log/jitsi/jibri/pjsua.log
33 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/tasks/install-from-url.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # - name: Check if prosody package is installed
3 | # command: dpkg -l {{ prosody_package_name }} | grep -q "ii {{ prosody_package_name }}"
4 | # register: prosody_check_deb
5 | # failed_when: prosody_check_deb.rc > 1
6 | # changed_when: prosody_check_deb.rc == 1
7 |
8 | - name: Download specific prosody nightly build
9 | get_url: url="{{ prosody_dpkg_url }}" dest="{{ prosody_dpkg_path }}"
10 | # when: prosody_check_deb.rc == 1
11 |
12 | - name: "Install prosody package from file"
13 | command: dpkg -i {{ prosody_dpkg_path }}
14 | # when: prosody_check_deb.rc == 1
15 |
16 | - name: mark hold on prosody package version
17 | command: apt-mark hold {{ prosody_package_name }}
18 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-auth/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jigasi_auth_domain: "auth.{{ prosody_domain_name }}"
3 | jigasi_auth_domain_path: "/var/lib/prosody/{{ jigasi_auth_domain|regex_replace('\\.','%2e')|regex_replace('-','%2d') }}/accounts/{{ jigasi_auth_user }}.dat"
4 | jigasi_auth_password: "{{ jigasi_xmpp_password | default('replaceme') }}"
5 | jigasi_auth_user: "{{ jigasi_xmpp_jid_username }}"
6 | jigasi_transcriber_auth_domain: "recorder.{{ prosody_domain_name }}"
7 | jigasi_transcriber_auth_domain_path: "/var/lib/prosody/{{ jigasi_transcriber_auth_domain|regex_replace('\\.','%2e')|regex_replace('-','%2d') }}/accounts/{{ jigasi_transcriber_auth_user }}.dat"
8 | jigasi_transcriber_auth_password: "replaceme"
9 | jigasi_transcriber_auth_user: "transcriber"
10 |
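The regex_replace chain above mirrors Prosody's on-disk datastore path encoding, where dots become %2e and hyphens become %2d; for a hypothetical domain auth.meet-example.com the resulting directory name would be auth%2emeet%2dexample%2ecom.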
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/tasks/upgrade.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: disable JVB health check script cron
3 | cron: user={{ jvb_health_check_cron_user }}
4 | state=absent
5 | name="JVB health check script"
6 |
7 | - name: mark unhold on jitsi-videobridge package version
8 | command: apt-mark unhold {{ jitsi_videobridge_deb_pkg_name }}
9 | ignore_errors: true
10 |
11 | - name: install jitsi-videobridge package
12 | apt: name="{{ jitsi_videobridge_deb_pkg_name }}={{ jitsi_videobridge_deb_pkg_version }}" state=present
13 | notify:
14 | - restart jitsi-videobridge systemd
15 | when: ansible_service_mgr == "systemd"
16 |
17 | - name: mark hold on JVB package version
18 | command: apt-mark hold {{ jitsi_videobridge_deb_pkg_name }}
19 |
--------------------------------------------------------------------------------
/ansible/roles/iptables-serf/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Open serf TCP port 8301 via iptables
3 | ansible.builtin.iptables:
4 | chain: INPUT
5 | ctstate: NEW
6 | protocol: tcp
7 | destination_port: "8301"
8 | jump: ACCEPT
9 | action: insert
10 | comment: Added via ansible post-launch configuration script
11 | - name: Open serf UDP port 8301 via iptables
12 | ansible.builtin.iptables:
13 | chain: INPUT
14 | ctstate: NEW
15 | protocol: udp
16 | destination_port: "8301"
17 | jump: ACCEPT
18 | action: insert
19 | comment: Added via ansible post-launch configuration script
20 | - name: Save newly added iptable rules # noqa no-changed-when
21 | ansible.builtin.shell: iptables-save > /etc/iptables/rules.v4
22 |
--------------------------------------------------------------------------------
/ansible/roles/google-cloud/tasks/configure.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Google cloud config directory
3 | ansible.builtin.file:
4 | mode: 0755
5 | path: "{{ google_cloud_conf_dir }}"
6 | state: "directory"
7 |
8 | - name: Confirm jitsi group exists
9 | ansible.builtin.group:
10 | name: jitsi
11 | state: present
12 |
13 | - name: Install google cloud key file
14 | ansible.builtin.copy:
15 | dest: "{{ google_cloud_conf_dir }}/google-cloud-s2t-key-file.json"
16 | content: "{{ google_cloud_service_key_json }}"
17 | mode: 0640
18 | group: jitsi
19 |
20 | - name: Activate google cloud service account
21 | ansible.builtin.command: gcloud auth activate-service-account --key-file={{ google_cloud_conf_dir }}/google-cloud-s2t-key-file.json
22 |
--------------------------------------------------------------------------------
/ansible/roles/iptables-coturn/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Open TCP port 443 via iptables
3 | ansible.builtin.iptables:
4 | chain: INPUT
5 | ctstate: NEW
6 | protocol: tcp
7 | destination_port: 443
8 | jump: ACCEPT
9 | action: insert
10 | rule_num: 6
11 | comment: Added via ansible post-launch configuration script
12 | - name: Open UDP port 443 via iptables
13 | ansible.builtin.iptables:
14 | chain: INPUT
15 | ctstate: NEW
16 | protocol: udp
17 | destination_port: 443
18 | jump: ACCEPT
19 | action: insert
20 | rule_num: 7
21 | comment: Added via ansible post-launch configuration script
22 | - name: Save newly added iptable rules
23 | ansible.builtin.shell: iptables-save > /etc/iptables/rules.v4
24 |
--------------------------------------------------------------------------------
/ansible/roles/openjdk-java/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | java_architecture: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
3 | java_alternatives_version: "{{ 'java-1.11.0-openjdk-' + java_architecture if java_use_11 else 'java-1.8.0-openjdk-' + java_architecture }}"
4 | java_from_apt: true
5 | java_install_flag: true
6 | java_package_prefix: openjdk-8
7 | java_package_suffix: "~{{ '18.04' if ansible_distribution_release == 'bionic' else '20.04' }}_{{ java_architecture }}.deb"
8 | java_package_version: "8u282-b08-0ubuntu1"
9 | java_url_base: "https://{{ jitsi_repo_username }}:{{ jitsi_repo_password }}@{{ jitsi_repo_host }}/debian/misc/"
10 | java_url_packages:
11 | - jre-headless
12 | - jre
13 | - jdk-headless
14 | - jdk
15 | java_use_11: false
16 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-web/templates/nginx.site.j2:
--------------------------------------------------------------------------------
1 | server {
2 |
3 | listen 127.0.0.1:80 default_server;
4 | listen [::]:80 default_server;
5 |
6 | listen *:80 default_server;
7 |
8 | resolver 8.8.4.4 8.8.8.8 valid=300s;
9 | resolver_timeout 10s;
10 |
11 | # set the root
12 | root /var/www/html;
13 |
14 |
15 | #Jigasi health check
16 | location = /about/health {
17 | proxy_pass http://localhost:8788/about/health;
18 | # do not cache anything from prebind
19 | add_header "Cache-Control" "no-cache, no-store";
20 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
21 | proxy_set_header Host localhost;
22 | add_header 'Access-Control-Allow-Origin' '*';
23 | }
24 |
25 | }
26 |
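With this site installed, the Jigasi health endpoint is reachable through nginx on port 80 and proxied to localhost:8788; a minimal local check (hypothetical, assuming nginx and Jigasi are running on the host):

curl -s http://127.0.0.1/about/health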
--------------------------------------------------------------------------------
/ansible/set-signal-state.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: all
3 | become: true
4 | become_user: root
5 | gather_facts: false
6 | vars:
7 | shard_state: drain
8 | tasks:
9 | - name: Set signal shard state
10 | ansible.builtin.copy:
11 | mode: 0644
12 | content: "{{ shard_state }}"
13 | dest: "/etc/jitsi/shard-status"
14 |
15 | - name: Copy set shard state script
16 | ansible.builtin.copy:
17 | src: "roles/consul-signal/files/set-shard-state-consul.sh"
18 | dest: "/usr/local/bin/set-shard-state-consul.sh"
19 | mode: 0755
20 | owner: root
21 | - name: Set consul shard state # noqa no-changed-when
22 | ansible.builtin.command: /usr/local/bin/set-shard-state-consul.sh {{ shard_state }}
23 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/sip-jibri-selector.conf:
--------------------------------------------------------------------------------
1 | # server logs
2 |
3 | @type tail
4 | path /var/log/jitsi/sip-jibri-selector/server.log
5 | pos_file /var/spool/td-agent/sip-jibri-selector-server-log.pos
6 |
7 | #json input
8 | format json
9 |
10 | tag sip-jibri-selector.server
11 |
12 | time_key time
13 | keep_time_key true
14 | time_format %Y-%m-%dT%H:%M:%S.%LZ
15 |
16 |
17 | # worker logs
18 |
19 | @type tail
20 | path /var/log/jitsi/sip-jibri-selector/worker.log
21 | pos_file /var/spool/td-agent/sip-jibri-selector-worker-log.pos
22 |
23 | #json input
24 | format json
25 |
26 | tag sip-jibri-selector.worker
27 |
28 | time_key time
29 | keep_time_key true
30 | time_format %Y-%m-%dT%H:%M:%S.%LZ
31 |
32 |
--------------------------------------------------------------------------------
/ansible/roles/testrtc/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | npm_modules_path: "{{ testrtc_base_path }}/node_modules/.bin/"
3 | testrtc_base_path: "/opt/testrtc"
4 | testrtc_configure_flag: false
5 | testrtc_creds_dir: /etc/testrtc
6 | testrtc_creds_path: "{{ testrtc_creds_dir }}/turn_credentials"
7 | testrtc_domain_name: "test.{{ jitsi_meet_domain_name }}"
8 | testrtc_git_repository: "https://github.com/aaronkvanmeerten/testrtc.git"
9 | testrtc_install_flag: false
10 | testrtc_port: "8002"
11 | testrtc_ssl_certificate: "{{ jitsi_net_ssl_certificate }}{{ jitsi_net_ssl_extras }}"
12 | testrtc_ssl_dest_dir: /etc/nginx/ssl
13 | testrtc_ssl_key_name: "{{ jitsi_net_ssl_key_name }}"
14 | testrtc_turn_hostname: all-turnrelay.jitsi.net
15 | testrtc_turn_port: 443
16 | testrtc_web_root_path: /var/www/testrtc
17 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/nginx.conf:
--------------------------------------------------------------------------------
1 | # Access
2 |
3 | @type tail
4 | path /var/log/nginx/access.log
5 | pos_file /var/spool/td-agent/nginx_access.pos
6 | format syslog
7 | tag nginx.access
8 | # Regex fields
9 | format /^(?[^ ]*) (?[^ ]*) (?[^ ]*) \[(?
13 |
14 | # Errors
15 |
16 | @type tail
17 | path /var/log/nginx/error.log
18 | pos_file /var/spool/td-agent/nginx_error.pos
19 | format syslog
20 | tag nginx.error
21 | # Regex fields
22 | format /^(?.*)$/
23 |
24 |
--------------------------------------------------------------------------------
/ansible/roles/google-chrome/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: skip chrome repo cron configuration
3 | file: path=/etc/default/google-chrome state="touch"
4 |
5 | - name: skip chrome beta repo cron configuration
6 | file: path=/etc/default/google-chrome-beta state="touch"
7 |
8 | - name: Chrome repo signing key
9 | apt_key: url=https://dl.google.com/linux/linux_signing_key.pub state=present
10 |
11 | - name: Chrome repo
12 | apt_repository: repo='deb http://dl.google.com/linux/chrome/deb/ stable main' state=present update_cache=yes filename='google-chrome'
13 |
14 | - name: Install google chrome stable
15 | apt: name=google-chrome-stable state=latest
16 |
17 | - name: Install google chrome beta
18 | apt: name=google-chrome-beta state=latest
19 | when: google_chrome_beta_flag
20 |
--------------------------------------------------------------------------------
/ansible/roles/consul-signal/files/set-shard-state-consul.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SHARD_DATA="$1"
4 | SHARD_DATA_TYPE="$2"
5 | [ -z "$SHARD_DATA_TYPE" ] && SHARD_DATA_TYPE="shard-states"
6 |
7 | if [ -z "$SHARD_DATA" ]; then
8 | echo "No shard state set."
9 | echo "Usage: $0 []"
10 | exit 1
11 | fi
12 |
13 | AWS_CACHE_BIN="/usr/local/bin/aws_cache.sh"
14 | ORACLE_CACHE_BIN="/usr/local/bin/oracle_cache.sh"
15 | if [ -e "$ORACLE_CACHE_BIN" ]; then
16 | . $ORACLE_CACHE_BIN
17 | else
18 | . $AWS_CACHE_BIN
19 | fi
20 | if [ "$DOMAIN" == "null" ]; then
21 | DOMAIN=$(hostname)
22 | fi
23 |
24 | if [ "$SHARD" == "null" ]; then
25 | SHARD="$DOMAIN"
26 | fi
27 | SHARD_KEY="$SHARD_DATA_TYPE/$ENVIRONMENT/$SHARD"
28 | consul kv put "$SHARD_KEY" "$SHARD_DATA"
29 | exit $?
--------------------------------------------------------------------------------
/ansible/roles/jigasi/templates/config.j2:
--------------------------------------------------------------------------------
1 | # Jigasi settings
2 | JIGASI_OPTS="--nocomponent=true"
3 |
4 | # adds java system props that are passed to jigasi (default are for logging config file)
5 | JAVA_SYS_PROPS="-Djava.util.logging.config.file=/etc/jitsi/jigasi/logging.properties"
6 |
7 | # Credential for Google Cloud Speech API
8 | GOOGLE_APPLICATION_CREDENTIALS=/etc/google-cloud/google-cloud-s2t-key-file.json
9 |
10 | {% if jigasi_enable_css_integration %}
11 | CSS_AUTH_URL={{ jigasi_css_auth_url }}
12 | CSS_AUTH_CLIENT_ID={{ jigasi_css_auth_client_id }}
13 | CSS_AUTH_CLIENT_SECRET={{ jigasi_css_auth_client_secret }}
14 | CSS_AUTH_USERNAME={{ jigasi_css_auth_username }}
15 | CSS_AUTH_PASS={{ jigasi_css_auth_pass }}
16 | CSS_STORAGE_SERVICE_URL={{ jigasi_css_storage_service_url }}
17 | {% endif %}
18 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-upload-integrations/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jitsi_uploader_cloud_provider: "{{ cloud_provider | default('aws') }}"
3 | instance_volume_id: "undefined"
4 | jibri_private_ip: "{{ ansible_default_ipv4.address }}"
5 | jitsi_upload_cloud_provider: "{{ cloud_provider | default('aws') }}"
6 | jitsi_uploader_dropbox_failed_bucket: "dropbox-failed-recordings-{{ hcv_environment }}-{{ oracle_region }}"
7 | jitsi_uploader_failed_bucket: "failed-recordings-{{ hcv_environment }}-{{ oracle_region }}"
8 | jitsi_uploader_failed_dir: /opt/jitsi/jibri/failed
9 | jitsi_uploader_vpaas_failed_bucket: "vpaas-failed-recordings-{{ hcv_environment }}-{{ oracle_region }}"
10 | upload_integrations_configure_flag: true
11 | upload_integrations_install_flag: true
12 | uploader_pkg_name: jitsi-upload-integrations
13 |
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/files/jvb-stats-oracle.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #pull our own instance and environment
4 | . /usr/local/bin/oracle_cache.sh
5 |
6 | #now run the python that pushes stats to DD
7 | /usr/local/bin/jvb-stats.py
8 |
9 |
10 | # Save more detailed jvb stats locally for postmortem.
11 | LOCAL_STATS_DIR="/tmp/jvb-stats"
12 | NUM_LOCAL_STATS_TO_KEEP=30
13 | mkdir -p $LOCAL_STATS_DIR
14 | for stat in node-stats pool-stats queue-stats transit-stats task-pool-stats xmpp-delay-stats
15 | do
16 | # Rotate
17 | for i in $(seq $NUM_LOCAL_STATS_TO_KEEP -1 2)
18 | do
19 | mv -f "$LOCAL_STATS_DIR/$stat.$((i-1)).json" "$LOCAL_STATS_DIR/$stat.$i.json"
20 | done
21 |
22 | curl -s http://localhost:8080/debug/stats/jvb/$stat | jq . > "$LOCAL_STATS_DIR/$stat.1.json"
23 | done
24 |
--------------------------------------------------------------------------------
/ansible/roles/consul-server/files/consul-server-config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | if [ -z "$DATACENTER" ]; then
4 | echo "No DATACENTER environment variable, exiting..."
5 | exit 1
6 | fi
7 |
8 | if [ -z "$SERVER_ENV" ]; then
9 | echo "No SERVER_ENV environment variable, exiting..."
10 | exit 1
11 | fi
12 | if [ -z "$ENC_KEY" ]; then
13 | echo "No ENC_KEY environment variable, exiting..."
14 | exit 1
15 | fi
16 |
17 | CONSUL_CONFIG_PATH="/etc/consul.d/consul.hcl"
18 |
19 | if [ ! -f "$CONSUL_CONFIG_PATH" ]; then
20 | echo "Consul config file $CONSUL_CONFIG_PATH not found, exiting..."
21 | fi
22 |
23 | sed -i "s#REPLACE_DATACENTER#$DATACENTER#g" $CONSUL_CONFIG_PATH
24 | sed -i "s#REPLACE_ENC_KEY#$ENC_KEY#g" $CONSUL_CONFIG_PATH
25 | sed -i "s#REPLACE_SERVER_ENV#$SERVER_ENV#g" $CONSUL_CONFIG_PATH
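The placeholders are substituted in place, so the script is run once with the three required values in the environment; a hypothetical invocation (all values below are placeholders):

DATACENTER=us-phoenix-1 SERVER_ENV=prod ENC_KEY='<consul-gossip-key>' ./consul-server-config.sh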
--------------------------------------------------------------------------------
/ansible/roles/haproxy-lua/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Lua and associated requirements
3 | ansible.builtin.apt:
4 | name: ["lua{{ haproxy_lua_lua_version }}", "liblua{{ haproxy_lua_lua_version }}-dev", 'unzip', 'libssl-dev', 'build-essential', 'libreadline-dev']
5 | state: present
6 |
7 | - name: Set lua5.3 as default lua binary
8 | ansible.builtin.command: update-alternatives --install /usr/bin/lua lua-interpreter /usr/bin/lua{{ haproxy_lua_lua_version }} 130
9 | --slave /usr/share/man/man1/lua.1.gz lua-manual /usr/share/man/man1/lua{{ haproxy_lua_lua_version }}.1.gz
10 | args:
11 | creates: /usr/bin/lua
12 |
13 | - name: Create haproxy lua scripts directory if it does not exist
14 | ansible.builtin.file:
15 | path: /etc/haproxy/lua
16 | state: directory
17 | mode: '0755'
18 |
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy/templates/haproxy.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "service": {
3 | "name": "haproxy",
4 | "tags":["{{ hcv_environment }}"],
5 | "meta": {
6 | "environment":"{{ hcv_environment }}",
7 | "domain":"{{ environment_domain_name }}"
8 | },
9 | "tagged_addresses": {
10 | "lan": {
11 | "address": "{{ consul_haproxy_private_ip }}",
12 | "port": 80
13 | },
14 | "wan": {
15 | "address": "{{ consul_haproxy_public_ip }}",
16 | "port": 80
17 | }
18 | },
19 | "port": 80,
20 | "checks": [
21 | {
22 | "name": "HAProxy REST Health",
23 | "http": "http://localhost:8080/haproxy_health",
24 | "method": "GET",
25 | "interval": "10s",
26 | "timeout": "1s"
27 | }
28 | ]
29 | }
30 | }
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/terminate_instance_oracle.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 |
4 | export OCI_BIN="/usr/local/bin/oci"
5 |
6 | INSTANCE_METADATA=$(curl -s http://169.254.169.254/opc/v1/instance/)
7 | INSTANCE_ID=$(echo "$INSTANCE_METADATA" | jq .id -r)
8 |
9 | echo "Clean up the Route53 DNS"
10 | # this script is run from different users, e.g. jsidecar, ubuntu, root, and should not use sudo commands
11 | CLEANUP_ROUTE53_DNS="/usr/local/bin/cleanup_route53_dns.sh"
12 | if [ -f "$CLEANUP_ROUTE53_DNS" ]; then
13 | $CLEANUP_ROUTE53_DNS
14 | fi
15 |
16 | # now terminate our instance
17 | echo "Terminate the instance; we enable debug to have more details in case of oci cli failures"
18 | $OCI_BIN compute instance terminate --debug --instance-id "$INSTANCE_ID" --preserve-boot-volume false --auth instance_principal --force
19 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "ansible/roles/fluentd"]
2 | path = ansible/roles/fluentd
3 | url = https://github.com/aaronkvanmeerten/ansible-fluentd.git
4 | [submodule "ansible/roles/nvm"]
5 | path = ansible/roles/nvm
6 | url = https://github.com/aaronkvanmeerten/ansible-nvm.git
7 | [submodule "ansible/roles/memcached"]
8 | path = ansible/roles/memcached
9 | url = https://github.com/aaronkvanmeerten/ansible-role-memcached.git
10 | [submodule "ansible/roles/haproxy"]
11 | path = ansible/roles/haproxy
12 | url = https://github.com/aaronkvanmeerten/ansible-haproxy.git
13 | [submodule "ansible/roles/docker"]
14 | path = ansible/roles/docker
15 | url = https://github.com/aaronkvanmeerten/ansible-role-docker.git
16 | [submodule "ansible/roles/nomad"]
17 | path = ansible/roles/nomad
18 | url = https://github.com/aaronkvanmeerten/ansible-nomad.git
19 |
--------------------------------------------------------------------------------
/scripts/configure-firezone.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | unset ANSIBLE_SSH_USER
3 |
4 | [ -e ./stack-env.sh ] && . ./stack-env.sh
5 |
6 | usage() { echo "Usage: $0 [<ansible_ssh_user>]" 1>&2; }
7 |
8 | usage
9 |
10 | if [ -z "$1" ]
11 | then
12 | ANSIBLE_SSH_USER=$(whoami)
13 | echo "Ansible SSH user is not defined. We use current user: $ANSIBLE_SSH_USER"
14 | else
15 | ANSIBLE_SSH_USER=$1
16 | echo "Run ansible as $ANSIBLE_SSH_USER"
17 | fi
18 |
19 | DEPLOY_TAGS=${ANSIBLE_TAGS-"all"}
20 |
21 | if [ -z "$ANSIBLE_INVENTORY" ]; then
22 | ANSIBLE_INVENTORY="../all/bin/ec2.py"
23 | fi
24 |
25 | ansible-playbook --verbose ../../ansible/configure-firezone.yml --extra-vars "hcv_environment=$ENVIRONMENT" -i $ANSIBLE_INVENTORY \
26 | -e "ansible_ssh_user=$ANSIBLE_SSH_USER" \
27 | --vault-password-file ../../.vault-password.txt \
28 | --tags "$DEPLOY_TAGS"
29 |
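Putting the pieces together, the script takes an optional SSH user as its only argument and relies on stack-env.sh for the target environment; a hypothetical run (user and tags are placeholders):

ANSIBLE_TAGS=all ./configure-firezone.sh ubuntu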
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-set-stick-table/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Provide stick table entries
3 | ansible.builtin.copy:
4 | mode: 0644
5 | dest: "/tmp/{{ stick_table_filename }}"
6 | src: "{{ stick_table_entries_file }}"
7 |
8 | - name: Push entries to haproxy
9 | ansible.builtin.shell: |
10 | set -o pipefail
11 | cat /tmp/{{ stick_table_filename }} | \
12 | jq -r '.|map("set table {{ backend_name }} key \(.key) data.server_id \(.id)")|.[]' | sudo socat /var/run/haproxy/admin.sock stdio
13 | args:
14 | executable: /bin/bash
15 | register: socat_output
16 | when: stick_table_entries_file is defined
17 |
18 | - name: Clean stick table entries
19 | ansible.builtin.file:
20 | path: "/tmp/{{ stick_table_filename }}"
21 | state: "absent"
22 |
23 | # - debug: var=socat_output.stdout
24 | # when: show_shard_state
25 |
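The jq program in the push task expects the entries file to contain a JSON array of objects with key and id fields, and emits one haproxy runtime-API command per entry. A small sketch with hypothetical values (backend_name set to nodes):

echo '[{"key":"shard-1","id":3}]' | jq -r '.|map("set table nodes key \(.key) data.server_id \(.id)")|.[]'
# -> set table nodes key shard-1 data.server_id 3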
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/config.j2:
--------------------------------------------------------------------------------
1 | # Jitsi Videobridge settings
2 |
3 | # extra options to pass to the JVB daemon
4 | JVB_OPTS=""
5 |
6 | {% if jvb_set_memory %}
7 | VIDEOBRIDGE_MAX_MEMORY={{ jvb_memory }}
8 | export VIDEOBRIDGE_MAX_MEMORY
9 | {% endif %}
10 |
11 | # adds java system props that are passed to jvb (default are for home and logging config file)
12 | JAVA_SYS_PROPS="{% if jvb_preserve_frame_pointer %}-XX:+PreserveFramePointer{% endif %}{% if jvb_enable_yourkit %} -agentpath:{{ jvb_yourkit_agent_path }}{% endif %} -Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION=/etc/jitsi -Dnet.java.sip.communicator.SC_HOME_DIR_NAME=videobridge -Dnet.java.sip.communicator.SC_LOG_DIR_LOCATION=/var/log/jitsi -Djava.util.logging.config.file=/etc/jitsi/videobridge/logging.properties -Dconfig.file=/etc/jitsi/videobridge/jvb.conf"
13 |
14 | AUTHBIND=yes
15 |
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-configure/files/hook-configure-haproxy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -x
3 | set -e
4 |
5 | #first check if we have any input
6 | [ -t 0 ] || IN=$(cat -)
7 |
8 | if [ -z "$IN" ]; then
9 | echo "No input provided, exiting..."
10 | exit 1
11 | fi
12 |
13 | #now source or calculate our local values
14 | . /usr/local/bin/aws_cache.sh
15 |
16 | EVENT_ENVIRONMENT=$(echo $IN | jq -r '.environment')
17 | EVENT_TYPE=$(echo $IN | jq -r '.event_type')
18 | EVENT_EC2_INSTANCE_ID=$(echo $IN | jq -r '.ec2_instance_id')
19 |
20 | if [ -z "$ENVIRONMENT" ]; then
21 | echo "No environment provided, exiting..."
22 | exit 2
23 | fi
24 |
25 | # if [ "$ENVIRONMENT" == "$EVENT_ENVIRONMENT" ]; then
26 | # #do the restart
27 | # if [ ! "$EVENT_EC2_INSTANCE_ID" == "$EC2_INSTANCE_ID" ]; then
28 | # /usr/local/bin/configure-haproxy.py
29 | # fi
30 | # fi
31 |
--------------------------------------------------------------------------------
/ansible/roles/ntp/README.md:
--------------------------------------------------------------------------------
1 | ntp
2 | ===
3 |
4 | [Build Status](https://travis-ci.org/resmo/ansible-role-ntp)
5 |
6 | This role enables users to install and configure ntp on their hosts.
7 |
8 | Requirements
9 | ------------
10 |
11 | This role requires Ansible 1.4 or higher, and platform requirements are listed
12 | in the metadata file.
13 |
14 | Examples
15 | --------
16 |
17 | 1) Install ntp with the default settings.
18 |
19 | - hosts: all
20 | roles:
21 | - role: ntp
22 |
23 | 2) Install ntp and set some custom servers.
24 |
25 | - hosts: all
26 | roles:
27 | - role: ntp
28 | ntp_config_server: [2.ubuntu.pool.ntp.org, 1.ubuntu.pool.ntp.org]
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | - Benno Joy
39 | - René Moser
40 |
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/mod_muc_filter_access.lua:
--------------------------------------------------------------------------------
1 | local whitelist = module:get_option_set("muc_filter_whitelist");
2 |
3 | if not whitelist then
4 | module:log("warn", "No 'muc_filter_whitelist' option set, disabling muc_filter_access, plugin inactive");
5 | return
6 | end
7 |
8 | local jid_split = require "util.jid".split;
9 |
10 | local function incoming_presence_filter(event)
11 | local stanza = event.stanza;
12 | local _, domain, _ = jid_split(stanza.attr.from);
13 |
14 | if not stanza.attr.from or not whitelist:contains(domain) then
15 | -- Filter presence
16 | module:log("error", "Filtering unauthorized presence: %s", stanza:top_tag());
17 | return true;
18 | end
19 | end
20 |
21 | for _, jid_type in ipairs({ "host", "bare", "full" }) do
22 | module:hook("presence/"..jid_type, incoming_presence_filter, 2000);
23 | end
24 |
--------------------------------------------------------------------------------
/ansible/stop-shard-services.yml:
--------------------------------------------------------------------------------
1 | - name: Main
2 | hosts: all
3 | become: true
4 | become_user: root
5 | gather_facts: false
6 | tasks:
7 | - name: Copy clear shard state script
8 | ansible.builtin.copy:
9 | src: "roles/consul-signal/files/clear-shard-state-consul.sh"
10 | dest: "/usr/local/bin/clear-shard-state-consul.sh"
11 | mode: 0755
12 | owner: root
13 | - name: Clear shard state # noqa no-changed-when
14 | ansible.builtin.command: /usr/local/bin/clear-shard-state-consul.sh
15 | - name: Clear signal report # noqa no-changed-when
16 | ansible.builtin.command: /usr/local/bin/clear-shard-state-consul.sh signal-report
17 | - name: Stop consul
18 | ansible.builtin.service:
19 | name: consul
20 | state: stopped
21 | - name: Stop nginx
22 | ansible.builtin.service:
23 | name: nginx
24 | state: stopped
25 |
--------------------------------------------------------------------------------
/ansible/roles/jigasi-web/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jigasi_placement_region: "{{ ansible_ec2_placement_region if ansible_ec2_placement_region is defined and ansible_ec2_placement_region
3 | else oracle_to_aws_region_map[oracle_region] if oracle_region is defined else default(cloud_name) }}"
4 | jigasi_voximplant_users: "{{ voximplant_users[hcv_environment] | default({jigasi_placement_region: {jigasi_number: {'name':
5 | voximplant_users['outbound']['global']['outbound']['name'], 'password': voximplant_users['outbound']['global']['outbound']['password']}}}) }}"
6 | jigasi_voximplant_user: "{{ jigasi_voximplant_users[jigasi_placement_region][jigasi_number] | default({'name':
7 | voximplant_users['outbound']['global']['outbound']['name'], 'password': voximplant_users['outbound']['global']['outbound']['password']}) }}"
8 | jigasi_web_username: "{{ jigasi_voximplant_user['name'] | default('jigasi') }}"
9 |
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/haproxy.conf:
--------------------------------------------------------------------------------
1 | # HAProxy
2 |
3 | @type tail
4 | path /var/log/haproxy.log
5 | pos_file /var/spool/td-agent/haproxy.pos
6 | tag haproxy
7 | # Regex fields
8 |
9 | format /^(?[^ ]*) (?[\w-\.]+) (?\w+)\[(?\d+)\]: ((?[\w\.]+):(?\d+) \[(?