├── ansible
│   ├── roles
│   │   ├── prosody-egress
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── prosody
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── uninstall
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── files
│   │   │   │   ├── disable_messaging.pfw
│   │   │   │   ├── mod_muc_hide_all.lua
│   │   │   │   ├── prosody-log-filter.systemd
│   │   │   │   ├── mod_websocket_auth_token.patch
│   │   │   │   ├── prosody-log-filter.sh
│   │   │   │   ├── prosody-jvb-log-filter.systemd
│   │   │   │   ├── mod_muc_filter_access.lua
│   │   │   │   ├── muc_owner_allow_kick-0.12.patch
│   │   │   │   └── setup-prosody-jvb-service.sh
│   │   │   ├── vars
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   ├── install-from-url.yml
│   │   │   │   └── install-from-apt.yml
│   │   │   └── templates
│   │   │       └── jvb_muc_presence_filter.pfw.j2
│   │   ├── wavefront
│   │   │   ├── tests
│   │   │   │   ├── inventory
│   │   │   │   └── test.yml
│   │   │   ├── README.md
│   │   │   ├── tasks
│   │   │   │   ├── proxy
│   │   │   │   │   ├── RedHat.yml
│   │   │   │   │   └── Debian.yml
│   │   │   │   └── telegraf
│   │   │   │       ├── RedHat.yml
│   │   │   │       └── Debian.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── 10-wavefront.conf.j2
│   │   ├── consul-jigasi
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── jigasi.json.j2
│   │   ├── jitsi-videobridge
│   │   │   ├── vars
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── aws_credentials.j2
│   │   │   │   ├── jvb-udp-buffers.conf.j2
│   │   │   │   ├── monitor-terminating-instance.conf.j2
│   │   │   │   ├── shards.json.j2
│   │   │   │   ├── environments.json.j2
│   │   │   │   ├── terminate_instance_oracle.j2
│   │   │   │   └── config.j2
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   └── upgrade.yml
│   │   │   ├── files
│   │   │   │   ├── monitor-terminating-instance.service
│   │   │   │   ├── reconfigure-jvb-oracle.sh
│   │   │   │   ├── reconfigure-jvb.sh
│   │   │   │   ├── configure-jvb-oracle.py
│   │   │   │   └── jvb-stats-oracle.sh
│   │   │   └── handlers
│   │   │       └── main.yml
│   │   ├── rsyslog
│   │   │   ├── templates
│   │   │   │   ├── main.yml
│   │   │   │   ├── apparmor-usr.sbin.rsyslogd.j2
│   │   │   │   └── rsyslog-programrouting.conf.j2
│   │   │   ├── tasks
│   │   │   │   ├── main_deb.yml
│   │   │   │   └── deb_packages.yml
│   │   │   ├── vars
│   │   │   │   ├── main.yml
│   │   │   │   ├── default.yml
│   │   │   │   ├── Debian.yml
│   │   │   │   └── Ubuntu.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── nodejs
│   │   │   ├── tests
│   │   │   │   └── localhosts
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── role.yml
│   │   │   ├── tasks
│   │   │   │   ├── install-url.yml
│   │   │   │   └── main.yml
│   │   │   ├── vars
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── etc
│   │   │   │       └── apt
│   │   │   │           └── preferences.d
│   │   │   │               └── deb_nodesource_com_node.pref.2
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── ntp
│   │   │   ├── .gitignore
│   │   │   ├── tests
│   │   │   │   └── role.yml
│   │   │   ├── vars
│   │   │   │   ├── RedHat.yml
│   │   │   │   └── Debian.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── .travis.yml
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── consul-haproxy-jigasi
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── google-chrome
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jigasi-web
│   │   │   ├── templates
│   │   │   │   ├── jigasi_user.html.j2
│   │   │   │   └── nginx.site.j2
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── vars
│   │   │       └── main.yml
│   │   ├── sshusers
│   │   │   ├── templates
│   │   │   │   ├── sudoers.d.rapid7.j2
│   │   │   │   └── sudoers.d.sshusers.j2
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── jitsi-meet
│   │   │   ├── files
│   │   │   │   ├── robots.txt
│   │   │   │   └── jidesha-0.1.1-fx.xpi
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   ├── nginx.yml
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── vars
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── local.html.j2
│   │   ├── consul-selenium-grid-hub
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── selenium-grid-hub.json.j2
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── hcv-haproxy-rsyslog
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jiconop
│   │   │   ├── vars
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── config.j2
│   │   │   ├── tasks
│   │   │   │   ├── install.yml
│   │   │   │   ├── main.yml
│   │   │   │   └── configure.yml
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── haproxy-configure
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jvb-ssl
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jibri-pjsua
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── files
│   │   │   │   ├── jibri-pjsua.rsyslogd.conf
│   │   │   │   └── background.png
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   └── install.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── jibri-icewm2.systemd.j2
│   │   │   │   ├── jibri-xorg2.systemd.j2
│   │   │   │   ├── jibri-camera.systemd.j2
│   │   │   │   └── pjsua.config.j2
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── sshmfa
│   │   │   ├── handlers
│   │   │   │   └── main.yaml
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── google_authenticator.j2
│   │   ├── jicofo
│   │   │   ├── uninstall
│   │   │   │   └── tasks
│   │   │   │       └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── vars
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── jicofo-stats.sh.j2
│   │   │   │   └── config.j2
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── jitsi-videobridge-ddns
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── aws_credentials.j2
│   │   │   │   └── cleanup_route53_dns.j2
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── sip-jibri-sidecar
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── rsyslog.config.j2
│   │   │       └── sidecar.systemd.j2
│   │   ├── consul-haproxy
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── haproxy.json.j2
│   │   ├── fail2ban
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── tcpdump-jigasi
│   │   │   └── files
│   │   │       ├── tcpdump-jigasi-cleanup.sh
│   │   │       └── tcpdump-jigasi.service
│   │   ├── coturn
│   │   │   ├── tasks
│   │   │   │   ├── install-apt.yml
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── check-files-limits-oracle.sh.j2
│   │   │       ├── check-files-limits.sh.j2
│   │   │       ├── coturn-mark-unhealthy-oracle.sh.j2
│   │   │       ├── coturn_set_alarms_systemd.j2
│   │   │       ├── coturn_set_alarms_service.j2
│   │   │       └── coturn-mark-unhealthy.sh.j2
│   │   ├── jitsi-upload-customizations
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── consul-install
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── rsyslog.config.j2
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── files
│   │   │       └── consul.service
│   │   ├── jitsi-repo
│   │   │   ├── templates
│   │   │   │   └── jitsi-repo.conf.j2
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── journald
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jvb-rtcstats-push
│   │   │   ├── templates
│   │   │   │   ├── service.config.j2
│   │   │   │   ├── rsyslog.config.j2
│   │   │   │   └── systemd.j2
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   └── configure.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── haproxy-tenant-pin
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── config.j2
│   │   │   │   ├── rsyslog.config.j2
│   │   │   │   └── tenant-pin.service.j2
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       ├── main.yml
│   │   │       └── configure.yml
│   │   ├── tcpdump-prosody-jvb
│   │   │   └── files
│   │   │       ├── tcpdump-prosody-jvb-cleanup.sh
│   │   │       └── tcpdump-prosody-jvb.service
│   │   ├── autoscaler-sidecar
│   │   │   ├── templates
│   │   │   │   ├── aws_credentials.j2
│   │   │   │   ├── reconfigure_wrapper.sh.j2
│   │   │   │   ├── rsyslog.config.j2
│   │   │   │   └── sidecar.systemd.j2
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── files
│   │   │       ├── terminate_instance_aws.sh
│   │   │       └── terminate_instance_oracle.sh
│   │   ├── hcv-haproxy-configure
│   │   │   ├── files
│   │   │   │   ├── haproxy-fact.sh
│   │   │   │   └── hook-configure-haproxy.sh
│   │   │   ├── templates
│   │   │   │   ├── haproxy_default.j2
│   │   │   │   ├── environment.json.j2
│   │   │   │   └── haproxy.service.j2
│   │   │   └── handlers
│   │   │       └── main.yml
│   │   ├── signal-sidecar
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── rsyslog.config.j2
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   └── install.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── testrtc
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── testrtc.systemd.j2
│   │   │   │   ├── testrtc.upstart.j2
│   │   │   │   └── testrtc.credentials.sh.j2
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── jibri-java
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── aws_credentials.j2
│   │   │   │   ├── monitor-terminating-instance-systemd.j2
│   │   │   │   ├── graceful_shutdown_terminate_oracle.j2
│   │   │   │   ├── environments.json.j2
│   │   │   │   └── terminate_instance_oracle.j2
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── files
│   │   │       ├── jibri.rsyslogd.conf
│   │   │       ├── graceful_shutdown_terminate.sh
│   │   │       ├── reconfigure-jibri-oracle.sh
│   │   │       └── reconfigure-jibri.sh
│   │   ├── jigasi-rtcstats-push
│   │   │   ├── templates
│   │   │   │   ├── service.config.j2
│   │   │   │   ├── rsyslog.config.j2
│   │   │   │   └── systemd.j2
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   └── configure.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── jvb-colibri-proxy
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── consul-server-start
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── fluentd-jitsi
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── aws_credentials.j2
│   │   │   │   ├── fluentd_jitsi.conf.j2
│   │   │   │   ├── config.oci.j2
│   │   │   │   ├── bootstrap.conf.j2
│   │   │   │   ├── postinstall_ansible.conf.j2
│   │   │   │   ├── clouds.conf.j2
│   │   │   │   └── in_jvb.conf.j2
│   │   │   ├── files
│   │   │   │   ├── filters.conf
│   │   │   │   ├── jigasi.conf
│   │   │   │   ├── jicofo.conf
│   │   │   │   ├── haproxy-monitor.conf
│   │   │   │   ├── sip-jibri-selector.conf
│   │   │   │   ├── nginx.conf
│   │   │   │   ├── haproxy.conf
│   │   │   │   └── prosody.conf
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── flush-handlers
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── hcv-haproxy-status-lock
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── monit
│   │   │   └── files
│   │   │       ├── morun
│   │   │       └── modebug
│   │   ├── hcv-haproxy-set-stick-table
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jenkins-sshkey
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── selenium-grid
│   │   │   ├── files
│   │   │   │   ├── selenium-grid.datadog.conf
│   │   │   │   └── selenium-grid.rsyslogd.conf
│   │   │   ├── templates
│   │   │   │   ├── environments.json.j2
│   │   │   │   ├── selenium-xvfb.service.j2
│   │   │   │   ├── selenium-grid-hub.service.j2
│   │   │   │   ├── selenium-grid-extras-hub.service.j2
│   │   │   │   ├── selenium-grid-hub.json.j2
│   │   │   │   ├── hub_4444.json.j2
│   │   │   │   ├── selenium-grid-extras-node.service.j2
│   │   │   │   ├── selenium-grid-node.service.j2
│   │   │   │   ├── selenium-grid-node.json.j2
│   │   │   │   ├── node_5555.json.j2
│   │   │   │   └── selenium_grid_extras_config-hub.json.j2
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── clean-system
│   │   │   └── templates
│   │   │       └── 20auto-upgrades.j2
│   │   ├── jigasi-haproxy-agent
│   │   │   ├── templates
│   │   │   │   ├── service.config.j2
│   │   │   │   ├── rsyslog.config.j2
│   │   │   │   └── systemd.j2
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   └── configure.yml
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── jigasi
│   │   │   ├── files
│   │   │   │   ├── reconfigure-jigasi.sh
│   │   │   │   ├── jigasi-stats-oracle.sh
│   │   │   │   ├── monitor-terminating-instance.service
│   │   │   │   └── postinstall-jigasi-oracle.sh
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   ├── monitor-terminating-instance.conf.j2
│   │   │   │   ├── terminate_instance_oracle.j2
│   │   │   │   ├── config.j2
│   │   │   │   └── environments.json.j2
│   │   │   └── meta
│   │   │       └── main.yml
│   │   ├── unattended-upgrades
│   │   │   ├── templates
│   │   │   │   └── 20auto-upgrades.j2
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── consul-signal
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── files
│   │   │   │   ├── clear-shard-state-consul.sh
│   │   │   │   └── set-shard-state-consul.sh
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── signal.json.j2
│   │   ├── haproxy-jigasi
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── handlers
│   │   │       └── main.yml
│   │   ├── nginx
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── files
│   │   │   │   └── status_server
│   │   │   ├── templates
│   │   │   │   └── 47-nginx.conf.j2
│   │   │   ├── tasks
│   │   │   │   └── rsyslog.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── google-cloud
│   │   │   ├── tasks
│   │   │   │   ├── main.yml
│   │   │   │   ├── install.yml
│   │   │   │   └── configure.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── jenkins
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── files
│   │   │       └── jenkins.service
│   │   ├── consul-standalone
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── standalone.json.j2
│   │   ├── jibri-kernel
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── jitsi-torture
│   │   │   ├── files
│   │   │   │   ├── auth0-authenticate
│   │   │   │   │   └── package.json
│   │   │   │   └── generate-jwt.js
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── check-long.j2
│   │   │       └── check.j2
│   │   ├── haproxy-lua
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── hcv-haproxy-status
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── consul-agent
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── consul.hcl.j2
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── consul-server
│   │   │   ├── meta
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── consul.hcl.j2
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── files
│   │   │       └── consul-server-config.sh
│   │   ├── jitsi-dumper
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── common
│   │   │   ├── files
│   │   │   │   └── download.sh
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── chromedriver
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── iptables-jenkins
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jitsi-videobridge-auth
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── jitsi-torture-checkout
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── jigasi-auth
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── geckodriver
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── pjsua
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── firefox
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── iptables-serf
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── iptables-coturn
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── openjdk-java
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── jitsi-upload-integrations
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── iptables-firezone
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── iptables
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── iptables-selenium-grid
│   │       └── tasks
│   │           └── main.yml
│   ├── tcpdump-jigasi.yml
│   ├── haproxy-set-stick-table.yml
│   ├── haproxy-status.yml
│   ├── haproxy-status-lock.yml
│   ├── configure-jitsi-repo.yml
│   ├── stop-consul-services.yml
│   ├── configure-users.yml
│   ├── jenkins-server.yml
│   ├── jvb-colibri-proxy-nginx.yaml
│   ├── templates
│   │   └── torture_wrapper.j2.sh
│   ├── clear-cloud-cache.yml
│   ├── set-signal-state.yml
│   ├── stop-shard-services.yml
│   ├── build-coturn-oracle.yml
│   └── haproxy-health-value.yml
├── README.md
├── .ansible-lint
├── scripts
│   ├── configure-users.sh
│   ├── configure-jitsi-repo.sh
│   └── configure-firezone.sh
├── ansible.cfg
└── .gitmodules
/ansible/roles/prosody-egress/tasks/main.yml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/ansible/roles/prosody/meta/main.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/ansible/roles/prosody-egress/defaults/main.yml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/ansible/roles/wavefront/tests/inventory:
--------------------------------------------------------------------------------
localhost
--------------------------------------------------------------------------------
/ansible/roles/consul-jigasi/defaults/main.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/vars/main.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/templates/main.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/ansible/roles/nodejs/tests/localhosts:
--------------------------------------------------------------------------------
[local]
localhost
--------------------------------------------------------------------------------
/ansible/roles/ntp/.gitignore:
--------------------------------------------------------------------------------
vagrant*
.vagrant
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy-jigasi/defaults/main.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/ansible/roles/nodejs/handlers/main.yml:
--------------------------------------------------------------------------------
---
# handlers file for nodejs
--------------------------------------------------------------------------------
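The nodejs handlers file is an empty stub. If the role ever needs to react to configuration changes, an entry would follow the same shape used by the other roles in this repo; a hypothetical sketch (service name invented for illustration):

---
# handlers file for nodejs
- name: Restart node service
  ansible.builtin.service:
    name: my-node-app   # hypothetical service managed by a dependent role
    state: restarted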
/ansible/roles/google-chrome/defaults/main.yml:
--------------------------------------------------------------------------------
---
google_chrome_beta_flag: false
--------------------------------------------------------------------------------
/ansible/roles/jigasi-web/templates/jigasi_user.html.j2:
--------------------------------------------------------------------------------
{{ jigasi_web_username }}
--------------------------------------------------------------------------------
/ansible/roles/sshusers/templates/sudoers.d.rapid7.j2:
--------------------------------------------------------------------------------
rapid7 ALL=(ALL) NOPASSWD:ALL
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# infra-configuration
Scripts for configuring Jitsi services
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/files/robots.txt:
--------------------------------------------------------------------------------
User-agent: *
Allow: /$
Disallow: /
--------------------------------------------------------------------------------
/ansible/roles/consul-selenium-grid-hub/defaults/main.yml:
--------------------------------------------------------------------------------
---
selenium_grid_name: 'default'
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-rsyslog/defaults/main.yml:
--------------------------------------------------------------------------------
---
haproxy_reconfigure_rsyslog: false
--------------------------------------------------------------------------------
/ansible/roles/jiconop/vars/main.yml:
--------------------------------------------------------------------------------
---
jiconop_xmpp_domain: "{{ prosody_domain_name }}"
--------------------------------------------------------------------------------
/ansible/roles/ntp/tests/role.yml:
--------------------------------------------------------------------------------
- hosts: localhost
  roles:
    - ansible-role-ntp
--------------------------------------------------------------------------------
/ansible/roles/sshusers/templates/sudoers.d.sshusers.j2:
--------------------------------------------------------------------------------
%sshsudousers ALL=(ALL) NOPASSWD:ALL
--------------------------------------------------------------------------------
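Both sudoers templates grant passwordless sudo. The tasks that install them are not part of this excerpt, but a drop-in under /etc/sudoers.d should always be validated with visudo before being moved into place, since a syntax error can lock out sudo entirely. A minimal sketch of such a task (not the role's actual task, which isn't shown):

- name: Install sshusers sudoers drop-in
  ansible.builtin.template:
    src: sudoers.d.sshusers.j2
    dest: /etc/sudoers.d/sshusers
    owner: root
    group: root
    mode: "0440"
    validate: /usr/sbin/visudo -cf %s   # refuse to install a broken sudoers file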
/ansible/roles/haproxy-configure/defaults/main.yml:
--------------------------------------------------------------------------------
---
haproxy_configure_log_dest: ../../test-results
--------------------------------------------------------------------------------
/ansible/roles/jvb-ssl/meta/main.yml:
--------------------------------------------------------------------------------
---
dependencies:
  - { role: nginx, when: jvb_ssl_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/ntp/vars/RedHat.yml:
--------------------------------------------------------------------------------
---
ntp_service_name: ntpd
ntp_config_driftfile: /var/lib/ntp/drift
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/meta/main.yml:
--------------------------------------------------------------------------------
---
dependencies:
  - { role: pjsua, when: jibri_pjsua_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/jigasi-web/defaults/main.yml:
--------------------------------------------------------------------------------
---
jigasi_web_install_flag: true
jigasi_web_username: jigasi
--------------------------------------------------------------------------------
/ansible/roles/ntp/vars/Debian.yml:
--------------------------------------------------------------------------------
---
ntp_service_name: ntp
ntp_config_driftfile: /var/lib/ntp/ntp.drift
--------------------------------------------------------------------------------
/ansible/roles/prosody/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: reload prosody
  service: name=prosody state=restarted
--------------------------------------------------------------------------------
/ansible/roles/jvb-ssl/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: restart nginx
  service: name=nginx state=restarted enabled=yes
--------------------------------------------------------------------------------
/ansible/roles/sshmfa/handlers/main.yaml:
--------------------------------------------------------------------------------
---
- name: restart sshd
  service: name=ssh state=restarted enabled=yes
--------------------------------------------------------------------------------
/ansible/roles/jicofo/uninstall/tasks/main.yml:
--------------------------------------------------------------------------------
---
- service: name=jicofo state=stopped enabled=no
  ignore_errors: true
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge-ddns/meta/main.yml:
--------------------------------------------------------------------------------
---
dependencies:
  - { role: nodejs, when: jvb_ddns_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/prosody/uninstall/tasks/main.yml:
--------------------------------------------------------------------------------
---
- service: name=prosody state=stopped enabled=no
  ignore_errors: true
--------------------------------------------------------------------------------
/ansible/roles/sip-jibri-sidecar/meta/main.yml:
--------------------------------------------------------------------------------
---
dependencies:
  - { role: nodejs, when: sip_jibri_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/sshmfa/defaults/main.yml:
--------------------------------------------------------------------------------
---
mfa_users: "{{ ssh_users_jitsi }}"
mfa_security_users: "{{ ssh_users_security }}"
--------------------------------------------------------------------------------
/.ansible-lint:
--------------------------------------------------------------------------------
# Ansible-lint completely ignores rules or tags listed below
skip_list:
  - run_once[play]
  - no-changed-when
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/disable_messaging.pfw:
--------------------------------------------------------------------------------
KIND: message
INSPECT: body
LOG=[debug] Dropping message: $(stanza)
DROP.
--------------------------------------------------------------------------------
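disable_messaging.pfw is a Prosody mod_firewall ruleset that drops any message stanza carrying a body. The prosody role's install/configure tasks are not shown in this excerpt; wiring a ruleset like this in typically means copying it somewhere Prosody can read and reloading the service. A sketch under those assumptions (destination path invented):

- name: Deploy message-drop firewall rules
  ansible.builtin.copy:
    src: disable_messaging.pfw
    dest: /etc/prosody/rules.d/disable_messaging.pfw   # hypothetical path; mod_firewall must also be enabled
    mode: "0644"
  notify: reload prosody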
/ansible/roles/consul-haproxy/defaults/main.yml:
--------------------------------------------------------------------------------
---
consul_haproxy_private_ip: "{{ ansible_default_ipv4.address }}"
consul_haproxy_public_ip:
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/files/jibri-pjsua.rsyslogd.conf:
--------------------------------------------------------------------------------
if $programname == 'ffmpeg' then {
/var/log/local/jibri-ffmpeg.log
~
}
--------------------------------------------------------------------------------
/ansible/roles/jiconop/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart jiconop
  ansible.builtin.service:
    name: jiconop
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/templates/apparmor-usr.sbin.rsyslogd.j2:
--------------------------------------------------------------------------------
network inet stream{% if rsyslog_inet6_input_enabled %},
network inet6 stream{% endif %}
--------------------------------------------------------------------------------
/ansible/roles/fail2ban/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart fail2ban
  ansible.builtin.service:
    name: fail2ban
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/files/background.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daimoc/infra-configuration/main/ansible/roles/jibri-pjsua/files/background.png
--------------------------------------------------------------------------------
/ansible/roles/nodejs/role.yml:
--------------------------------------------------------------------------------
---
- name: Test the Node.js role
  hosts: all
  become: yes
  roles:
    - role: "ansible-nodejs-role"
--------------------------------------------------------------------------------
/ansible/roles/sshmfa/templates/google_authenticator.j2:
--------------------------------------------------------------------------------
{{ item.mfa_key }}
" RATE_LIMIT 3 30
" WINDOW_SIZE 17
" DISALLOW_REUSE
" TOTP_AUTH
--------------------------------------------------------------------------------
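google_authenticator.j2 renders a per-user ~/.google_authenticator file (the leading double quotes are the file format's option syntax, and the item.mfa_key reference implies the template is rendered inside a loop over a user list such as mfa_users). A hedged sketch of what that loop might look like (field names assumed):

- name: Install per-user google-authenticator config
  ansible.builtin.template:
    src: google_authenticator.j2
    dest: "/home/{{ item.username }}/.google_authenticator"   # 'username' field assumed
    owner: "{{ item.username }}"
    mode: "0400"   # the PAM module refuses group/world-readable key files
  loop: "{{ mfa_users }}"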
/ansible/roles/tcpdump-jigasi/files/tcpdump-jigasi-cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

/usr/bin/find /var/lib/tcpdump-jigasi -type f -mmin +300 -exec rm {} \;
--------------------------------------------------------------------------------
/ansible/roles/coturn/tasks/install-apt.yml:
--------------------------------------------------------------------------------
---
- name: "Install coturn package and dependencies from apt"
  ansible.builtin.apt:
    name: coturn
--------------------------------------------------------------------------------
/ansible/roles/jitsi-upload-customizations/defaults/main.yml:
--------------------------------------------------------------------------------
---
upload_customizations_configure_flag: true
upload_customizations_install_flag: true
--------------------------------------------------------------------------------
/ansible/roles/ntp/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart ntp
  ansible.builtin.service:
    name: "{{ ntp_service_name }}"
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/tasks/main_deb.yml:
--------------------------------------------------------------------------------
# Debian related tasks
---
- name: Install deb packages
  ansible.builtin.include_tasks: deb_packages.yml
--------------------------------------------------------------------------------
/ansible/roles/consul-install/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart rsyslog service
  ansible.builtin.service:
    name: rsyslog
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/files/jidesha-0.1.1-fx.xpi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/daimoc/infra-configuration/main/ansible/roles/jitsi-meet/files/jidesha-0.1.1-fx.xpi
--------------------------------------------------------------------------------
/ansible/roles/jitsi-repo/templates/jitsi-repo.conf.j2:
--------------------------------------------------------------------------------
machine {{ jitsi_repo_host }}
login {{ jitsi_repo_username }}
password {{ jitsi_repo_password }}
--------------------------------------------------------------------------------
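jitsi-repo.conf.j2 is a netrc-style credentials file, so whatever task renders it should keep the result root-readable only. A sketch of such a task (the destination is an assumption; the role's real tasks are not in this excerpt):

- name: Render repo credentials
  ansible.builtin.template:
    src: jitsi-repo.conf.j2
    dest: /etc/apt/auth.conf.d/jitsi-repo.conf   # hypothetical destination
    owner: root
    group: root
    mode: "0600"   # contains a plaintext password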
/ansible/roles/journald/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart journald
  ansible.builtin.service:
    name: systemd-journald
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/jvb-rtcstats-push/templates/service.config.j2:
--------------------------------------------------------------------------------
JVB_ADDRESS={{ jvb_rtcstats_push_jvb_address }}
RTCSTATS_SERVER={{ jvb_rtcstats_push_rtcstats_server }}
--------------------------------------------------------------------------------
/ansible/roles/prosody/vars/main.yml:
--------------------------------------------------------------------------------
---
prosody_bosh_service_address: "{{ prosody_domain_name }}"
prosody_xmpp_service_address: "{{ prosody_domain_name }}"
--------------------------------------------------------------------------------
/ansible/roles/haproxy-tenant-pin/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart tenant-pin
  ansible.builtin.service:
    name: tenant-pin
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-rsyslog/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart haproxy rsyslog
  ansible.builtin.service:
    name: rsyslog
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/jiconop/templates/config.j2:
--------------------------------------------------------------------------------
JICONOP_XMPP_DOMAIN={{ jiconop_xmpp_domain }}
JICONOP_BOSH_URL={{ jiconop_bosh_url }}
JICONOP_PORT={{ jiconop_port }}
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/meta/main.yml:
--------------------------------------------------------------------------------
---
dependencies:
  - { role: jitsi-repo, when: jitsi_meet_install_flag }
  - { role: nginx, when: jitsi_meet_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/tasks/nginx.yml:
--------------------------------------------------------------------------------
- template: src=nginx.site.j2 dest=/etc/nginx/sites-available/{{ jitsi_meet_domain_name }} mode=0644
  notify: reload nginx
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
[default]
aws_access_key_id = {{ aws_access_key_id }}
aws_secret_access_key = {{ aws_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/tcpdump-prosody-jvb/files/tcpdump-prosody-jvb-cleanup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

/usr/bin/find /var/lib/tcpdump-prosody-jvb -type f -mmin +300 -exec rm {} \;
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
[default]
aws_access_key_id = {{ aws_access_key_id }}
aws_secret_access_key = {{ aws_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-configure/files/haproxy-fact.sh:
--------------------------------------------------------------------------------
#!/bin/bash

FACT_CACHE_FILE="/tmp/haproxy-facts.json"
[ -f "$FACT_CACHE_FILE" ] && cat $FACT_CACHE_FILE
--------------------------------------------------------------------------------
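haproxy-fact.sh just prints a cached JSON blob, which is exactly the shape of an Ansible local-fact script: installed executable under /etc/ansible/facts.d with a .fact suffix, its output surfaces as ansible_local.<name> on the next fact gathering. A sketch of that wiring (fact name assumed; how the role actually installs it isn't shown here):

- name: Install haproxy local fact script
  ansible.builtin.copy:
    src: haproxy-fact.sh
    dest: /etc/ansible/facts.d/haproxy.fact   # exposed as ansible_local.haproxy
    mode: "0755"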
/ansible/roles/jitsi-videobridge-ddns/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
[default]
aws_access_key_id = {{ aws_access_key_id }}
aws_secret_access_key = {{ aws_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/meta/main.yml:
--------------------------------------------------------------------------------
---
dependencies:
  - { role: jitsi-repo, when: jvb_install_flag }
  - { role: openjdk-java, when: jvb_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/vars/main.yml:
--------------------------------------------------------------------------------
---
# items:
# Logstash host for beta environment
logstash_endpoint_domain: beta-us-east-1-logstash.meet-beta.hipchat.ninja
--------------------------------------------------------------------------------
/ansible/roles/signal-sidecar/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart signal-sidecar
  ansible.builtin.service:
    name: signal-sidecar
    state: restarted
--------------------------------------------------------------------------------
/ansible/roles/testrtc/meta/main.yml:
--------------------------------------------------------------------------------
---
dependencies:
  - { role: "nodejs", when: testrtc_install_flag }
  - { role: "nginx", when: testrtc_install_flag }
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: install.yml
  when: jibri_install_flag

- include_tasks: configure.yml
  when: jibri_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/jigasi-rtcstats-push/templates/service.config.j2:
--------------------------------------------------------------------------------
JIGASI_ADDRESS={{ jigasi_rtcstats_push_jigasi_address }}
RTCSTATS_SERVER={{ jigasi_rtcstats_push_rtcstats_server }}
--------------------------------------------------------------------------------
/ansible/roles/jvb-colibri-proxy/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart nginx
  ansible.builtin.service:
    name: nginx
    state: restarted
    enabled: true
--------------------------------------------------------------------------------
/ansible/roles/nodejs/tasks/install-url.yml:
--------------------------------------------------------------------------------
---
- name: install nodejs from internal repo URL
  apt: deb="{{ nodejs_url }}" state=present
  retries: 3
  delay: 1
--------------------------------------------------------------------------------
/ansible/roles/prosody/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: install.yml
  when: prosody_install_flag

- include_tasks: configure.yml
  when: prosody_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/sshusers/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart sshd for sshusers
  ansible.builtin.service:
    name: ssh
    state: restarted
    enabled: true
--------------------------------------------------------------------------------
/ansible/roles/consul-server-start/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Start consul service
  ansible.builtin.service:
    name: consul
    enabled: true
    state: started
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart jitsi fluentd
  ansible.builtin.service:
    name: td-agent
    state: restarted
    enabled: true
--------------------------------------------------------------------------------
/ansible/roles/jibri-java/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
[default]
aws_access_key_id = {{ aws_cloudwatch_access_key_id }}
aws_secret_access_key = {{ aws_cloudwatch_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/consul-haproxy-jigasi/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Enable consul service
  ansible.builtin.systemd:
    name: consul
    state: started
    enabled: true
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/aws_credentials.j2:
--------------------------------------------------------------------------------
[default]
aws_access_key_id = {{ aws_cloudwatch_access_key_id }}
aws_secret_access_key = {{ aws_cloudwatch_secret_access_key }}
--------------------------------------------------------------------------------
/ansible/roles/flush-handlers/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: Force all notified handlers to run at this point, not waiting for normal sync points
  ansible.builtin.meta: flush_handlers
--------------------------------------------------------------------------------
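The flush-handlers role exists so a playbook can force pending handlers to run between roles instead of at the end of the play. Illustrative use, with role names taken from this repo (the ordering is hypothetical, not a playbook that appears here):

- hosts: all
  roles:
    - { role: "nginx" }
    - { role: "flush-handlers" }   # any notified nginx restart fires here, not at play end
    - { role: "jitsi-meet" }       # can now assume nginx is already running its new config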
/ansible/roles/jibri-pjsua/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: install.yml
  when: jibri_pjsua_install_flag

- include_tasks: configure.yml
  when: jibri_pjsua_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-status-lock/defaults/main.yml:
--------------------------------------------------------------------------------
---
hcv_environment: all
hcv_haproxy_status_lock_file: '/tmp/haproxy-status.lock'
hcv_haproxy_status_lock_action: 'unlock'
--------------------------------------------------------------------------------
/ansible/roles/jibri-pjsua/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: restart jibri cameras
  service: name="{{ item }}" state=restarted
  with_items:
    - jibri-camera-0
    - jibri-camera-1
--------------------------------------------------------------------------------
/ansible/roles/monit/files/morun:
--------------------------------------------------------------------------------
#!/bin/sh
{
echo "MONIT-WRAPPER $@"
$@
R=$?
echo "MONIT-WRAPPER exit code $R"
# redirect to the log first, then duplicate stderr onto it, so both streams land in the file
} >> /var/log/monit-debug.log 2>&1
--------------------------------------------------------------------------------
/ansible/roles/nodejs/vars/main.yml:
--------------------------------------------------------------------------------
---
# vars file for nodejs
# e.g. "16.x" stays "16.x"; "16.20.2" becomes "16.x"
debian_repo_version: "{{ nodejs_version if nodejs_version.split('.')[1] == 'x' else nodejs_version.split('.')[0]+'.x' }}"
--------------------------------------------------------------------------------
/ansible/roles/sip-jibri-sidecar/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: install.yml
  when: sip_jibri_install_flag

- include_tasks: configure.yml
  when: sip_jibri_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/fluentd_jitsi.conf.j2:
--------------------------------------------------------------------------------
# {{ ansible_managed }}

@type record_transformer

host ${hostname}

--------------------------------------------------------------------------------
/ansible/tcpdump-jigasi.yml:
--------------------------------------------------------------------------------
- name: Main
  hosts: all
  gather_facts: false
  become_user: root
  become: true
  roles:
    - { role: "tcpdump-jigasi", tags: "tcpdump-jigasi" }
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/files/filters.conf:
--------------------------------------------------------------------------------

@type record_transformer

host ${hostname}
process ${tag_parts[0]}
tag ${tag}

--------------------------------------------------------------------------------
/ansible/roles/hcv-haproxy-set-stick-table/defaults/main.yml:
--------------------------------------------------------------------------------
---
backend_name: 'nodes'
stick_table_entries: false
stick_table_entries_file: false
stick_table_filename: stick-table-entries.json
--------------------------------------------------------------------------------
/ansible/roles/jenkins-sshkey/tasks/main.yml:
--------------------------------------------------------------------------------
- name: Ubuntu jenkins service key
  ansible.posix.authorized_key:
    user: "ubuntu"
    key: "{{ item }}"
  with_items: "{{ jenkins_deploy_keys }}"
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/templates/jvb-udp-buffers.conf.j2:
--------------------------------------------------------------------------------
# {{ ansible_managed }}

net.core.rmem_max={{ jvb_udp_buffer_size }}
net.core.netdev_max_backlog={{ jvb_udp_buffer_max_backlog }}
--------------------------------------------------------------------------------
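jvb-udp-buffers.conf.j2 renders kernel sysctl settings; a file dropped under /etc/sysctl.d only takes effect after a reload, so the accompanying task (not shown in this excerpt) presumably applies them too. A sketch of one way to do that with the ansible.posix.sysctl module (file path assumed):

- name: Apply JVB UDP receive buffer sysctl
  ansible.posix.sysctl:
    name: net.core.rmem_max
    value: "{{ jvb_udp_buffer_size }}"
    sysctl_file: /etc/sysctl.d/jvb-udp-buffers.conf   # assumed destination
    reload: true   # runs sysctl -p against the file so the setting is live immediately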
/ansible/roles/rsyslog/vars/default.yml:
--------------------------------------------------------------------------------
# Fallback OS defaults
---
rsyslog_package_names:
  - rsyslog
rsyslog_service_name: rsyslog
rsyslog_file_owner: root
rsyslog_file_group: root
--------------------------------------------------------------------------------
/ansible/roles/jicofo/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart jicofo
  ansible.builtin.service:
    name: jicofo
    state: restarted
  when: not jicofoservice.changed and jicofo_configure_flag
--------------------------------------------------------------------------------
/ansible/roles/nodejs/templates/etc/apt/preferences.d/deb_nodesource_com_node.pref.2:
--------------------------------------------------------------------------------
# {{ ansible_managed }}

Package: *
Pin: release o=Node Source
Pin-Priority: {{ nodejs_nodesource_pin_priority }}
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/files/selenium-grid.datadog.conf:
--------------------------------------------------------------------------------
init_config:
  default_timeout: 10
  default_url: http://localhost:5555/wd/hub/sessions
  default_slots: 1

instances:
  [{}]
--------------------------------------------------------------------------------
/scripts/configure-users.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# e.g. ../all/bin/terraform/standalone

ansible-playbook -v -i "127.0.0.1," -c local ansible/configure-users.yml --vault-password-file .vault-password.txt
--------------------------------------------------------------------------------
/ansible/haproxy-set-stick-table.yml:
--------------------------------------------------------------------------------
- name: Main
  hosts: all
  become: true
  become_user: root
  gather_facts: false
  roles:
    - { role: "hcv-haproxy-set-stick-table", tags: "set-stick-table" }
--------------------------------------------------------------------------------
/ansible/haproxy-status.yml:
--------------------------------------------------------------------------------
- name: Main
  hosts: tag_shard_role_haproxy
  become_user: root
  become: true
  strategy: free
  roles:
    - { role: "hcv-haproxy-status", tags: "hcv-haproxy-status" }
--------------------------------------------------------------------------------
/ansible/roles/prosody/files/mod_muc_hide_all.lua:
--------------------------------------------------------------------------------
-- This module makes all MUCs in Prosody unavailable on disco#items query

module:hook("muc-room-pre-create", function(event)
    event.room:set_hidden(true);
end, -1);
--------------------------------------------------------------------------------
/ansible/roles/wavefront/README.md:
--------------------------------------------------------------------------------
# Wavefront Ansible Role

**Note**: I've copied this from wavefrontHQ's ansible role instead of using a
submodule so I could adjust it to meet the requirements set by the COP team
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/templates/reconfigure_wrapper.sh.j2:
--------------------------------------------------------------------------------
#!/bin/bash
# first wait for cloud init to finish
cloud-init status --wait
# run reconfiguration script
sudo {{ autoscaler_reconfigure_script }}
exit $?
--------------------------------------------------------------------------------
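reconfigure_wrapper.sh.j2 blocks on cloud-init before triggering reconfiguration, which only works if the rendered script lands somewhere executable. A sketch of the install task (destination path assumed; the role's actual tasks are not part of this excerpt):

- name: Install autoscaler reconfigure wrapper
  ansible.builtin.template:
    src: reconfigure_wrapper.sh.j2
    dest: /usr/local/bin/reconfigure_wrapper.sh   # assumed destination
    mode: "0755"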
/ansible/roles/clean-system/templates/20auto-upgrades.j2:
--------------------------------------------------------------------------------
APT::Periodic::Update-Package-Lists "0";
APT::Periodic::Download-Upgradeable-Packages "0";
APT::Periodic::AutocleanInterval "0";
APT::Periodic::Unattended-Upgrade "0";
--------------------------------------------------------------------------------
/ansible/roles/consul-install/templates/rsyslog.config.j2:
--------------------------------------------------------------------------------
template(name="outfmt" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n")
if $programname == 'consul' then {{ consul_log_dir }}/consul.log
& stop
--------------------------------------------------------------------------------
/ansible/roles/jigasi-haproxy-agent/templates/service.config.j2:
--------------------------------------------------------------------------------
API_PORT={{ jigasi_haproxy_agent_listen_port }}
MAX_PARTICIPANTS={{ jigasi_haproxy_agent_max_participants }}
HEALTH_INTERVAL={{ jigasi_haproxy_agent_health_interval }}
--------------------------------------------------------------------------------
/ansible/roles/jigasi/files/reconfigure-jigasi.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# rebuild the configuration files and signal new shards to jigasi
CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jigasi" /usr/local/bin/configure-jigasi-local.sh
exit $?
--------------------------------------------------------------------------------
/scripts/configure-jitsi-repo.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# e.g. ../all/bin/terraform/standalone

ansible-playbook -v -i "127.0.0.1," -c local ansible/configure-jitsi-repo.yml --vault-password-file .vault-password.txt

exit $?
--------------------------------------------------------------------------------
/ansible/haproxy-status-lock.yml:
--------------------------------------------------------------------------------
- name: Main
  hosts: tag_shard_role_haproxy
  become_user: root
  become: true
  strategy: free
  roles:
    - { role: "hcv-haproxy-status-lock", tags: "hcv-haproxy-status-lock" }
--------------------------------------------------------------------------------
/ansible/roles/jigasi/files/jigasi-stats-oracle.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# pull our own instance and environment
. /usr/local/bin/oracle_cache.sh

# now run the python that pushes stats to statsd
/usr/local/bin/jigasi-stats.py
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/vars/Debian.yml:
--------------------------------------------------------------------------------
# Debian family OS defaults
---
rsyslog_package_names:
  - rsyslog
rsyslog_service_name: rsyslog
rsyslog_file_owner: syslog
rsyslog_file_group: adm
rsyslog_os_supported: yes
--------------------------------------------------------------------------------
/ansible/roles/rsyslog/vars/Ubuntu.yml:
--------------------------------------------------------------------------------
# Ubuntu Family OS defaults
---
rsyslog_package_names:
  - rsyslog
rsyslog_service_name: rsyslog
rsyslog_file_owner: syslog
rsyslog_file_group: adm
rsyslog_os_supported: yes
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/files/selenium-grid.rsyslogd.conf:
--------------------------------------------------------------------------------
if $programname == 'java' then {
/var/log/local/selenium-grid.log
~
}

if $programname == 'Xvfb' then {
/var/log/local/selenium-xvfb.log
~
}
--------------------------------------------------------------------------------
/ansible/roles/jiconop/tasks/install.yml:
--------------------------------------------------------------------------------
---
- name: Install jiconop package
  ansible.builtin.apt:
    name: "{{ jiconop_deb_pkg_name }}={{ jiconop_deb_pkg_version }}"
    state: present
  notify:
    - Restart jiconop
--------------------------------------------------------------------------------
/ansible/roles/nodejs/tasks/main.yml:
--------------------------------------------------------------------------------
# Install Node.js using packages crafted by NodeSource
---
- include_tasks: install-apt.yml
  when: nodejs_from_apt

- include_tasks: install-url.yml
  when: not nodejs_from_apt
--------------------------------------------------------------------------------
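The nodejs role's main.yml switches between NodeSource apt packages and a direct .deb download; selecting the URL path is just a matter of variables, e.g. in group_vars (both values illustrative, not defaults from this repo):

nodejs_from_apt: false
nodejs_url: "https://internal-repo.example.com/pool/nodejs_16.20.2_amd64.deb"   # hypothetical URL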
/ansible/roles/sip-jibri-sidecar/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: restart sip-jibri sidecar service
  systemd:
    state: restarted
    daemon_reload: yes
    enabled: yes
    name: "{{ sip_jibri_sidecar_service_name }}"
--------------------------------------------------------------------------------
/ansible/roles/unattended-upgrades/templates/20auto-upgrades.j2:
--------------------------------------------------------------------------------
APT::Periodic::Update-Package-Lists "{{ unattended_upgrades_update_package_lists_interval }}";
APT::Periodic::Unattended-Upgrade "{{ unattended_upgrades_upgrade_interval }}";
--------------------------------------------------------------------------------
/ansible/roles/wavefront/tests/test.yml:
--------------------------------------------------------------------------------
---
- hosts: localhost
  remote_user: root
  gather_facts: true
  become: true
  roles:
    - { role: wavefront, wavefront_install_collector: "true", proxy_address: "localhost" }
--------------------------------------------------------------------------------
/ansible/roles/jitsi-videobridge/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: install.yml
  when: jvb_install_flag

- include_tasks: upgrade.yml
  when: jvb_upgrade_flag

- include_tasks: configure.yml
  when: jvb_configure_flag
--------------------------------------------------------------------------------
/ansible/configure-jitsi-repo.yml:
--------------------------------------------------------------------------------
---
- name: Main
  hosts: all
  become_user: root
  become: true
  gather_facts: true
  vars_files:
    - secrets/repo.yml
  roles:
    - { role: "jitsi-repo", tags: "repo" }
--------------------------------------------------------------------------------
/ansible/roles/autoscaler-sidecar/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart autoscaler sidecar service
  ansible.builtin.systemd:
    state: restarted
    daemon_reload: true
    enabled: true
    name: "{{ autoscaler_sidecar_service_name }}"
--------------------------------------------------------------------------------
/ansible/roles/jitsi-meet/tasks/main.yml:
--------------------------------------------------------------------------------
---
- include_tasks: install.yml
  when: jitsi_meet_install_flag

- include_tasks: configure.yml
  when: jitsi_meet_configure_flag

- include_tasks: nginx.yml
  when: jitsi_meet_nginx_only_flag
--------------------------------------------------------------------------------
/ansible/roles/consul-signal/defaults/main.yml:
--------------------------------------------------------------------------------
---
consul_signal_cloud_provider: "{{ cloud_provider | default('aws') }}"
consul_signal_private_ip: "{{ ansible_default_ipv4.address }}"
consul_signal_public_ip: "{{ ansible_ec2_public_ipv4 | default('') }}"
--------------------------------------------------------------------------------
/ansible/roles/jicofo/vars/main.yml:
--------------------------------------------------------------------------------
---
jicofo_hostname: "{{ prosody_domain_name }}"
jicofo_auth_domain: "auth.{{ prosody_domain_name }}"
jicofo_auth_user: focus
jicofo_auth_password: "{{ prosody_focus_user_secret }}"
jicofo_make_jvb_checks: true
--------------------------------------------------------------------------------
/ansible/roles/selenium-grid/templates/environments.json.j2:
--------------------------------------------------------------------------------
{
{% if selenium_grid_enable_consul %}
  "consul_server": "{{ selenium_grid_consul_server_url }}",
{% endif %}
  "grid": "{{ selenium_grid_name }}",
  "grid_role": "{{ selenium_grid_role }}"
}
--------------------------------------------------------------------------------
/ansible/stop-consul-services.yml:
--------------------------------------------------------------------------------
- name: Main
  hosts: all
  become: true
  become_user: root
  gather_facts: false
  tasks:
    - name: Stop consul
      ansible.builtin.service:
        name: consul
        state: stopped
--------------------------------------------------------------------------------
/ansible/roles/fluentd-jitsi/templates/config.oci.j2:
--------------------------------------------------------------------------------
[DEFAULT]
user={{ oci_logging_user_id }}
fingerprint={{ oci_logging_private_key_fingerprint }}
tenancy={{ oci_logging_tenancy }}
region={{ oci_logging_region }}
key_file=/var/lib/td-agent/.oci/private.pem
--------------------------------------------------------------------------------
}} 4 | tenancy={{ oci_logging_tenancy }} 5 | region={{ oci_logging_region }} 6 | key_file=/var/lib/td-agent/.oci/private.pem 7 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-jigasi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | haproxy_conf_path: /etc/haproxy 3 | haproxy_jigasi_agent_enabled: false 4 | haproxy_jigasi_max_servers: 500 5 | haproxy_jigasi_path_to_config_script: /usr/local/bin/configure-haproxy-jigasi.sh 6 | haproxy_jigasi_servers: [] 7 | -------------------------------------------------------------------------------- /ansible/roles/jiconop/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jiconop_bosh_url: "http://localhost/http-bind" 3 | jiconop_configure_flag: true 4 | jiconop_deb_pkg_name: "jiconop" 5 | jiconop_deb_pkg_version: "*" 6 | jiconop_enabled: true 7 | jiconop_install_flag: true 8 | jiconop_port: 9615 9 | -------------------------------------------------------------------------------- /ansible/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart nginx 3 | ansible.builtin.service: 4 | name: nginx 5 | state: restarted 6 | enabled: true 7 | 8 | - name: Stop nginx 9 | ansible.builtin.service: 10 | name: nginx 11 | state: stopped 12 | -------------------------------------------------------------------------------- /ansible/roles/jibri-java/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: jitsi-repo, when: jibri_install_flag } 4 | - { role: chromedriver, when: jibri_install_flag } 5 | - { role: google-chrome, when: jibri_install_flag} 6 | - { role: openjdk-java, when: jibri_install_flag } -------------------------------------------------------------------------------- /ansible/roles/jicofo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: jicofo_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: jicofo_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/jiconop/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: jiconop_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: jiconop_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/jigasi/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: jigasi_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: jigasi_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/fluentd-jitsi/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install steps 3 | ansible.builtin.include_tasks: install.yml 4 | when: 
fluentd_install_flag 5 | 6 | - name: Include configure steps 7 | ansible.builtin.include_tasks: configure.yml 8 | when: fluentd_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-tenant-pin/templates/config.j2: -------------------------------------------------------------------------------- 1 | TENANT_MAP_PATH={{ haproxy_tenant_pin_map_path }} 2 | CONSUL_URL=http://localhost:8500 3 | ENVIRONMENT={{ hcv_environment }} 4 | DAEMON_MODE=True 5 | DAEMON_TICK_DURATION={{ haproxy_tenant_pin_tick_duration }} 6 | STATSD_ENABLED=True 7 | -------------------------------------------------------------------------------- /ansible/roles/jiconop/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # eventually build a config file for jiconop 3 | - name: Install jiconop configuration file 4 | ansible.builtin.template: 5 | mode: 0644 6 | src: "config.j2" 7 | dest: "/etc/jitsi/jiconop/config" 8 | notify: Restart jiconop 9 | -------------------------------------------------------------------------------- /ansible/roles/consul-selenium-grid-hub/templates/selenium-grid-hub.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "service": { 3 | "name": "selenium-grid-hub", 4 | "tags":["{{ selenium_grid_name }}"], 5 | "meta": { 6 | "grid":"{{ selenium_grid_name }}" 7 | }, 8 | "port": 5555 9 | } 10 | } -------------------------------------------------------------------------------- /ansible/roles/jitsi-meet/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart nginx 3 | service: name=nginx state=restarted enabled=yes 4 | 5 | - name: reload nginx 6 | service: name=nginx state=reloaded 7 | 8 | - name: reload prosody plugins 9 | service: name=prosody state=restarted 10 | -------------------------------------------------------------------------------- /ansible/roles/testrtc/templates/testrtc.systemd.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=TestRTC service at port {{ testrtc_port }} 3 | 4 | [Service] 5 | WorkingDirectory={{ testrtc_base_path }} 6 | Type=idle 7 | ExecStart=dev_appserver.py --port {{ testrtc_port }} --enable_host_checking false out/app.yaml 8 | -------------------------------------------------------------------------------- /ansible/roles/google-cloud/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: google_cloud_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: google_cloud_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/jigasi/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart jigasi 2 | ansible.builtin.service: 3 | name: jigasi 4 | state: restarted 5 | when: not jigasi_configure_only_flag 6 | 7 | - name: Perform systemctl daemon-reload 8 | ansible.builtin.systemd: 9 | daemon_reload: true 10 | -------------------------------------------------------------------------------- /ansible/roles/jvb-rtcstats-push/templates/rsyslog.config.j2: -------------------------------------------------------------------------------- 1 | 
template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n") 2 | if $programname == '{{ jvb_rtcstats_push_service_name }}' then {{ jvb_rtcstats_push_log_dir }}/agent.log;SimpleJSONFormat 3 | & stop -------------------------------------------------------------------------------- /ansible/roles/sip-jibri-sidecar/templates/rsyslog.config.j2: -------------------------------------------------------------------------------- 1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n") 2 | if $programname == '{{ sip_jibri_sidecar_service_name }}' then {{ sip_jibri_log_dir }}/sidecar.log;SimpleJSONFormat 3 | & stop 4 | 5 | -------------------------------------------------------------------------------- /ansible/roles/autoscaler-sidecar/templates/rsyslog.config.j2: -------------------------------------------------------------------------------- 1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n") 2 | if $programname == '{{ autoscaler_sidecar_service_name }}' then {{ autoscaler_log_dir }}/sidecar.log;SimpleJSONFormat 3 | & stop 4 | 5 | -------------------------------------------------------------------------------- /ansible/roles/coturn/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart coturn 3 | ansible.builtin.service: 4 | name: coturn 5 | state: restarted 6 | 7 | - name: Restart coturn systemd 8 | ansible.builtin.systemd: 9 | name: coturn 10 | state: restarted 11 | daemon_reload: true 12 | -------------------------------------------------------------------------------- /ansible/roles/jigasi-haproxy-agent/templates/rsyslog.config.j2: -------------------------------------------------------------------------------- 1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n") 2 | if $programname == '{{ jigasi_haproxy_agent_service_name }}' then {{ jigasi_haproxy_agent_log_dir }}/agent.log;SimpleJSONFormat 3 | & stop -------------------------------------------------------------------------------- /ansible/roles/jigasi-rtcstats-push/templates/rsyslog.config.j2: -------------------------------------------------------------------------------- 1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n") 2 | if $programname == '{{ jigasi_rtcstats_push_service_name }}' then {{ jigasi_rtcstats_push_log_dir }}/agent.log;SimpleJSONFormat 3 | & stop -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge-ddns/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: jvb_ddns_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: jvb_ddns_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/monit/files/modebug: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | { 3 | echo "MONIT-WRAPPER date" 4 | date 5 | echo "MONIT-WRAPPER env" 6 | env 7 | echo "MONIT-WRAPPER $@" 8 | $@ 9 | R=$? 
10 | echo "MONIT-WRAPPER exit code $R" 11 | } 2>&1 >> /var/log/monit-debug.log -------------------------------------------------------------------------------- /ansible/roles/nginx/files/status_server: -------------------------------------------------------------------------------- 1 | server { 2 | listen 127.0.0.1:888; 3 | listen [::1]:888; 4 | server_name localhost; 5 | location /nginx_status { 6 | stub_status on; 7 | access_log off; 8 | allow 127.0.0.1; 9 | deny all; 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /ansible/roles/signal-sidecar/templates/rsyslog.config.j2: -------------------------------------------------------------------------------- 1 | template(name="SimpleJSONFormat" type="string" string= "%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n") 2 | if $programname == '{{ signal_sidecar_service_name }}' then {{ signal_sidecar_log_dir }}/signal-sidecar.log;SimpleJSONFormat 3 | & stop 4 | -------------------------------------------------------------------------------- /ansible/configure-users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Main 3 | hosts: all 4 | become_user: root 5 | become: true 6 | gather_facts: true 7 | vars_files: 8 | - secrets/ssh-users.yml 9 | roles: 10 | - { role: "sshusers", tags: "users", ssh_users_accounts_flag: true, ssh_users_config_flag: false } 11 | -------------------------------------------------------------------------------- /ansible/roles/jenkins/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jenkins_sitename: "jenkins-opsdev.jitsi.net" 3 | jenkins_ssl_certificate: "{{ jitsi_net_ssl_certificate }}{{ jitsi_net_ssl_extras }}" 4 | jenkins_ssl_dest_dir: /etc/nginx/ssl 5 | jenkins_ssl_key_name: "{{ jitsi_net_ssl_key_name }}" 6 | jenkins_install_flag: true 7 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-meet/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | shard_region: "{{ ansible_ec2_placement_region if ansible_ec2_placement_region is defined and ansible_ec2_placement_region 3 | else oracle_to_aws_region_map[oracle_region] if oracle_region is defined and oracle_region in oracle_to_aws_region_map else 'default' }}" 4 | -------------------------------------------------------------------------------- /ansible/roles/jvb-rtcstats-push/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: jvb_rtcstats_push_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: jvb_rtcstats_push_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/wavefront/tasks/proxy/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Wavefront Proxy (RedHat) 3 | ansible.builtin.yum: 4 | name: "{{ wavefront_proxy_pkg }}" 5 | state: present 6 | register: result 7 | until: result.rc == 0 8 | tags: 9 | - install 10 | - redhat 11 | - proxy 12 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-tenant-pin/templates/rsyslog.config.j2: -------------------------------------------------------------------------------- 1 | 
template(name="SimpleJSONFormat" type="string" string= "%timegenerated% %msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\n") 2 | if $programname == '{{ haproxy_tenant_pin_service_name }}' then {{ haproxy_tenant_pin_log_dir }}/tenant-pin.log;SimpleJSONFormat 3 | & stop 4 | -------------------------------------------------------------------------------- /ansible/roles/jigasi-haproxy-agent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: jigasi_haproxy_agent_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: jigasi_haproxy_agent_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/jigasi-rtcstats-push/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: jigasi_rtcstats_push_install_flag 5 | 6 | - name: Include configure tasks 7 | ansible.builtin.include_tasks: configure.yml 8 | when: jigasi_rtcstats_push_configure_flag 9 | -------------------------------------------------------------------------------- /ansible/roles/consul-standalone/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul_standalone_cloud_provider: "{{ cloud_provider | default('aws') }}" 3 | consul_standalone_private_ip: "{{ ansible_default_ipv4.address }}" 4 | consul_standalone_public_ip: "{{ oracle_public_ip if consul_standalone_cloud_provider == 'oracle' else ansible_ec2_public_ipv4 }}" 5 | -------------------------------------------------------------------------------- /ansible/roles/jicofo/templates/jicofo-stats.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # pull in environment vars needed for reporting 3 | {% if cloud_provider == 'oracle' -%} 4 | . /usr/local/bin/oracle_cache.sh 5 | {% else -%} 6 | . 
/usr/local/bin/aws_cache.sh 7 | {% endif -%} 8 | 9 | # pull and report stats 10 | /usr/local/bin/jicofo-stats.py 11 | -------------------------------------------------------------------------------- /ansible/roles/testrtc/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Generate testRTC credentials # noqa ignore-errors 3 | ansible.builtin.command: /usr/local/bin/testrtc_credentials 4 | ignore_errors: true 5 | 6 | - name: Reload nginx 7 | ansible.builtin.service: 8 | name: nginx 9 | enabled: true 10 | state: reloaded 11 | -------------------------------------------------------------------------------- /ansible/roles/wavefront/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart wavefront-proxy 3 | service: 4 | name: wavefront-proxy 5 | state: restarted 6 | enabled: yes 7 | become: yes 8 | 9 | - name: restart telegraf 10 | service: 11 | name: telegraf 12 | state: restarted 13 | enabled: yes 14 | become: yes 15 | -------------------------------------------------------------------------------- /ansible/roles/jibri-kernel/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jibri_kernel_cloud_provider: "{{ cloud_provider | default('aws') }}" 3 | jibri_kernel_clean_aws: true 4 | jibri_kernel_headers_package: "linux-headers-{{ jibri_kernel_package_type }}" 5 | jibri_kernel_package: "linux-image-{{ jibri_kernel_package_type }}" 6 | jibri_kernel_package_type: virtual 7 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-torture/files/auth0-authenticate/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "auth0-authenticate", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "auth0-authenticate.js", 6 | "keywords": [], 7 | "author": "", 8 | "license": "ISC", 9 | "dependencies": { 10 | "set-cookie-parser": "^2.4.7" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /ansible/roles/wavefront/tasks/proxy/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Wavefront Proxy (Ubuntu) 3 | ansible.builtin.apt: 4 | name: "{{ wavefront_proxy_pkg }}={{ wavefront_proxy_version }}" 5 | state: present 6 | register: result 7 | until: result is succeeded 8 | tags: 9 | - install 10 | - debian 11 | - proxy 12 | -------------------------------------------------------------------------------- /ansible/roles/autoscaler-sidecar/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install autoscaler 3 | ansible.builtin.include_tasks: install.yml 4 | when: autoscaler_install_flag and jitsi_autoscaler_sidecar 5 | 6 | - name: Configure autoscaler 7 | ansible.builtin.include_tasks: configure.yml 8 | when: autoscaler_configure_flag and jitsi_autoscaler_sidecar 9 | -------------------------------------------------------------------------------- /ansible/roles/testrtc/templates/testrtc.upstart.j2: -------------------------------------------------------------------------------- 1 | description "TestRTC service at port {{ testrtc_port }}" 2 | 3 | stop on runlevel [06] 4 | 5 | respawn 6 | script 7 | chdir {{ testrtc_base_path }} 8 | echo $$ > /var/run/testrtc.pid 9 | exec dev_appserver.py --port {{ testrtc_port }} 
--enable_host_checking false out/app.yaml 10 | end script -------------------------------------------------------------------------------- /ansible/roles/wavefront/tasks/telegraf/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install telegraf agent/collector (RedHat) 3 | ansible.builtin.yum: 4 | name: "{{ wavefront_collector }}" 5 | state: present 6 | update_cache: true 7 | register: result 8 | until: result.rc == 0 9 | tags: 10 | - install 11 | - redhat 12 | - collector 13 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-lua/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | haproxy_lua_lua_version: 5.3 3 | haproxy_lua_rocks_checksum: eb20cd9814df05535d9aae98da532217c590fc07d48d90ca237e2a7cdcf284fe 4 | haproxy_lua_rocks_url: "https://luarocks.org/releases/luarocks-{{ haproxy_lua_rocks_version }}.tar.gz" 5 | haproxy_lua_rocks_version: 3.3.1 6 | haproxy_lua_src_dir: /tmp/luarocks 7 | -------------------------------------------------------------------------------- /ansible/roles/hcv-haproxy-status/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | hcv_environment: all 3 | hcv_haproxy_status_snapshot: false 4 | hcv_haproxy_status_path: "../../haproxy-status/{{ hcv_environment }}{{ '_snapshot' if hcv_haproxy_status_snapshot == 'true' else '' }}" 5 | hcv_haproxy_status_lock_file: '/tmp/haproxy-status.lock' 6 | hcv_haproxy_status_ignore_lock: false 7 | -------------------------------------------------------------------------------- /ansible/roles/consul-agent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install consul config file 3 | ansible.builtin.template: 4 | mode: 0640 5 | src: "consul.hcl.j2" 6 | dest: "/etc/consul.d/consul.hcl" 7 | 8 | - name: Install consul env file 9 | ansible.builtin.copy: 10 | mode: 0640 11 | content: "" 12 | dest: "/etc/consul.d/consul.env" 13 | -------------------------------------------------------------------------------- /ansible/roles/consul-agent/templates/consul.hcl.j2: -------------------------------------------------------------------------------- 1 | datacenter = "{{ consul_datacenter }}" 2 | data_dir = "/opt/consul" 3 | bind_addr = "{{ ansible_default_ipv4.address }}" 4 | advertise_addr = "{{ ansible_default_ipv4.address }}" 5 | {% if consul_encryption_key %} 6 | encrypt = "{{ consul_encryption_key }}" 7 | {% endif %} 8 | retry_join ={{ consul_retry_join_lan }} 9 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-tenant-pin/defaults/main.yml: -------------------------------------------------------------------------------- 1 | haproxy_tenant_pin_configure_flag: false 2 | haproxy_tenant_pin_enabled: false 3 | haproxy_tenant_pin_install_flag: false 4 | haproxy_tenant_pin_log_dir: "/var/log" 5 | haproxy_tenant_pin_map_path: /etc/haproxy/maps/tenant.map 6 | haproxy_tenant_pin_service_name: "tenant-pin" 7 | haproxy_tenant_pin_tick_duration: 5 8 | -------------------------------------------------------------------------------- /ansible/roles/jigasi/templates/monitor-terminating-instance.conf.j2: -------------------------------------------------------------------------------- 1 | description "Terminating Instance Monitor" 2 | 3 | start on filesystem and net-device-up IFACE={{ 
ansible_default_ipv4.interface }} 4 | 5 | stop on runlevel [016] 6 | 7 | respawn 8 | chdir /usr/local/bin 9 | 10 | exec /usr/local/bin/monitor-terminating-instance.sh 11 | respawn limit 10 90 -------------------------------------------------------------------------------- /ansible/roles/selenium-grid/templates/selenium-xvfb.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Selenium Xvfb 3 | After=network.target 4 | 5 | [Service] 6 | User={{ selenium_grid_username }} 7 | Group={{ selenium_grid_groupname }} 8 | ExecStart=/usr/bin/Xvfb :99 -screen 0 1024x768x24 9 | Restart=on-failure 10 | RestartPreventExitStatus=255 11 | Type=simple 12 | 13 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge/files/monitor-terminating-instance.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=JVB Terminating Instance Monitor 3 | After=network.target 4 | 5 | [Service] 6 | WorkingDirectory=/usr/local/bin 7 | ExecStart=/usr/local/bin/monitor-terminating-instance.sh 8 | Restart=on-failure 9 | Type=simple 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge/templates/monitor-terminating-instance.conf.j2: -------------------------------------------------------------------------------- 1 | description "Terminating Instance Monitor" 2 | 3 | start on filesystem and net-device-up IFACE={{ ansible_default_ipv4.interface }} 4 | 5 | stop on runlevel [016] 6 | 7 | respawn 8 | chdir /usr/local/bin 9 | 10 | exec /usr/local/bin/monitor-terminating-instance.sh 11 | respawn limit 10 90 -------------------------------------------------------------------------------- /ansible/roles/coturn/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Aaron van Meerten 4 | description: configure coturn 5 | license: Apache License Version 2.0 6 | min_ansible_version: '6.6.0' 7 | platforms: 8 | - name: Ubuntu 9 | releases: 10 | - focal 11 | - jammy 12 | dependencies: 13 | - {role: jitsi-repo} 14 | - {role: monit} 15 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge/files/reconfigure-jvb-oracle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #first rebuild the configuration files 4 | CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jitsi-videobridge" /usr/local/bin/configure-jvb-local-oracle.sh 5 | 6 | echo "JVB configuration signaling" 7 | /usr/local/bin/configure-jvb-shards.sh 8 | RET=$? 
9 | echo "JVB reconfiguration completed" 10 | exit $RET -------------------------------------------------------------------------------- /ansible/roles/jibri-java/templates/monitor-terminating-instance-systemd.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Jibri Terminating Instance Monitor 3 | After=network.target 4 | 5 | [Service] 6 | WorkingDirectory={{ jibri_scripts_dir }} 7 | ExecStart={{ jibri_scripts_dir }}/monitor-terminating-instance.sh 8 | Restart=on-failure 9 | Type=simple 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /ansible/roles/consul-jigasi/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Consul service file 3 | ansible.builtin.template: 4 | mode: 0644 5 | src: "jigasi.json.j2" 6 | dest: "/etc/consul.d/jigasi.json" 7 | 8 | - name: Enable consul service # noqa ignore-errors 9 | ansible.builtin.systemd: 10 | name: consul 11 | state: started 12 | enabled: true 13 | ignore_errors: true 14 | -------------------------------------------------------------------------------- /ansible/roles/jibri-java/templates/graceful_shutdown_terminate_oracle.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | GRACEFUL_SHUTDOWN="/opt/jitsi/jibri/wait_graceful_shutdown.sh" 3 | TERMINATE_INSTANCE="{{ jibri_path_to_terminate_instance_script }}" 4 | 5 | # run the graceful shutdown and wait for it to finish 6 | sudo "$GRACEFUL_SHUTDOWN" 7 | 8 | # now terminate the instance 9 | sudo "$TERMINATE_INSTANCE" 10 | 11 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge/files/reconfigure-jvb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #first rebuild the configuration files 4 | CONFIGURE_ONLY=true ANSIBLE_TAGS="setup,jitsi-videobridge" /usr/local/bin/configure-jvb-local.sh 5 | 6 | echo "JVB configuration signaling" 7 | #now gracefully reload jibri 8 | /usr/local/bin/configure-jvb-shards.sh 9 | RET=$? 
10 | echo "JVB reload completed" 11 | exit $RET -------------------------------------------------------------------------------- /ansible/roles/signal-sidecar/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: 5 | - signal_sidecar_install_flag 6 | - signal_sidecar_enabled 7 | 8 | - name: Include configure tasks 9 | ansible.builtin.include_tasks: configure.yml 10 | when: 11 | - signal_sidecar_configure_flag 12 | - signal_sidecar_enabled 13 | -------------------------------------------------------------------------------- /ansible/roles/consul-haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install consul service file 3 | ansible.builtin.template: 4 | mode: 0644 5 | src: "haproxy.json.j2" 6 | dest: "/etc/consul.d/haproxy.json" 7 | 8 | - name: Enable consul service # noqa ignore-errors 9 | ansible.builtin.systemd: 10 | name: consul 11 | state: started 12 | enabled: true 13 | ignore_errors: true 14 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-repo/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jitsi_repo_host: internal-repo.jitsi.net 3 | jitsi_repo_password: "default" 4 | jitsi_repo_url: "https://{{ jitsi_repo_host }}/debian" 5 | jitsi_auth_url: "https://{{ jitsi_repo_username }}:{{ jitsi_repo_password }}@{{ jitsi_repo_host }}/debian" 6 | jitsi_auth_url_old: "https://jitsi:j1ts1r3p0@{{ jitsi_repo_host }}/debian" 7 | jitsi_repo_username: "repo" 8 | -------------------------------------------------------------------------------- /ansible/roles/rsyslog/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart rsyslog 3 | tags: 4 | - rsyslog 5 | - configuration 6 | ansible.builtin.service: 7 | name: "{{ rsyslog_service_name }}" 8 | state: restarted 9 | 10 | - name: Reload apparmor 11 | tags: 12 | - rsyslog 13 | - configuration 14 | ansible.builtin.service: 15 | name: apparmor 16 | state: reloaded 17 | -------------------------------------------------------------------------------- /ansible/roles/consul-haproxy/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_haproxy 4 | author: Aaron van Meerten 5 | description: configure consul for haproxy 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - {role: consul-agent} 15 | -------------------------------------------------------------------------------- /ansible/jenkins-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Main 3 | hosts: all 4 | gather_facts: true 5 | become_user: root 6 | become: true 7 | vars_files: 8 | - secrets/ssh-users.yml 9 | - secrets/ssl-certificates.yml 10 | 11 | roles: 12 | - { role: "iptables-jenkins", tags: "iptables"} 13 | - { role: "jenkins", tags: "jenkins"} 14 | - { role: "jenkins-sshkey", tags: "jenkins-sshkey"} 15 | -------------------------------------------------------------------------------- /ansible/roles/consul-standalone/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - 
name: Install consul service file 3 | ansible.builtin.template: 4 | mode: 0644 5 | src: "standalone.json.j2" 6 | dest: "/etc/consul.d/standalone.json" 7 | 8 | - name: Enable consul service # noqa ignore-errors 9 | ansible.builtin.systemd: 10 | name: consul 11 | state: started 12 | enabled: true 13 | ignore_errors: true 14 | -------------------------------------------------------------------------------- /ansible/roles/hcv-haproxy-rsyslog/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Suppress recurring drain log message from agent 3 | ansible.builtin.lineinfile: 4 | dest: /etc/rsyslog.d/49-haproxy.conf 5 | insertafter: '^\$AddUnixListenSocket /var/lib/haproxy/dev/log$' 6 | line: ":msg, contains, \"remains in forced drain mode.\" stop" 7 | when: haproxy_reconfigure_rsyslog 8 | notify: Restart haproxy rsyslog 9 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | gathering = smart 4 | fact_caching = jsonfile 5 | fact_caching_connection = .facts 6 | fact_caching_timeout = 86400 7 | timeout = 90 8 | vault_password_file = .vault-password.txt 9 | 10 | [ssh_connection] 11 | control_path = %(directory)s/%%h-%%r 12 | ssh_args = -o ControlPersist=15m -F config/ssh.config -q 13 | scp_if_ssh = True 14 | pipelining = True 15 | -------------------------------------------------------------------------------- /ansible/roles/consul-server/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_server 4 | author: Aaron van Meerten 5 | description: install and configure consul server 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: consul-install } 15 | -------------------------------------------------------------------------------- /ansible/roles/coturn/templates/check-files-limits-oracle.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | open_files_limit=$(cat /etc/default/coturn|grep ulimit|cut -d' ' -f3) 4 | currently_opened_files_count=$(lsof -p $(cat /var/run/turnserver.pid)|wc -l) 5 | 6 | if [[ $currently_opened_files_count -ge $open_files_limit ]]; then 7 | # we do not set alarms for oracle, just return error 8 | exit 1 9 | else 10 | exit 0 11 | fi -------------------------------------------------------------------------------- /ansible/roles/haproxy-tenant-pin/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install.yml 4 | when: 5 | - haproxy_tenant_pin_install_flag 6 | - haproxy_tenant_pin_enabled 7 | 8 | - name: Include configure tasks 9 | ansible.builtin.include_tasks: configure.yml 10 | when: 11 | - haproxy_tenant_pin_configure_flag 12 | - haproxy_tenant_pin_enabled 13 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-tenant-pin/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Generate config file 3 | ansible.builtin.template: 4 | mode: 0644 5 | src: config.j2 6 | dest: '/etc/{{ haproxy_tenant_pin_service_name }}.conf' 
notify: 8 | - Restart tenant-pin 9 | 10 | - name: Start tenant-pin service 11 | ansible.builtin.service: 12 | name: tenant-pin 13 | state: started 14 | enabled: true 15 | -------------------------------------------------------------------------------- /ansible/roles/consul-haproxy-jigasi/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_haproxy_jigasi 4 | author: Aaron van Meerten 5 | description: configure consul on jigasi haproxy 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - {role: consul-agent} 15 | -------------------------------------------------------------------------------- /ansible/roles/unattended-upgrades/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | unattended_upgrades_auto_reboot: false 3 | unattended_upgrades_auto_remove: false 4 | unattended_upgrades_email_address: root@localhost 5 | unattended_upgrades_origins: 6 | - '${distro_id} ${distro_codename}-security' 7 | unattended_upgrades_send_email: false 8 | unattended_upgrades_update_package_lists_interval: 1 9 | unattended_upgrades_upgrade_interval: 1 10 | -------------------------------------------------------------------------------- /ansible/roles/consul-jigasi/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_jigasi 4 | author: Aaron van Meerten 5 | description: configure consul for jigasi 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - {role: consul-agent, consul_install_flag: false} 15 | -------------------------------------------------------------------------------- /ansible/roles/consul-selenium-grid-hub/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install consul service file 3 | ansible.builtin.template: 4 | mode: 0644 5 | src: "selenium-grid-hub.json.j2" 6 | dest: "/etc/consul.d/selenium-grid-hub.json" 7 | 8 | - name: Enable consul service # noqa ignore-errors 9 | ansible.builtin.systemd: 10 | name: consul 11 | state: started 12 | enabled: true 13 | ignore_errors: true 14 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-dumper/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jitsi_dump_cloud_provider: "{{ cloud_provider | default('aws') }}" 3 | jitsi_dump_jibri_sns_topic: arn:aws:sns:us-west-2:103425057857:Jibri-Dumps 4 | jitsi_dump_s3_bucket: jitsi-infra-dumps 5 | jitsi_dump_sns_region: us-west-2 6 | jitsi_dump_sns_topic: arn:aws:sns:us-west-2:103425057857:JVB-Dumps 7 | # disable dump jvb script by default 8 | jitsi_dumper_prosody_dump_jvb_mode: 0644 9 | -------------------------------------------------------------------------------- /ansible/roles/coturn/templates/check-files-limits.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | open_files_limit=$(cat /etc/default/coturn|grep ulimit|cut -d' ' -f3) 4 | currently_opened_files_count=$(lsof -p $(cat /var/run/turnserver.pid)|wc -l) 5 | 6 | if [[ $currently_opened_files_count -ge $open_files_limit ]]; then 7 | 
{{coturn_scripts_path}}/coturn-set-alarms.sh alarm CoturnOpenedFilesLimitFailed 8 | exit 1 9 | else 10 | exit 0 11 | fi -------------------------------------------------------------------------------- /ansible/roles/prosody/templates/jvb_muc_presence_filter.pfw.j2: -------------------------------------------------------------------------------- 1 | # Drop all presence from a jvb in a MUC to a jvb 2 | FROM: {{ prosody_jvb_brewery_muc }} 3 | TO: {{ prosody_jvb_auth_user }}@{{ prosody_jvb_auth_domain_name }} 4 | KIND: presence 5 | # Seems safer to allow all "unavailable" to pass 6 | TYPE: available 7 | # Allow self-presence (code=110) 8 | NOT INSPECT: {http://jabber.org/protocol/muc#user}x/status@code=110 9 | DROP. 10 | -------------------------------------------------------------------------------- /ansible/jvb-colibri-proxy-nginx.yaml: -------------------------------------------------------------------------------- 1 | - name: Main 2 | hosts: all 3 | gather_facts: true 4 | become_user: root 5 | become: true 6 | force_handlers: true 7 | gather_timeout: 180 8 | vars_files: 9 | - config/vars.yml 10 | - sites/{{ hcv_environment }}/vars.yml 11 | vars: 12 | cloud_provider: oracle 13 | shard_role: haproxy 14 | roles: 15 | - { role: "jvb-colibri-proxy", "tags": "jvb-colibri-proxy"} 16 | -------------------------------------------------------------------------------- /ansible/roles/consul-agent/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_agent 4 | author: Aaron van Meerten 5 | description: install and configure consul agent 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: consul-install, when: consul_install_flag } 15 | -------------------------------------------------------------------------------- /ansible/roles/consul-signal/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_server_signal 4 | author: Aaron van Meerten 5 | description: install and configure consul server for signal nodes 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: consul-agent } 15 | -------------------------------------------------------------------------------- /ansible/roles/consul-standalone/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_standalone 4 | author: Aaron van Meerten 5 | description: install and configure consul server for standalone nodes 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - {role: consul-agent} 15 | -------------------------------------------------------------------------------- /ansible/roles/jibri-pjsua/templates/jibri-icewm2.systemd.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Jibri Window Manager 3 | Requires=jibri-xorg2.service 4 | After=jibri-xorg2.service 5 | 6 | [Service] 7 | User={{ jibri_username }} 8 | Group={{ jibri_groupname }} 9 | Environment=DISPLAY=:1 10 | ExecStart=/usr/bin/icewm-session 11 | Restart=on-failure 12 | RestartPreventExitStatus=255 13 | 
Type=simple 14 | 15 | [Install] 16 | WantedBy=jibri.service 17 | -------------------------------------------------------------------------------- /ansible/roles/jigasi-web/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jigasi_web 4 | author: Aaron van Meerten 5 | description: configure web server for jigasi user output 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nginx, when: jigasi_web_install_flag } 15 | -------------------------------------------------------------------------------- /ansible/roles/jigasi/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Aaron van Meerten 4 | description: configure jigasi 5 | license: Apache License Version 2.0 6 | min_ansible_version: '6.6.0' 7 | platforms: 8 | - name: Ubuntu 9 | releases: 10 | - focal 11 | - jammy 12 | dependencies: 13 | - { role: jitsi-repo, when: jigasi_install_flag } 14 | - { role: openjdk-java, when: jigasi_install_flag } 15 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-torture/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jitsi_torture 4 | author: Aaron van Meerten 5 | description: install and run jitsi torture 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: google-chrome } 15 | - { role: chromedriver } 16 | -------------------------------------------------------------------------------- /ansible/roles/ntp/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "René Moser" 4 | license: BSD 5 | description: NTP role 6 | min_ansible_version: '1.4' 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - precise 11 | - quantal 12 | - raring 13 | - saucy 14 | - name: Debian 15 | versions: 16 | - wheezy 17 | galaxy_tags: 18 | - system 19 | dependencies: [] 20 | -------------------------------------------------------------------------------- /ansible/roles/selenium-grid/templates/selenium-grid-hub.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Selenium Grid 3 | After=network.target 4 | 5 | [Service] 6 | User={{ selenium_grid_username }} 7 | Group={{ selenium_grid_groupname }} 8 | ExecStart={{ selenium_grid_java_path }} -jar {{ selenium_grid_file }} -role hub -hubConfig {{ selenium_grid_hub_config_file }} 9 | Restart=on-failure 10 | RestartPreventExitStatus=255 11 | Type=simple 12 | 13 | -------------------------------------------------------------------------------- /ansible/roles/consul-selenium-grid-hub/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: consul_selenium_grid_hub 4 | author: Aaron van Meerten 5 | description: install and configure consul for selenium grid 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: consul-agent } 15 | 
-------------------------------------------------------------------------------- /ansible/roles/nodejs/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Mark Wolfe 4 | description: Installs the NodeSource Node.js binary packages 5 | company: NodeSource 6 | license: MIT 7 | min_ansible_version: 1.2 8 | platforms: 9 | - name: Ubuntu 10 | versions: 11 | - precise 12 | - trusty 13 | categories: 14 | - development 15 | - networking 16 | - packaging 17 | - web 18 | dependencies: [] 19 | -------------------------------------------------------------------------------- /ansible/roles/selenium-grid/templates/selenium-grid-extras-hub.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Selenium Grid 3 | After=network.target 4 | 5 | [Service] 6 | User={{ selenium_grid_username }} 7 | Group={{ selenium_grid_groupname }} 8 | ExecStart={{ selenium_grid_java_path }} -jar {{ selenium_grid_extras_file }} 9 | Restart=on-failure 10 | RestartPreventExitStatus=255 11 | Type=simple 12 | WorkingDirectory={{ selenium_grid_extras_path }} 13 | -------------------------------------------------------------------------------- /ansible/roles/selenium-grid/templates/selenium-grid-hub.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "port": 4444, 3 | "newSessionWaitTimeout": -1, 4 | "servlets" : [], 5 | "withoutServlets": [], 6 | "custom": {}, 7 | "capabilityMatcher": "org.openqa.grid.internal.utils.DefaultCapabilityMatcher", 8 | "throwOnCapabilityNotPresent": true, 9 | "cleanUpCycle": 5000, 10 | "role": "hub", 11 | "debug": false, 12 | "browserTimeout": 60, 13 | "timeout": 60 14 | } -------------------------------------------------------------------------------- /ansible/roles/jigasi-haproxy-agent/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jigasi_haproxy_agent 4 | author: Aaron van Meerten 5 | description: configure haproxy agent for jigasi 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nodejs, when: jigasi_haproxy_agent_install_flag } 15 | -------------------------------------------------------------------------------- /ansible/roles/jigasi-rtcstats-push/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jigasi_rtcstats_push 4 | author: Aaron van Meerten 5 | description: configure rtcstats push for jigasi 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nodejs, when: jigasi_rtcstats_push_install_flag } 15 | -------------------------------------------------------------------------------- /ansible/roles/jigasi/files/monitor-terminating-instance.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Jigasi Terminating Instance Monitor 3 | After=network.target 4 | 5 | [Service] 6 | WorkingDirectory=/usr/local/bin 7 | ExecStart=/usr/local/bin/monitor-terminating-instance.sh 8 | Restart=on-failure 9 | Type=simple 10 | StandardOutput=syslog 11 | StandardError=syslog 12 | SyslogIdentifier=monitor-terminating-instance 13 | 14 | 
[Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge/files/configure-jvb-oracle.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | 4 | import json 5 | 6 | # Oracle does not use multiple shards 7 | # Create dummy shard details for now 8 | def main(): 9 | local_shard = 'standalone' 10 | facts = { 11 | 'shard': local_shard 12 | } 13 | facts['shards'] = {local_shard: dict(facts)} 14 | 15 | print(json.dumps(facts)) 16 | 17 | if __name__ == '__main__': 18 | main() 19 | -------------------------------------------------------------------------------- /ansible/roles/common/files/download.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ $# -lt 1 ]; then 3 | echo "Usage: download.sh file ..." 4 | exit 1 5 | fi 6 | for fn in "$@" 7 | do 8 | if [ -r "$fn" ] ; then 9 | printf '\033]1337;File=name='`echo -n "$fn" | base64`";" 10 | wc -c "$fn" | awk '{printf "size=%d",$1}' 11 | printf ":" 12 | base64 < "$fn" 13 | printf '\a' 14 | else 15 | echo "File $fn does not exist or is not readable." 16 | fi 17 | done 18 | -------------------------------------------------------------------------------- /ansible/roles/hcv-haproxy-configure/templates/haproxy_default.j2: -------------------------------------------------------------------------------- 1 | # Defaults file for HAProxy 2 | # 3 | # This is sourced by both the initscript and the systemd unit file, so do not 4 | # treat it as a shell script fragment. 5 | 6 | # Change the config file location if needed 7 | #CONFIG="/etc/haproxy/haproxy.cfg" 8 | 9 | # Add extra flags here, see haproxy(1) for a few options 10 | #EXTRAOPTS="-de -m 16" 11 | EXTRAOPTS="-L {{ ansible_hostname.split('.')[0] }}" -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge-ddns/templates/cleanup_route53_dns.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -v 2 | set -x 3 | #make sure we exit early if we fail any step 4 | set -e 5 | 6 | # clean up the Route53 DNS 7 | cd /usr/share/jitsi-ddns-lambda 8 | node index.js update_by_info --action remove --instance_name {{ jvb_ddns_hostname }} --zone_id {{ jvb_ddns_zone }} --ipv4_addr "{{ jvb_ddns_ipv4_addr }}" {% if ipv6_addr %} --ipv6_addr "{{ ipv6_addr }}"{% endif %} || true 9 | cd - 10 | -------------------------------------------------------------------------------- /ansible/roles/nodejs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | nodejs_architecture: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}" 3 | nodejs_from_apt: false 4 | # Pin-Priority of NodeSource repository 5 | nodejs_nodesource_pin_priority: 500 6 | nodejs_url: "https://{{ jitsi_repo_username }}:{{ jitsi_repo_password }}@{{ jitsi_repo_host }}/debian/misc/nodejs_12.22.9-deb-1nodesource1_{{ nodejs_architecture }}.deb" 7 | # 0.10 or 0.12 or 4.x 8 | nodejs_version: "12.*" 9 | -------------------------------------------------------------------------------- /ansible/roles/selenium-grid/templates/hub_4444.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "port": 4444, 3 | "newSessionWaitTimeout": -1, 4 | "servlets": [ 5 | "com.groupon.seleniumgridextras.grid.servlets.ProxyStatusJsonServlet" 6 | ], 7 | 
"capabilityMatcher": "org.openqa.grid.internal.utils.DefaultCapabilityMatcher", 8 | "throwOnCapabilityNotPresent": true, 9 | "nodePolling": 5000, 10 | "cleanUpCycle": 5000, 11 | "browserTimeout": 60, 12 | "timeout": 60 13 | } 14 | -------------------------------------------------------------------------------- /ansible/templates/torture_wrapper.j2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export TORTURE_EXCLUDE_TESTS="{{ jitsi_torture_exclude_tests }}" 3 | 4 | cd {{ jitsi_torture_path }} 5 | 6 | {% if torture_longtest_only == 'long' %} 7 | ./test-runner-long.sh {{ jitsi_torture_domain }} {{ torture_longtest_duration }} 8 | {% elif torture_longtest_only == 'all' %} 9 | ./test-runner-all.sh {{ jitsi_torture_domain }} 10 | {% else %} 11 | ./test-runner.sh {{ jitsi_torture_domain }} 12 | {% endif %} -------------------------------------------------------------------------------- /ansible/roles/jicofo/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jicofo 4 | author: Aaron van Meerten 5 | description: install and configure jicofo 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: jitsi-repo, when: jicofo_install_flag } 15 | - { role: openjdk-java, when: jicofo_install_flag } 16 | -------------------------------------------------------------------------------- /ansible/roles/jiconop/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jiconop 4 | author: Aaron van Meerten 5 | description: install and configure jiconop 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nodejs, when: jiconop_install_flag } 15 | - { role: jitsi-repo, when: jiconop_install_flag } 16 | -------------------------------------------------------------------------------- /ansible/roles/jvb-rtcstats-push/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jvb_rtcstats_push 4 | author: Aaron van Meerten 5 | description: install and configure rtc stats pusher for videobridge 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nodejs, when: jvb_rtcstats_push_install_flag } 15 | -------------------------------------------------------------------------------- /ansible/roles/jvb-ssl/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jvb_ssl_certificate: "{{ jitsi_net_ssl_certificate }}{{ jitsi_net_ssl_extras }}" 3 | jvb_ssl_dest_dir: /etc/nginx/ssl 4 | jvb_ssl_domain_name: "{{ jvb_ssl_server_prefix }}.{{ jvb_ssl_domain_suffix }}" 5 | jvb_ssl_domain_suffix: "jitsi.net" 6 | jvb_ssl_install_flag: true 7 | jvb_ssl_key_name: "{{ jitsi_net_ssl_key_name }}" 8 | jvb_ssl_server_prefix: "{{ ansible_hostname.split('.')[0] }}" 9 | jvb_ssl_websockets_port: 9090 10 | -------------------------------------------------------------------------------- /ansible/roles/chromedriver/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 
chromedriver_latest_release_url: https://chromedriver.storage.googleapis.com/LATEST_RELEASE 3 | chromedriver_path: /usr/bin/chromedriver 4 | chromedriver_tmp_dir: /tmp/chromedriver_linux64 5 | chromedriver_tmp_path: /tmp/chromedriver_linux64.zip 6 | chromedriver_url: https://chromedriver.storage.googleapis.com/{{ chromedriver_version }}/chromedriver_linux64.zip 7 | chromedriver_use_latest: true 8 | chromedriver_version: 85.0.4183.87 9 | -------------------------------------------------------------------------------- /ansible/roles/iptables-jenkins/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Open TCP port 443 via iptables 3 | ansible.builtin.iptables: 4 | chain: INPUT 5 | ctstate: NEW 6 | protocol: tcp 7 | destination_port: "443" 8 | jump: ACCEPT 9 | action: insert 10 | comment: Added via ansible post-launch configuration script 11 | 12 | - name: Save newly added iptable rules # noqa no-changed-when 13 | ansible.builtin.shell: iptables-save > /etc/iptables/rules.v4 14 | -------------------------------------------------------------------------------- /ansible/roles/jenkins/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jenkins 4 | author: Aaron van Meerten 5 | description: install and configure jenkins for jitsi 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nginx, when: jenkins_install_flag } 15 | - { role: docker, when: jenkins_install_flag } 16 | -------------------------------------------------------------------------------- /ansible/roles/jibri-pjsua/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #v4l2loopback dependency 3 | # Install v4l2loopback directly from the internet: the apt v4l2loopback-dkms package 4 | # fails on the Oracle Ubuntu image (https://github.com/umlaeute/v4l2loopback/issues/247) 5 | # with a modprobe error such as: ERROR: could not insert 'v4l2loopback': Bad address 6 | - name: Install v4l2loopback package from the internet 7 | apt: 8 | deb: "{{ jibri_pjsua_v4l2_download_url }}" 9 | 10 | -------------------------------------------------------------------------------- /ansible/roles/fluentd-jitsi/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: fluentd_jitsi 4 | author: Aaron van Meerten 5 | description: configure fluentd for jitsi 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: rsyslog, when: fluentd_install_flag } 15 | - { role: fluentd, when: fluentd_install_flag } 16 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-tenant-pin/templates/tenant-pin.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=haproxy tenant pin updater service 3 | After=multi-user.target 4 | 5 | [Service] 6 | EnvironmentFile=/etc/{{ haproxy_tenant_pin_service_name }}.conf 7 | ExecStart=/usr/bin/python /usr/local/bin/haproxy_tenant_sync.py 8 | Type=simple 9 | User=root 10 | Group=root 11 | Restart=always 12 | SyslogIdentifier={{ haproxy_tenant_pin_service_name }} 13 | 14 | [Install] 15 | 
WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /ansible/roles/rsyslog/tasks/deb_packages.yml: -------------------------------------------------------------------------------- 1 | # Install packages for Debian 2 | --- 3 | - name: Add rsyslog apt repo 4 | ansible.builtin.apt_repository: 5 | repo: "ppa:adiscon/v8-stable" 6 | state: present 7 | update_cache: true 8 | when: ansible_distribution == "Ubuntu" 9 | 10 | - name: Install packages for Debian 11 | tags: 12 | - rsyslog 13 | - packages 14 | ansible.builtin.apt: 15 | pkg: "{{ rsyslog_package_names }}" 16 | state: present 17 | -------------------------------------------------------------------------------- /ansible/roles/autoscaler-sidecar/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: autoscaler_sidecar 4 | author: Aaron van Meerten 5 | description: install and configure jitsi autoscaler sidecar service 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nodejs, when: autoscaler_install_flag and jitsi_autoscaler_sidecar } 15 | -------------------------------------------------------------------------------- /ansible/roles/google-cloud/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # cloud-sdk-focal was missing upstream, so hardcode the unversioned 'cloud-sdk' release for now 3 | # google_cloud_apt_release_name: "cloud-sdk-{{ ansible_distribution_release }}" 4 | google_cloud_apt_release_name: "cloud-sdk" 5 | google_cloud_conf_dir: /etc/google-cloud 6 | google_cloud_configure_flag: true 7 | google_cloud_install_flag: true 8 | google_cloud_service_key_json: "{{ google_cloud_service_key_json_default }}" 9 | google_cloud_service_key_json_default: 10 | -------------------------------------------------------------------------------- /ansible/roles/jvb-colibri-proxy/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: jvb_colibri_proxy 4 | author: Aaron van Meerten 5 | description: install and configure nginx to proxy jvb colibri 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | dependencies: 14 | - { role: nginx, nginx_start_service: false, when: jvb_colibri_proxy_install_flag } 15 | -------------------------------------------------------------------------------- /ansible/roles/coturn/templates/coturn-mark-unhealthy-oracle.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | INSTANCE_METADATA=`curl -s http://169.254.169.254/opc/v1/instance/` 3 | INSTANCE_ID=$(echo $INSTANCE_METADATA | jq .id -r) 4 | OCI_BIN="/usr/local/bin/oci" 5 | 6 | #Metric 7 | {% if coturn_copy_dumps_to_s3 %} 8 | {{coturn_scripts_path}}/dump-coturn.sh copy_and_send 9 | $OCI_BIN compute instance terminate --debug --instance-id "$INSTANCE_ID" --preserve-boot-volume false --force --auth instance_principal 10 | {% endif %} -------------------------------------------------------------------------------- /ansible/roles/consul-jigasi/templates/jigasi.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "service": { 3 | "name": "jigasi", 4 | "tags":["{{ hcv_environment }}"], 5 | "meta":
{ 6 | "environment":"{{ hcv_environment }}" 7 | }, 8 | "port": 80, 9 | "checks": [ 10 | { 11 | "name": "Jigasi REST Health", 12 | "http": "http://localhost:8788/about/health", 13 | "method": "GET", 14 | "interval": "10s", 15 | "timeout": "1s" 16 | } 17 | ] 18 | } 19 | } -------------------------------------------------------------------------------- /ansible/roles/hcv-haproxy-status-lock/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Lock haproxy to freeze proxymonitor scan 3 | ansible.builtin.file: 4 | mode: 0644 5 | path: '{{ hcv_haproxy_status_lock_file }}' 6 | state: touch 7 | when: hcv_haproxy_status_lock_action == 'lock' 8 | 9 | - name: Unlock haproxy to allow proxymonitor scan 10 | ansible.builtin.file: 11 | path: '{{ hcv_haproxy_status_lock_file }}' 12 | state: absent 13 | when: hcv_haproxy_status_lock_action == 'unlock' 14 | -------------------------------------------------------------------------------- /ansible/roles/rsyslog/templates/rsyslog-programrouting.conf.j2: -------------------------------------------------------------------------------- 1 | ## {{ ansible_managed }} 2 | 3 | $PrivDropToGroup adm 4 | 5 | {% for program in rsyslog_known_programs %} 6 | template(name="{{ program }}LocalLog" type="string" string="{{ rsyslog_local_log_directory }}%programname%.log") 7 | if $programname startswith '{{ program }}' then { 8 | action(type="omfile" DynaFile="{{ program }}LocalLog" file="{{ rsyslog_local_log_directory }}{{ program }}.log") 9 | stop 10 | } 11 | 12 | {% endfor %} -------------------------------------------------------------------------------- /ansible/roles/selenium-grid/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: openjdk-java, when: selenium_grid_install_flag } 4 | # not used if extras is used 5 | - { role: chromedriver, when: selenium_grid_install_flag } 6 | - { role: google-chrome, google_chrome_beta_flag: true } 7 | - { role: firefox, firefox_beta_flag: true, when: selenium_grid_install_flag } 8 | # not used if extras is used 9 | - { role: geckodriver, when: selenium_grid_install_flag } 10 | - { role: jitsi-torture-checkout } 11 | -------------------------------------------------------------------------------- /ansible/roles/sshusers/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | security_additional_groups: [] 3 | security_users: "{{ ssh_users_security }}" 4 | # How to manage users 5 | # ssh_users: 6 | # - username: vfedorov 7 | # groups: ["sshsudousers"] 8 | # real_name: Vadym Fedorov 9 | # state: absent 10 | # ssh_key: 11 | # - "ssh-rsa AAAA....Q== vfedorov@spider" 12 | ssh_users: "{{ ssh_users_jitsi }}" 13 | ssh_users_config_flag: true 14 | ssh_users_accounts_flag: true 15 | ssh_users_system_flag: true 16 | -------------------------------------------------------------------------------- /ansible/roles/selenium-grid/templates/selenium-grid-extras-node.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Selenium Grid 3 | After=selenium-xvfb.service 4 | Requires=selenium-xvfb.service 5 | 6 | [Service] 7 | User={{ selenium_grid_username }} 8 | Group={{ selenium_grid_groupname }} 9 | Environment=DISPLAY=:99 10 | ExecStart={{ selenium_grid_java_path }} -jar {{ selenium_grid_extras_file }} 11 | Restart=on-failure 12 | RestartPreventExitStatus=255 13 | Type=simple 14 | 
WorkingDirectory={{ selenium_grid_extras_path }} 15 | -------------------------------------------------------------------------------- /ansible/roles/signal-sidecar/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: signal_sidecar 4 | author: Aaron van Meerten 5 | description: install and configure signal sidecar 6 | license: Apache License Version 2.0 7 | min_ansible_version: '6.6.0' 8 | platforms: 9 | - name: Ubuntu 10 | releases: 11 | - focal 12 | - jammy 13 | 14 | dependencies: 15 | - { role: nodejs, when: signal_sidecar_install_flag } 16 | - { role: jitsi-repo, when: signal_sidecar_install_flag } 17 | -------------------------------------------------------------------------------- /ansible/roles/wavefront/templates/10-wavefront.conf.j2: -------------------------------------------------------------------------------- 1 | # # Configuration for Wavefront proxy to send metrics to 2 | [[outputs.wavefront]] 3 | {% if wavefront_tcp_mode %} 4 | host = "{{ wavefront_proxy_address }}" 5 | port = {{ wavefront_proxy_port }} 6 | {% else %} 7 | url = "http://{{ wavefront_proxy_address }}:{{ wavefront_proxy_json_port }}" 8 | {% endif %} 9 | metric_separator = "." 10 | source_override = ["hostname", "snmp_host", "node_host"] 11 | convert_paths = true 12 | use_regex = false 13 | -------------------------------------------------------------------------------- /ansible/roles/testrtc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include install tasks 3 | ansible.builtin.include_tasks: install_testrtc.yml 4 | when: testrtc_install_flag 5 | 6 | - name: Check that testrtc folder exists 7 | ansible.builtin.stat: 8 | path: "{{ testrtc_base_path }}" 9 | register: testrtc_path 10 | 11 | - name: Include configure tasks 12 | ansible.builtin.include_tasks: configure_testrtc.yml 13 | when: testrtc_configure_flag and testrtc_path.stat.isdir is defined and testrtc_path.stat.isdir 14 | -------------------------------------------------------------------------------- /ansible/clear-cloud-cache.yml: -------------------------------------------------------------------------------- 1 | - name: Main 2 | hosts: all 3 | gather_facts: false 4 | strategy: free 5 | become_user: root 6 | become: true 7 | 8 | tasks: 9 | - name: Find cache files 10 | ansible.builtin.find: 11 | paths: /tmp 12 | patterns: "*_cache-*" 13 | register: find_results 14 | 15 | - name: Delete cache files 16 | ansible.builtin.file: 17 | path: "{{ item['path'] }}" 18 | state: absent 19 | with_items: "{{ find_results['files'] }}" 20 | -------------------------------------------------------------------------------- /ansible/roles/consul-server/templates/consul.hcl.j2: -------------------------------------------------------------------------------- 1 | datacenter = "{{ consul_datacenter }}" 2 | data_dir = "/opt/consul" 3 | encrypt = "{{ consul_encryption_key }}" 4 | retry_join = {{ consul_retry_join_lan }} 5 | retry_join_wan = {{ consul_retry_join_wan }} 6 | server = true 7 | bootstrap_expect = 3 8 | addresses { 9 | http = "0.0.0.0" 10 | } 11 | connect { 12 | enabled = true 13 | } 14 | telemetry { 15 | disable_hostname = true 16 | dogstatsd_addr = "localhost:8125" 17 | prometheus_retention_time = "5m" 18 | } -------------------------------------------------------------------------------- /ansible/roles/prosody/files/prosody-log-filter.systemd: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Prosody Log Filter 3 | After=network.target 4 | Before=prosody.service 5 | 6 | [Service] 7 | User=root 8 | Group=prosody 9 | WorkingDirectory=/var/log/prosody 10 | ExecStart=/usr/local/bin/prosody-log-filter.sh 11 | Restart=always 12 | KillSignal=SIGQUIT 13 | Type=simple 14 | StandardOutput=syslog 15 | StandardError=syslog 16 | NotifyAccess=all 17 | SyslogIdentifier=prosody-log-filter 18 | 19 | [Install] 20 | WantedBy=multi-user.target prosody.service -------------------------------------------------------------------------------- /ansible/roles/consul-install/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul_apt_key: https://apt.releases.hashicorp.com/gpg 3 | consul_apt_repo: https://apt.releases.hashicorp.com 4 | consul_architecture: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}" 5 | consul_download_url: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_{{ consul_architecture }}.zip" 6 | consul_log_dir: /var/log/ 7 | consul_version: 1.7.2 8 | consul_zip_hash: 5ab689cad175c08a226a5c41d16392bc7dd30ceaaf90788411542a756773e698 9 | -------------------------------------------------------------------------------- /ansible/roles/jibri-java/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart jibri 3 | ansible.builtin.service: 4 | name: jibri 5 | state: restarted 6 | when: not jibri_configure_only_flag 7 | 8 | - name: Restart jibri systemd 9 | ansible.builtin.systemd: 10 | name: jibri 11 | state: restarted 12 | daemon_reload: true 13 | when: not jibri_configure_only_flag 14 | 15 | - name: Restart Monitor Terminating Instance 16 | ansible.builtin.service: 17 | name: monitor-terminating-instance 18 | state: restarted 19 | -------------------------------------------------------------------------------- /ansible/roles/autoscaler-sidecar/files/terminate_instance_aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CURL_BIN="/usr/bin/curl" 4 | AWS_BIN="/usr/local/bin/aws" 5 | JQ_BIN="/usr/bin/jq" 6 | 7 | if [ -z "$INSTANCE_ID" ]; then 8 | INSTANCE_ID=$(/usr/bin/ec2metadata --instance-id) 9 | fi 10 | 11 | EC2_REGION=$($CURL_BIN -s http://169.254.169.254/latest/dynamic/instance-identity/document | $JQ_BIN .region -r) 12 | export AWS_DEFAULT_REGION=$EC2_REGION 13 | 14 | # terminate our instance 15 | $AWS_BIN ec2 terminate-instances --instance-ids "$INSTANCE_ID" 16 | -------------------------------------------------------------------------------- /ansible/roles/fluentd-jitsi/templates/bootstrap.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | <source> 4 | type tail 5 | path /var/log/bootstrap.log 6 | pos_file /var/spool/td-agent/bootstrap.pos 7 | tag bootstrap 8 | 9 | format multiline 10 | format_firstline /^(?[\w]+\s?[\w]+) (?\[.*:.*\])\s?\**\s/ 11 | format1 /^(?[\w]+\s?[\w]+) (?\[[\w\-:].*\])\s?\**\s?(?[^ ]+) (?[^ ]+) => (?[\{|\(].*[\}|\)]+)$/ 12 | 13 | # Date and time format 14 | time_format %d/%B/%Y:%H:%M:%S 15 | </source> 16 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart jitsi-videobridge 3 | ansible.builtin.service: 4 | name: "{{ jvb_service_name }}" 5 | state: restarted 6 | when: ((jvb_image_build_flag == false) and (jvb_configure_from_template_flag == true) and (jvb_reconfigure_on_changes_flag == true)) 7 | 8 | - name: restart jitsi-videobridge systemd 9 | ansible.builtin.systemd: 10 | name: "{{ jvb_systemd_service_name }}" 11 | state: restarted 12 | daemon_reload: true 13 | when: ((jvb_image_build_flag == false) and (jvb_configure_from_template_flag == true) and (jvb_reconfigure_on_changes_flag == true)) -------------------------------------------------------------------------------- /ansible/roles/haproxy-jigasi/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload haproxy 3 | ansible.builtin.service: 4 | name: haproxy 5 | state: reloaded 6 | 7 | - name: Reload haproxy systemd 8 | ansible.builtin.systemd: 9 | name: haproxy 10 | state: reloaded 11 | daemon_reload: true 12 | when: ansible_service_mgr == "systemd" 13 | 14 | - name: Restart haproxy systemd 15 | ansible.builtin.systemd: 16 | name: haproxy 17 | state: restarted 18 | daemon_reload: true 19 | when: ansible_service_mgr == "systemd" 20 | -------------------------------------------------------------------------------- /ansible/roles/consul-server/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install consul config file 3 | ansible.builtin.template: 4 | src: "consul.hcl.j2" 5 | dest: "/etc/consul.d/consul.hcl" 6 | mode: 0640 7 | 8 | - name: Install consul env file 9 | ansible.builtin.copy: 10 | content: "" 11 | dest: "/etc/consul.d/consul.env" 12 | mode: 0644 13 | 14 | - name: Install consul config script 15 | ansible.builtin.copy: 16 | src: "consul-server-config.sh" 17 | dest: "/usr/local/bin/consul-server-config.sh" 18 | mode: 0755 19 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-videobridge-auth/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jvb_auth_domain: "auth.{{ prosody_domain_name }}" 3 | jvb_auth_domain_path: "/var/lib/prosody/{{ jvb_auth_domain|regex_replace('\\.','%2e')|regex_replace('-','%2d') }}/accounts/{{ jvb_auth_user }}.dat" 4 | jvb_auth_password: "{{ jvb_xmpp_password | default('replaceme') }}" 5 | # prosody-jvb 6 | jvb_auth_prosody_jvb_domain_path: "/var/lib/prosody-jvb/{{ prosody_jvb_auth_domain_name|regex_replace('\\.','%2e')|regex_replace('-','%2d') }}/accounts/{{ jvb_auth_user }}.dat" 7 | jvb_auth_user: "jvb" 8 | -------------------------------------------------------------------------------- /ansible/roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | common_cloud_provider: "{{ cloud_provider | default('aws') }}" 3 | common_install_pip3_flag: "{{ true 4 | if (ansible_distribution_release == 'jammy') or (ansible_distribution_release == 'focal') 5 | or (common_cloud_provider == 'oracle') else false }}" 6 | common_install_pip_flag: "{{ 7 | false if (ansible_distribution_release == 'jammy') or (ansible_distribution_release == 'focal') 8 | or (common_cloud_provider == 'oracle') else true }}" 9 | gai_ipv6_resolve_disabled: true 10 | locale: en_US.UTF-8 11 | -------------------------------------------------------------------------------- /ansible/roles/jitsi-torture-checkout/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jitsi_torture_checkout_git_branch: 'master' 3
| jitsi_torture_checkout_git_repo: https://github.com/jitsi/jitsi-meet-torture.git 4 | jitsi_torture_checkout_path: /usr/share/jitsi-meet-torture 5 | jitsi_torture_cloud_provider: "{{ cloud_provider | default('aws') }}" 6 | jitsi_torture_oracle_bucket_name: "jvb-images-{{ jitsi_torture_oracle_environment }}" 7 | jitsi_torture_oracle_environment: "{{ hcv_environment | default('all') }}" 8 | jitsi_torture_oracle_region: "{{ oracle_region | default('') }}" 9 | -------------------------------------------------------------------------------- /ansible/roles/google-cloud/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Google Cloud repo signing key 3 | ansible.builtin.apt_key: 4 | url: https://packages.cloud.google.com/apt/doc/apt-key.gpg 5 | state: present 6 | 7 | - name: Google Cloud repo 8 | ansible.builtin.apt_repository: 9 | repo: "deb http://packages.cloud.google.com/apt {{ google_cloud_apt_release_name }} main" 10 | state: present 11 | update_cache: true 12 | 13 | - name: Install google cloud SDK 14 | ansible.builtin.apt: 15 | name: google-cloud-sdk 16 | state: present 17 | -------------------------------------------------------------------------------- /ansible/roles/coturn/templates/coturn_set_alarms_systemd.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Check instance termination state 3 | Before=shutdown.target reboot.target halt.target 4 | Requires=network-online.target network.target 5 | 6 | [Service] 7 | ExecStart={{coturn_scripts_path}}/coturn-set-alarms.sh ok 8 | ExecStart={{coturn_scripts_path}}/coturn-set-alarms.sh ok CoturnOpenedFilesLimitFailed 9 | ExecStop={{coturn_scripts_path}}/coturn-set-alarms.sh alarm 10 | Type=oneshot 11 | RemainAfterExit=yes 12 | 13 | [Install] 14 | WantedBy=reboot.target shutdown.target halt.target -------------------------------------------------------------------------------- /ansible/roles/jigasi-auth/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add jigasi XMPP control authentication 3 | ansible.builtin.command: prosodyctl register {{ jigasi_auth_user }} {{ jigasi_auth_domain }} {{ jigasi_auth_password }} 4 | args: 5 | creates: "{{ jigasi_auth_domain_path }}" 6 | 7 | - name: Add jigasi transcriber authentication 8 | ansible.builtin.command: prosodyctl register {{ jigasi_transcriber_auth_user }} {{ jigasi_transcriber_auth_domain }} {{ jigasi_transcriber_auth_password }} 9 | args: 10 | creates: "{{ jigasi_transcriber_auth_domain_path }}" 11 | -------------------------------------------------------------------------------- /ansible/roles/tcpdump-jigasi/files/tcpdump-jigasi.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=tcpdump for jigasi 3 | After=network.target 4 | 5 | [Service] 6 | User=root 7 | Group=root 8 | WorkingDirectory=/var/lib/tcpdump-jigasi 9 | ExecStart=/usr/bin/tcpdump -ni ens3 -G 1800 -w /var/lib/tcpdump-jigasi/trace-%%Y-%%m-%%d_%%H.%%M.%%S.pcap port 5061 10 | Restart=on-failure 11 | KillSignal=SIGQUIT 12 | Type=simple 13 | StandardOutput=syslog 14 | StandardError=syslog 15 | NotifyAccess=all 16 | SyslogIdentifier=tcpdump-jigasi 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | 21 | -------------------------------------------------------------------------------- /ansible/roles/unattended-upgrades/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Install unattended-upgrades 3 | ansible.builtin.apt: 4 | name: unattended-upgrades 5 | state: present 6 | 7 | - name: Create unattended-upgrades configuration (20auto-upgrades) 8 | ansible.builtin.template: 9 | src: 20auto-upgrades.j2 10 | dest: /etc/apt/apt.conf.d/20auto-upgrades 11 | owner: root 12 | group: root 13 | mode: 0644 14 | 15 | - name: Create unattended-upgrades configuration (50unattended-upgrades) 16 | ansible.builtin.template: 17 | src: 50unattended-upgrades.j2 18 | dest: /etc/apt/apt.conf.d/50unattended-upgrades 19 | owner: root 20 | group: root 21 | mode: 0644 22 | -------------------------------------------------------------------------------- /ansible/roles/autoscaler-sidecar/files/terminate_instance_oracle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export OCI_BIN="/usr/local/bin/oci" 4 | export CURL_BIN="/usr/bin/curl" 5 | export JQ_BIN="/usr/bin/jq" 6 | 7 | if [ -z "$INSTANCE_ID" ]; then 8 | INSTANCE_ID=$($CURL_BIN -s http://169.254.169.254/opc/v1/instance/ | $JQ_BIN .id -r) 9 | fi 10 | 11 | # terminate our instance; we enable debug to have more details in case of oci cli failures 12 | $OCI_BIN compute instance terminate --debug --instance-id "$INSTANCE_ID" --preserve-boot-volume false --auth instance_principal --force 13 | -------------------------------------------------------------------------------- /ansible/roles/consul-install/files/consul.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description="HashiCorp Consul - A service mesh solution" 3 | Documentation=https://www.consul.io/ 4 | Requires=network-online.target 5 | After=network-online.target 6 | ConditionFileNotEmpty=/etc/consul.d/consul.hcl 7 | 8 | [Service] 9 | Type=notify 10 | User=consul 11 | Group=consul 12 | ExecStart=/usr/local/bin/consul agent -config-dir=/etc/consul.d/ 13 | ExecReload=/usr/local/bin/consul reload 14 | KillMode=process 15 | Restart=on-failure 16 | LimitNOFILE=65536 17 | 18 | [Install] 19 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /ansible/roles/fail2ban/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install fail2ban 3 | ansible.builtin.apt: 4 | name: fail2ban 5 | install_recommends: true 6 | state: present 7 | notify: Restart fail2ban 8 | 9 | - name: Make fail2ban sshd aggressive 10 | ansible.builtin.lineinfile: 11 | path: /etc/fail2ban/jail.conf 12 | regexp: '^#mode = normal' 13 | line: 'mode = aggressive' 14 | notify: Restart fail2ban 15 | 16 | - name: Start fail2ban at boot 17 | ansible.builtin.service: 18 | name: fail2ban 19 | state: started 20 | enabled: true 21 | -------------------------------------------------------------------------------- /ansible/roles/nginx/templates/47-nginx.conf.j2: -------------------------------------------------------------------------------- 1 | $ModLoad imfile 2 | $PrivDropToGroup adm 3 | 4 | # Nginx access file: 5 | $InputFileName /var/log/nginx/access.log 6 | $InputFileTag nginx-access: 7 | $InputFileStateFile stat-nginx-access 8 | $InputFileSeverity info 9 | $InputFilePersistStateInterval 20000 10 | $InputRunFileMonitor 11 | 12 | # Nginx error file: 13 | $InputFileName /var/log/nginx/error.log 14 | $InputFileTag nginx-error: 15 | $InputFileStateFile stat-nginx-error 16 | $InputFileSeverity error 17 | $InputFilePersistStateInterval 20000 18 | $InputRunFileMonitor 19 |
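# The directives above use rsyslog's legacy imfile syntax. For reference, a
# minimal sketch of the same two inputs in the newer RainerScript form, kept
# commented out here since the legacy form above is what this template
# installs (parameter spellings should be checked against the rsyslog version in use):
#
# module(load="imfile")
# input(type="imfile" File="/var/log/nginx/access.log" Tag="nginx-access:" Severity="info" PersistStateInterval="20000")
# input(type="imfile" File="/var/log/nginx/error.log" Tag="nginx-error:" Severity="error" PersistStateInterval="20000")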
-------------------------------------------------------------------------------- /ansible/roles/prosody/files/mod_websocket_auth_token.patch: -------------------------------------------------------------------------------- 1 | --- mod_websocket.lua Thu Jan 23 21:59:13 2020 +0000 2 | +++ mod_websocket.lua Fri Jan 24 16:21:30 2020 +0000 3 | @@ -305,6 +305,8 @@ 4 | response.headers.sec_webSocket_accept = base64(sha1(request.headers.sec_websocket_key .. "258EAFA5-E914-47DA-95CA-C5AB0DC85B11")); 5 | response.headers.sec_webSocket_protocol = "xmpp"; 6 | 7 | + module:fire_event("websocket-session", { session = session, request = request }); 8 | + 9 | session.log("debug", "Sending WebSocket handshake"); 10 | 11 | return ""; 12 | 13 | -------------------------------------------------------------------------------- /ansible/roles/jibri-java/files/jibri.rsyslogd.conf: -------------------------------------------------------------------------------- 1 | if $programname == 'launch_jibri.sh' then { 2 | /var/log/local/jibri.log 3 | ~ 4 | } 5 | 6 | if $programname == 'monitor-terminating-instance.sh' then { 7 | /var/log/local/jibri-monitor.log 8 | ~ 9 | } 10 | 11 | if $programname == 'monitor-reconfigure-instance.sh' then { 12 | /var/log/local/jibri-monitor.log 13 | ~ 14 | } 15 | 16 | if $programname == 'icewm-session' then { 17 | /var/log/local/jibri-extras.log 18 | ~ 19 | } 20 | 21 | if $programname == 'Xorg' then { 22 | /var/log/local/jibri-extras.log 23 | ~ 24 | } -------------------------------------------------------------------------------- /ansible/roles/consul-standalone/templates/standalone.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "service": { 3 | "name": "all", 4 | "tags":["{{ hcv_environment }}"], 5 | "meta": { 6 | "environment":"{{ hcv_environment }}", 7 | "domain":"{{ prosody_domain_name }}" 8 | }, 9 | "tagged_addresses": { 10 | "lan": { 11 | "address": "{{ consul_standalone_private_ip }}", 12 | "port": 5280 13 | }, 14 | "wan": { 15 | "address": "{{ consul_standalone_public_ip }}", 16 | "port": 5280 17 | } 18 | }, 19 | "port": 5280 20 | } 21 | } -------------------------------------------------------------------------------- /ansible/roles/fluentd-jitsi/files/jigasi.conf: -------------------------------------------------------------------------------- 1 | # Jigasi 2 | <source> 3 | @type tail 4 | path /var/log/jitsi/jigasi.log 5 | pos_file /var/spool/td-agent/jigasi.pos 6 | 7 | #java with possible multi-line 8 | format multiline 9 | format_firstline /\d{4}-\d{1,2}-\d{1,2}/ 10 | format1 /^(?