├── .editorconfig
├── .github
├── dependabot.yml
└── workflows
│ ├── cleanup.yml
│ ├── molecule-loadbalancer.yml
│ ├── molecule-mongo.yml
│ └── syntax.yml
├── .gitignore
├── LICENSE
├── README.md
├── config_base_playbook.yml
├── config_docker_playbook.yml
├── deploy_containers_playbook.yml
├── deploy_loadbalancers_playbook.yml
├── deploy_mariadb_playbook.yml
├── deploy_mongo_playbook.yml
├── environments
├── template
│ ├── files
│ │ ├── certs
│ │ │ ├── .gitkeep
│ │ │ ├── invite
│ │ │ │ └── public_key.pem
│ │ │ ├── myconext
│ │ │ │ └── myconext_saml.crt
│ │ │ └── oidcng
│ │ │ │ └── SURFconext-metadata-signer.pem
│ │ └── eb
│ │ │ └── languages
│ │ │ ├── overrides.en.php
│ │ │ └── overrides.nl.php
│ ├── group_vars
│ │ └── template.yml
│ ├── host_vars
│ │ └── template.yml
│ ├── inventory
│ └── secrets
│ │ └── skeleton.yml
└── vm
│ ├── files
│ ├── certs
│ │ ├── engineblock.crt
│ │ ├── invite
│ │ │ └── public_key.pem
│ │ ├── myconext
│ │ │ └── myconext_saml.crt
│ │ ├── oidc
│ │ │ └── oidcsaml.crt
│ │ ├── oidcng
│ │ │ └── SURFconext-metadata-signer.pem
│ │ └── shib-sp.crt
│ └── eb
│ │ └── languages
│ │ ├── overrides.en.php
│ │ └── overrides.nl.php
│ ├── group_vars
│ ├── dev.yml
│ └── vm.yml
│ ├── host_vars
│ └── 192.168.66.99.yml
│ ├── inventory
│ └── secrets
│ └── vm.yml
├── filter_plugins
├── depem.py
└── merge_usergroups.py
├── group_vars
├── all.yml
├── java-apps-common.yml
├── local-certs.yml
└── minimal.yml
├── library
└── maven_artifact.py
├── molecule
├── Dockerfile-Rocky8.j2
├── Dockerfile-Rocky9.j2
├── galera
│ ├── converge.yml
│ ├── molecule.yml
│ ├── prepare.yml
│ └── tests
│ │ └── test_default.py
├── loadbalancer
│ ├── converge.yml
│ ├── molecule.yml
│ ├── prepare.yml
│ └── tests
│ │ └── test_default.py
└── mongo
│ ├── converge.yml
│ ├── molecule.yml
│ └── tests
│ └── test_default.py
├── playbook_haproxy.yml
├── prep-env
├── provision
├── provision.yml
├── roles
├── apachefpm
│ ├── files
│ │ └── stepuplogging.conf
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── apachevhost.j2
│ │ └── phpfpm.j2
├── attribute-aggregation
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── apachelink.conf.j2
│ │ ├── attributeAuthorities.yml.j2
│ │ ├── logback.xml.j2
│ │ ├── serverapplication.yml.j2
│ │ └── serviceProviderConfig.json.j2
│ └── vars
│ │ └── main.yml
├── bind
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── create_keys.yml
│ │ └── main.yml
│ └── templates
│ │ ├── ha_zone_file.j2
│ │ ├── named.conf.local.j2
│ │ └── named.conf.options.j2
├── common
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── journald.conf
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── header_checks.j2
│ │ └── main.cf.j2
├── dashboard
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── dashboard.conf.j2
│ │ ├── logback.xml.j2
│ │ └── serverapplication.yml.j2
├── diyidp
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── diyidp.sql
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── 000-default.conf.j2
│ │ ├── authsources.php.j2
│ │ ├── config-override.php.j2
│ │ ├── saml20-idp-hosted.php.j2
│ │ ├── saml20-sp-remote.php.j2
│ │ └── showusers.php.j2
├── docker
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── iptablesdocker.service
│ │ └── router.yaml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── setup-debian.yml
│ │ └── setup-rocky.yml
│ └── templates
│ │ ├── daemon.json.j2
│ │ ├── ip4tables.sh.j2
│ │ └── traefik.yaml.j2
├── elk
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── elasticsearch.repo
│ │ └── logstash
│ │ │ ├── core
│ │ │ ├── 11-core-haproxy.conf
│ │ │ ├── 12-core-eblog.conf
│ │ │ ├── 14-core-pdp.conf
│ │ │ ├── 15-core-pdpstats.conf
│ │ │ ├── 16-core-myconext.conf
│ │ │ ├── 17-core-oidcng.conf
│ │ │ ├── 31-core-elastic-search-output.conf
│ │ │ └── 32-core-elastic-search-output.conf
│ │ │ ├── mapping_core.json
│ │ │ ├── patterns
│ │ │ ├── ebrequest
│ │ │ └── haproxy
│ │ │ └── sa
│ │ │ ├── 20-sa-mysqld.conf
│ │ │ ├── 20-sa-nginx.conf
│ │ │ ├── 20-sa-sendmail.conf
│ │ │ ├── 20-sa-stepup-authentication.conf
│ │ │ ├── 20-sa-symfony.conf
│ │ │ └── 30-sa-elastic-search-output.conf
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── elastic.yml
│ │ ├── kibana.yml
│ │ ├── logstash.yml
│ │ ├── logstash_elk.yml
│ │ ├── logstash_stats.yml
│ │ └── main.yml
│ └── templates
│ │ ├── elasticsearch.yml.j2
│ │ ├── kibana.conf.j2
│ │ ├── kibana.yml.j2
│ │ ├── logstash.yml.j2
│ │ ├── logstash
│ │ ├── core
│ │ │ ├── 02-filebeat-input.conf.j2
│ │ │ └── 13-core-ebauth.conf.j2
│ │ ├── sa
│ │ │ └── 02-sa-filebeat-input.conf.j2
│ │ └── stats
│ │ │ └── 33-core-influxdb-output.conf
│ │ └── pipelines.yml.j2
├── engineblock
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ └── parameters.yml.j2
│ └── vars
│ │ └── main.yml
├── filebeat
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── elasticsearch.repo
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── filebeat.yml.j2
├── galera
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── RPM-GPG-KEY-MariaDB
│ │ ├── mysql_reboot_cluster.sh
│ │ └── plugins.cnf
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── arbiter_node.yml
│ │ ├── cluster_nodes.yml
│ │ └── main.yml
│ └── templates
│ │ ├── garb.j2
│ │ ├── mariadb.repo.j2
│ │ ├── mariadb.repo.rocky8.j2
│ │ ├── mariadb.repo.rocky9.j2
│ │ ├── mariadb_backup.sh.j2
│ │ ├── my.cnf.j2
│ │ ├── mysql-clients.cnf.j2
│ │ ├── server.cnf.j2
│ │ └── timeoutstartsec.conf.j2
├── galera_create_users
│ └── tasks
│ │ └── main.yml
├── haproxy
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── lbops
│ │ ├── nosamesitebrowsers.lst
│ │ └── sysconfig_haproxy
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── acme.yml
│ │ ├── get_acme_certs.yml
│ │ └── main.yml
│ └── templates
│ │ ├── allowedips.acl.j2
│ │ ├── backends.map.j2
│ │ ├── backendsstaging.map.j2
│ │ ├── blockedips.acl.j2
│ │ ├── certlist.lst.j2
│ │ ├── haproxy_backend.cfg.j2
│ │ ├── haproxy_frontend.cfg.j2
│ │ ├── haproxy_global.cfg.j2
│ │ ├── haproxy_stick_table_backend.cfg.j2
│ │ ├── internalips.acl.j2
│ │ ├── ratelimits.map.j2
│ │ ├── redirects.map.j2
│ │ ├── stagingips.acl.j2
│ │ ├── update_ocsp.j2
│ │ ├── validvhostsrestricted.acl.j2
│ │ └── validvhostsunrestricted.acl.j2
├── haproxy_acls
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── allowedips.acl.j2
│ │ ├── blockedips.acl.j2
│ │ └── internalips.acl.j2
├── haproxy_mgnt
│ └── tasks
│ │ └── main.yml
├── hosts
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── influxdb
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── influx.repo
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── influxdb.conf.j2
├── invite
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── mockapplication.yml.j2
│ │ └── serverapplication.yml.j2
│ └── vars
│ │ └── main.yml
├── iptables
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── ip6tables.j2
│ │ └── iptables.j2
│ └── vars
│ │ └── Debian.yml
├── keepalived
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── clustercheck
│ │ ├── keepalived_check_maintenance
│ │ └── keepalived_notify
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── keepalived_dbcluster.conf.j2
│ │ └── keepalived_loadbalancer.conf.j2
├── lifecycle
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── env
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ └── parameters.yml.j2
│ └── vars
│ │ └── main.yml
├── manage
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── __cacert_entrypoint.sh
│ │ ├── metadata_templates
│ │ │ ├── oauth20_rs.template.json
│ │ │ ├── oidc10_rp.template.json
│ │ │ ├── policy.template.json
│ │ │ ├── provisioning.template.json
│ │ │ ├── saml20_idp.template.json
│ │ │ ├── saml20_sp.template.json
│ │ │ ├── single_tenant_template.template.json
│ │ │ └── sram.template.json
│ │ └── policies
│ │ │ ├── allowed_attributes.json
│ │ │ └── extra_saml_attributes.json
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── application.yml.j2
│ │ ├── logback.xml.j2
│ │ ├── manage-api-users.yml.j2
│ │ └── metadata_configuration
│ │ ├── oauth20_rs.schema.json.j2
│ │ ├── oidc10_rp.schema.json.j2
│ │ ├── policy.schema.json.j2
│ │ ├── provisioning.schema.json.j2
│ │ ├── saml20_idp.schema.json.j2
│ │ ├── saml20_sp.schema.json.j2
│ │ ├── single_tenant_template.schema.json.j2
│ │ └── sram.schema.json.j2
├── manage_provision_entities
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── client_credentials_client.j2
│ │ ├── oauth20_rs.j2
│ │ ├── oidc10_rp.j2
│ │ ├── saml20_idp.j2
│ │ └── saml20_sp.j2
├── mariadbdocker
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── settings.cnf
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── mariadb_backup.sh.j2
├── maven_artifact_requirements
│ └── tasks
│ │ └── main.yml
├── metadata
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── alive.txt
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── metadata.conf.j2
├── mongo
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── mongo.repo
│ │ ├── mongo_kernel_settings.sh
│ │ └── mongodb.logrotate
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── ca.yml
│ │ ├── certs.yml
│ │ ├── cluster.yml
│ │ ├── install.yml
│ │ ├── main.yml
│ │ ├── postinstall.yml
│ │ └── users.yml
│ └── templates
│ │ ├── backup_mongo.pl.j2
│ │ ├── mongod.conf.j2
│ │ └── mongoshrc.js.j2
├── mongodbdocker
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── backup_mongo.pl.j2
├── monitoring-tests
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── application.yml.j2
│ │ └── logback.xml.j2
│ └── vars
│ │ └── main.yml
├── mujina-idp
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── application.yml.j2
│ │ ├── logback.xml.j2
│ │ └── mujina_idp.conf.j2
│ └── vars
│ │ └── main.yml
├── mujina-sp
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── application.yml.j2
│ │ ├── logback.xml.j2
│ │ └── mujina_sp.conf.j2
│ └── vars
│ │ └── main.yml
├── myconext
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── __cacert_entrypoint.sh
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── application.yml.j2
│ │ ├── logback.xml.j2
│ │ └── tiqr.configuration.yml.j2
├── mysql
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── mariadb.repo.j2
│ │ ├── mariadb_backup.sh.j2
│ │ └── my.cnf.j2
├── oidc-playground
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── logback.xml.j2
│ │ └── serverapplication.yml.j2
├── oidcng
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── __cacert_entrypoint.sh
│ │ └── oidc_saml_mapping.json
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── application.yml.j2
│ │ ├── logback.xml.j2
│ │ ├── oidcng.conf.j2
│ │ ├── openid-configuration.json.j2
│ │ └── secret_keyset.json.j2
│ └── vars
│ │ └── main.yml
├── openconext-common
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── pdp
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── logback.xml.j2
│ │ ├── serverapplication.properties.j2
│ │ └── xacml.conext.properties.j2
│ └── vars
│ │ └── main.yml
├── profile
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── global_view_parameters.yml.j2
│ │ └── parameters.yml.j2
│ └── vars
│ │ └── main.yml
├── remove-java-app
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── rsyslog
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── lastseen.sql
│ │ └── log_logins.sql
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── process_auth_logs.yml
│ │ └── rsyslog_central.yml
│ └── templates
│ │ ├── centralsyslog.j2
│ │ ├── clean_loglogins.j2
│ │ ├── listener.conf.j2
│ │ ├── logrotate_ebauth.j2
│ │ ├── parse_ebauth_to_mysql.py.j2
│ │ ├── rsyslog.conf.j2
│ │ ├── rsyslog_onlyforward.conf.j2
│ │ ├── sc_ruleset.conf.j2
│ │ └── sc_template.conf.j2
├── selfsigned_certs
│ └── tasks
│ │ └── main.yml
├── spdashboard
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── env.j2
├── static
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── media
│ │ │ ├── alive.txt
│ │ │ ├── conext_logo.png
│ │ │ └── feide_logo.jpg
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── static.conf.j2
├── stats
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── config.yml.j2
│ │ └── stats.conf.j2
├── stats_backfill_cq
│ └── tasks
│ │ └── main.yml
├── stepupapp
│ └── tasks
│ │ ├── copygsspidpcerts.yml
│ │ ├── copygsspspcerts.yml
│ │ ├── copyimages.yml
│ │ ├── copysfimages.yml
│ │ ├── copyspcerts.yml
│ │ ├── main.yml
│ │ └── postinstall.yml
├── stepupazuremfa
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── env.j2
│ │ └── parameters.yaml.j2
│ └── vars
│ │ ├── docker.yml
│ │ └── main.yml
├── stepupgateway
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── env.j2
│ │ ├── global_view_parameters.yml.j2
│ │ ├── logout.php.j2
│ │ ├── parameters.yml.j2
│ │ ├── samlstepupproviders.yml.j2
│ │ └── samlstepupproviders_parameters.yml.j2
│ └── vars
│ │ ├── docker.yml
│ │ └── main.yml
├── stepupmiddleware
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── docker.yml
│ │ ├── main.yml
│ │ ├── migrate_identities.yml
│ │ └── vm.yml
│ ├── templates
│ │ ├── 01-middleware-db_migrate.sh.j2
│ │ ├── 06-middleware-bootstrap-sraa-users.sh.j2
│ │ ├── middleware-push-config.sh.j2
│ │ ├── middleware-push-institution.sh.j2
│ │ ├── middleware-push-whitelist.sh.j2
│ │ └── parameters.yaml.j2
│ └── vars
│ │ ├── docker.yml
│ │ └── main.yml
├── stepupra
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── global_view_parameters.yml.j2
│ │ ├── parameters.yml.j2
│ │ ├── samlstepupproviders.yml.j2
│ │ └── samlstepupproviders_parameters.yml.j2
│ └── vars
│ │ ├── docker.yml
│ │ └── main.yml
├── stepupselfservice
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── global_view_parameters.yml.j2
│ │ ├── parameters.yml.j2
│ │ ├── samlstepupproviders.yml.j2
│ │ └── samlstepupproviders_parameters.yml.j2
│ └── vars
│ │ ├── docker.yml
│ │ └── main.yml
├── stepuptiqr
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── 01-tiqr-db_init.sh.j2
│ │ ├── 02-tiqr-migrate-to-keyserver.php.j2
│ │ ├── env.j2
│ │ └── parameters.yaml.j2
│ └── vars
│ │ ├── docker.yml
│ │ └── main.yml
├── stepupwebauthn
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── 01-webauthn-db_init.sh.j2
│ │ ├── env.j2
│ │ └── parameters.yml.j2
│ └── vars
│ │ ├── docker.yml
│ │ └── main.yml
├── teams
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── logback.xml.j2
│ │ └── serverapplication.yml.j2
│ └── vars
│ │ └── main.yml
├── voot
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ ├── externalProviders.yml.j2
│ │ ├── logback.xml.j2
│ │ └── serverapplication.yml.j2
│ └── vars
│ │ └── main.yml
└── welcome
│ ├── files
│ └── site
│ │ ├── css
│ │ └── style.min.css
│ │ ├── font
│ │ ├── fontawesome-webfont.eot
│ │ ├── fontawesome-webfont.svg
│ │ ├── fontawesome-webfont.svgz
│ │ ├── fontawesome-webfont.ttf
│ │ ├── fontawesome-webfont.woff
│ │ ├── vagrundschriftd-lig-webfont.eot
│ │ ├── vagrundschriftd-lig-webfont.svg
│ │ ├── vagrundschriftd-lig-webfont.ttf
│ │ └── vagrundschriftd-lig-webfont.woff
│ │ ├── images
│ │ ├── bg-app-grid.png
│ │ ├── bg-footer.png
│ │ ├── bg-header.png
│ │ ├── engine-block-logo.jpg
│ │ ├── mujina-logo.jpg
│ │ ├── oauth-2-sm.png
│ │ ├── openconext-logo.jpg
│ │ ├── openid_connect-logo.png
│ │ ├── profile-logo.png
│ │ ├── sr-logo.png
│ │ └── teams-logo.png
│ │ └── js
│ │ └── tools
│ │ └── sizewatcher.js
│ ├── tasks
│ └── main.yml
│ └── templates
│ ├── site
│ └── index.html
│ └── welcome-vm.conf.j2
├── scripts
├── ansible-vault-convert.py
├── decrypt
├── encrypt
├── gen_certs.sh
├── gen_secrets.sh
├── gen_tink_keyset_oidc.sh
├── prep-dev-env.sh
├── syntax-check
├── syntax-jinja
├── syntax-json
└── syntax-yml
└── test_containers_playbook.yml
/.editorconfig:
--------------------------------------------------------------------------------
1 | # EditorConfig is awesome: http://EditorConfig.org
2 |
3 | # top-most EditorConfig file
4 | root = true
5 |
6 | # Unix-style newlines with a newline ending every file
7 | [*]
8 | end_of_line = lf
9 | insert_final_newline = true
10 | indent_style = space
11 | indent_size = 2
12 |
13 | [*.py]
14 | indent_size = 4
15 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: 2
3 | updates:
4 | - package-ecosystem: "github-actions"
5 | directory: "/"
6 | schedule:
7 | interval: "weekly"
8 |
--------------------------------------------------------------------------------
/.github/workflows/cleanup.yml:
--------------------------------------------------------------------------------
1 | name: Remove old package versions
2 | on:
3 | workflow_dispatch:
4 | inputs:
5 | package_name:
6 | description: 'The name of the package to delete'
7 | required: true
8 |
9 | jobs:
10 | remove-package-versions:
11 | runs-on: ubuntu-20.04
12 | steps:
13 | - name: purge packages
14 | uses: dylanratcliffe/delete-untagged-containers@main
15 | with:
16 | package_name: ${{ github.event.inputs.package_name }}
17 | token: ${{ secrets.GH_CONTAINER_REGISTRY_DELETE_CONTAINERS }}
18 |
19 |
--------------------------------------------------------------------------------
/.github/workflows/molecule-loadbalancer.yml:
--------------------------------------------------------------------------------
1 | # GitHub action to run Molecule for "loadbalancer"
2 |
3 | name: loadbalancer
4 | on:
5 | push:
6 | paths:
7 | - 'roles/haproxy/**'
8 | - 'roles/keepalived/**'
9 | - 'roles/bind/**'
10 | - 'molecule/loadbalancer/**'
11 | - 'roles/selfsigned_certs/**'
12 | - '.github/workflows/molecule-loadbalancer.yml'
13 | pull_request:
14 | paths:
15 | - 'roles/haproxy/**'
16 | - 'roles/keepalived/**'
17 | - 'roles/bind/**'
18 | - 'molecule/loadbalancer/**'
19 | - 'roles/selfsigned_certs/**'
20 | - '.github/workflows/molecule-loadbalancer.yml'
21 |
22 | jobs:
23 | build:
24 | runs-on: ubuntu-20.04
25 | steps:
26 | - uses: actions/checkout@v2
27 |
28 | - name: Set up Python 3.8
29 | uses: actions/setup-python@v2
30 | with:
31 | python-version: 3.8
32 |
33 | - name: Installing dependencies
34 | run: pip install jinja2 ansible molecule molecule-docker pytest-testinfra pytest setuptools
35 |
36 | - name: Run role tests
37 | run: molecule test -s loadbalancer
38 |
--------------------------------------------------------------------------------
/.github/workflows/molecule-mongo.yml:
--------------------------------------------------------------------------------
1 | # GitHub action to run Molecule for "mongo"
2 |
3 | name: mongo
4 | on:
5 | push:
6 | paths:
7 | - 'roles/mongo/**'
8 | - 'molecule/mongo/**'
9 | - '.github/workflows/molecule-mongo.yml'
10 | pull_request:
11 | paths:
12 | - 'roles/mongo/**'
13 | - 'molecule/mongo/**'
14 | - '.github/workflows/molecule-mongo.yml'
15 |
16 | jobs:
17 | build:
18 | runs-on: ubuntu-20.04
19 | steps:
20 | - uses: actions/checkout@v2
21 |
22 | - name: Set up Python 3.8
23 | uses: actions/setup-python@v2
24 | with:
25 | python-version: 3.8
26 |
27 | - name: Installing dependencies
28 | run: pip install jinja2 ansible molecule molecule-docker pytest-testinfra pytest setuptools
29 |
30 | - name: Run role tests
31 | run: molecule test -s mongo
32 |
--------------------------------------------------------------------------------
/.github/workflows/syntax.yml:
--------------------------------------------------------------------------------
1 | # GitHub action to run syntax check
2 |
3 | name: syntax
4 | on:
5 | push:
6 | paths:
7 | - 'environments/**'
8 | - 'group_vars/**'
9 | - 'roles/**'
10 | - '.github/workflows/syntax.yml'
11 | pull_request:
12 | paths:
13 | - 'environments/**'
14 | - 'group_vars/**'
15 | - 'roles/**'
16 | - '.github/workflows/syntax.yml'
17 |
18 | jobs:
19 | build:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v4
23 |
24 | - name: Set up Python 3.8
25 | uses: actions/setup-python@v5
26 | with:
27 | python-version: 3.8
28 |
29 | - name: Installing dependencies
30 | run: pip install jinja2 ansible
31 |
32 | - name: Run syntax check
33 | run: ./scripts/syntax-check
34 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv
2 | secrets_test.yml
3 | .vagrant
4 | .idea/
5 | .DS_Store
6 | delme
7 |
8 | *.pyc
9 | *.iml
10 | provision*.retry
11 |
12 | # ignore the directory where our decrypted keystore is kept during execution of playbooks
13 | .decrypted_keystore
14 |
15 | # ignore backup files
16 | *~
17 |
18 | # ignore the directories where external environment vars and external roles are put
19 | roles-external
20 | environments-external
21 | TODO.txt
22 | playbook_local.yml
23 |
--------------------------------------------------------------------------------
/config_docker_playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Read inventory secrets
3 | hosts: docker_servers
4 | become: true
5 | tasks:
6 | - name: Read vars from secrets file
7 | ansible.builtin.include_vars: "{{ inventory_dir }}/secrets/secrets.yml"
8 | no_log: true
9 | tags:
10 | - always
11 |
12 | - name: Configure docker servers
13 | hosts: docker_servers
14 | become: true
15 | roles:
16 | - { role: docker, tags: ['docker'] }
17 |
--------------------------------------------------------------------------------
/deploy_loadbalancers_playbook.yml:
--------------------------------------------------------------------------------
1 | ---
--------------------------------------------------------------------------------
/deploy_mariadb_playbook.yml:
--------------------------------------------------------------------------------
1 | ---
--------------------------------------------------------------------------------
/deploy_mongo_playbook.yml:
--------------------------------------------------------------------------------
1 | ---
--------------------------------------------------------------------------------
/environments/template/files/certs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/environments/template/files/certs/.gitkeep
--------------------------------------------------------------------------------
/environments/template/files/certs/invite/public_key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PUBLIC KEY-----
2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn6WGDICmAyApKaD+HKsI
3 | LsjKW6s/NFHmX84w0njlqxp0/0VWPnBdHAseTFS2uojaMHvRHs65p6WuUXMuizwM
4 | G96STAmwnbcwvLoteGSW/jtzv/q3MmuiVP6XfjAnztPhNDCCnh+9+EbJnKurIk1/
5 | yBboE4NRduRt/kKwHCwM5jFI7ryPx0w8UxGeKddHmJKYtd8UtA9OrnOfWEAVgjPm
6 | yQlguojUKoSxbuadVWEtNAJkWcCmzdXJjYSc+OCwYKYVRe668AUpytUk8uB7eL9z
7 | 9W21GNnKu77KCFaDp4G3IvKxpuMCQ+v60bUks9osSvaSD0u9Y3VsmPR3Gv1DD1nD
8 | hwIDAQAB
9 | -----END PUBLIC KEY-----
10 |
--------------------------------------------------------------------------------
/environments/template/files/certs/myconext/myconext_saml.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICgTCCAeoCCQCuVzyqFgMSyDANBgkqhkiG9w0BAQsFADCBhDELMAkGA1UEBhMC
3 | VVMxEzARBgNVBAgMCldhc2hpbmd0b24xEjAQBgNVBAcMCVZhbmNvdXZlcjEdMBsG
4 | A1UECgwUU3ByaW5nIFNlY3VyaXR5IFNBTUwxCzAJBgNVBAsMAnNwMSAwHgYDVQQD
5 | DBdzcC5zcHJpbmcuc2VjdXJpdHkuc2FtbDAeFw0xODA1MTQxNDMwNDRaFw0yODA1
6 | MTExNDMwNDRaMIGEMQswCQYDVQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjES
7 | MBAGA1UEBwwJVmFuY291dmVyMR0wGwYDVQQKDBRTcHJpbmcgU2VjdXJpdHkgU0FN
8 | TDELMAkGA1UECwwCc3AxIDAeBgNVBAMMF3NwLnNwcmluZy5zZWN1cml0eS5zYW1s
9 | MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRu7/EI0BlNzMEBFVAcbx+lLos
10 | vzIWU+01dGTY8gBdhMQNYKZ92lMceo2CuVJ66cUURPym3i7nGGzoSnAxAre+0YIM
11 | +U0razrWtAUE735bkcqELZkOTZLelaoOztmWqRbe5OuEmpewH7cx+kNgcVjdctOG
12 | y3Q6x+I4qakY/9qhBQIDAQABMA0GCSqGSIb3DQEBCwUAA4GBAAeViTvHOyQopWEi
13 | XOfI2Z9eukwrSknDwq/zscR0YxwwqDBMt/QdAODfSwAfnciiYLkmEjlozWRtOeN+
14 | qK7UFgP1bRl5qksrYX5S0z2iGJh0GvonLUt3e20Ssfl5tTEDDnAEUMLfBkyaxEHD
15 | RZ/nbTJ7VTeZOSyRoVn5XHhpuJ0B
16 | -----END CERTIFICATE-----
--------------------------------------------------------------------------------
/environments/template/files/eb/languages/overrides.en.php:
--------------------------------------------------------------------------------
1 | <?php
2 |
3 | return [
4 |     'suite_name' => 'OpenConext',
5 | ];
6 |
--------------------------------------------------------------------------------
/environments/template/files/eb/languages/overrides.nl.php:
--------------------------------------------------------------------------------
1 | <?php
2 |
3 | return [
4 |     'suite_name' => 'OpenConext',
5 | ];
6 |
--------------------------------------------------------------------------------
/environments/template/host_vars/template.yml:
--------------------------------------------------------------------------------
1 | myconext_cronjobmaster: false
2 | mongo_replication_role: primary
3 |
--------------------------------------------------------------------------------
/environments/template/inventory:
--------------------------------------------------------------------------------
1 | [storage]
2 | %target_host%
3 |
4 | [mongo_servers]
5 | %target_host%
6 |
7 | [selfsigned_certs]
8 | %target_host%
9 |
10 | [loadbalancer]
11 | %target_host%
12 |
13 | [%env%:children]
14 | storage
15 | mongo_servers
16 | selfsigned_certs
17 | sysloghost
18 | loadbalancer_ha
19 | loadbalancer
20 | elk
21 | lifecycle
22 | dbcluster
23 | dbcluster_nodes
24 | stats
25 |
26 | [sysloghost]
27 | [loadbalancer_ha]
28 | [elk]
29 | [lifecycle]
30 | [dbcluster]
31 | [dbcluster_nodes]
32 | [stats]
33 |
34 | [base:children]
35 | loadbalancer
36 | storage
37 | dbcluster
38 | sysloghost
39 | elk
40 | lifecycle
41 | selfsigned_certs
42 |
43 | [loadbalancer:children]
44 | loadbalancer_ha
45 |
46 | [frontend:children]
47 | lifecycle
48 |
49 | [db_mysql:children]
50 | storage
51 | dbcluster
52 | dbcluster_nodes
53 |
54 | [local]
55 | localhost ansible_connection=local
56 |
57 | # for refactored playbooks
58 | [base:children]
59 | docker_servers
60 |
61 | [docker_servers]
62 | docker1.example.com
63 | docker2.example.com
64 |
65 | [docker_invite]
66 | docker2.example.com
--------------------------------------------------------------------------------
/environments/vm/files/certs/invite/public_key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN PUBLIC KEY-----
2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAn6WGDICmAyApKaD+HKsI
3 | LsjKW6s/NFHmX84w0njlqxp0/0VWPnBdHAseTFS2uojaMHvRHs65p6WuUXMuizwM
4 | G96STAmwnbcwvLoteGSW/jtzv/q3MmuiVP6XfjAnztPhNDCCnh+9+EbJnKurIk1/
5 | yBboE4NRduRt/kKwHCwM5jFI7ryPx0w8UxGeKddHmJKYtd8UtA9OrnOfWEAVgjPm
6 | yQlguojUKoSxbuadVWEtNAJkWcCmzdXJjYSc+OCwYKYVRe668AUpytUk8uB7eL9z
7 | 9W21GNnKu77KCFaDp4G3IvKxpuMCQ+v60bUks9osSvaSD0u9Y3VsmPR3Gv1DD1nD
8 | hwIDAQAB
9 | -----END PUBLIC KEY-----
10 |
--------------------------------------------------------------------------------
/environments/vm/files/certs/myconext/myconext_saml.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICgTCCAeoCCQCuVzyqFgMSyDANBgkqhkiG9w0BAQsFADCBhDELMAkGA1UEBhMC
3 | VVMxEzARBgNVBAgMCldhc2hpbmd0b24xEjAQBgNVBAcMCVZhbmNvdXZlcjEdMBsG
4 | A1UECgwUU3ByaW5nIFNlY3VyaXR5IFNBTUwxCzAJBgNVBAsMAnNwMSAwHgYDVQQD
5 | DBdzcC5zcHJpbmcuc2VjdXJpdHkuc2FtbDAeFw0xODA1MTQxNDMwNDRaFw0yODA1
6 | MTExNDMwNDRaMIGEMQswCQYDVQQGEwJVUzETMBEGA1UECAwKV2FzaGluZ3RvbjES
7 | MBAGA1UEBwwJVmFuY291dmVyMR0wGwYDVQQKDBRTcHJpbmcgU2VjdXJpdHkgU0FN
8 | TDELMAkGA1UECwwCc3AxIDAeBgNVBAMMF3NwLnNwcmluZy5zZWN1cml0eS5zYW1s
9 | MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDRu7/EI0BlNzMEBFVAcbx+lLos
10 | vzIWU+01dGTY8gBdhMQNYKZ92lMceo2CuVJ66cUURPym3i7nGGzoSnAxAre+0YIM
11 | +U0razrWtAUE735bkcqELZkOTZLelaoOztmWqRbe5OuEmpewH7cx+kNgcVjdctOG
12 | y3Q6x+I4qakY/9qhBQIDAQABMA0GCSqGSIb3DQEBCwUAA4GBAAeViTvHOyQopWEi
13 | XOfI2Z9eukwrSknDwq/zscR0YxwwqDBMt/QdAODfSwAfnciiYLkmEjlozWRtOeN+
14 | qK7UFgP1bRl5qksrYX5S0z2iGJh0GvonLUt3e20Ssfl5tTEDDnAEUMLfBkyaxEHD
15 | RZ/nbTJ7VTeZOSyRoVn5XHhpuJ0B
16 | -----END CERTIFICATE-----
--------------------------------------------------------------------------------
/environments/vm/files/certs/oidc/oidcsaml.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDmzCCAoOgAwIBAgIUECe8UKL5w/tmcNfotgIBAwaQ3uswDQYJKoZIhvcNAQEL
3 | BQAwbDELMAkGA1UEBhMCTkwxGjAYBgNVBAoMEXZtLm9wZW5jb25leHQub3JnMRow
4 | GAYDVQQDDBF2bS5vcGVuY29uZXh0Lm9yZzElMCMGCSqGSIb3DQEJARYWaW5mb0B2
5 | bS5vcGVuY29uZXh0Lm9yZzAeFw0xOTA2MTMxODQ0MjJaFw0yMjA2MTIxODQ0MjJa
6 | MGwxCzAJBgNVBAYTAk5MMRowGAYDVQQKDBF2bS5vcGVuY29uZXh0Lm9yZzEaMBgG
7 | A1UEAwwRdm0ub3BlbmNvbmV4dC5vcmcxJTAjBgkqhkiG9w0BCQEWFmluZm9Adm0u
8 | b3BlbmNvbmV4dC5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb
9 | xgEI0FBwE0uiUCHU+zvEg1RA9i6IZyEUkt9OIIS4iQIgTcsJO5fhfpNDw4dBpxOE
10 | 1tpr/YvN0XiH05gSYdALk7q4neTeZats6S7PkknpCeCHQkhrXwK91Z/ENgnCMLUZ
11 | Vf/a0j1Xyu+iHvWVNCN73QFehV67qotLl26ZiWMxVWJ6PFktpIx3Pq4bfUaKDFaA
12 | KIJqycEFjoTEdf2AV8/PkoHkqbEWxWnKYi9tr2EYHt40dX9EaX28jAVjP3VHMO0+
13 | pq/mIYqTTxUtmL6+TcVrywFIOKUMhNmw7pvcHqcbC0rue9aLnqYjOaDdUMIgoN/s
14 | 2/ei0dYfwEspDh0IR57bAgMBAAGjNTAzMDEGA1UdEQQqMCiCEXZtLm9wZW5jb25l
15 | eHQub3JnghMqLnZtLm9wZW5jb25leHQub3JnMA0GCSqGSIb3DQEBCwUAA4IBAQCz
16 | 4xtDP6umXDDKc35auhAnT07HvB+JnZ9MBfOsS/JlkuaQfZHTVifywDXJ5L+jGdtT
17 | aYbIDbUx4YS/PQmyrgmb9EfQhwyDIT/+LJIJ/xk9OZyHRiMrVJAWLPAoO5DK/9VE
18 | TRrePYcVQUDeSwNXkDpLYq89CqXJr28UIBLz/mpkZASSDgSCDUqEwzggVRKrUV1I
19 | 9aoy1OLO2l1lFOwLSrPzKtECeqi1pJxYFHZENNeEsrtKrV9LfJlKecGIr54LIX5F
20 | BUvHglk06vFZxn8f6sotyf/9c61mugd4WaboUq41Pzo5eSX45au9LYplLzsh/5pC
21 | dhrimFAx9UWFfKRFuIrV
22 | -----END CERTIFICATE-----
23 |
--------------------------------------------------------------------------------
/environments/vm/files/eb/languages/overrides.en.php:
--------------------------------------------------------------------------------
1 | <?php
2 |
3 | return [
4 |     'suite_name' => '{{ instance_name }}',
5 | ];
6 |
--------------------------------------------------------------------------------
/environments/vm/files/eb/languages/overrides.nl.php:
--------------------------------------------------------------------------------
1 | <?php
2 |
3 | return [
4 |     'suite_name' => '{{ instance_name }}',
5 | ];
6 |
--------------------------------------------------------------------------------
/environments/vm/group_vars/dev.yml:
--------------------------------------------------------------------------------
1 | php_opcode_validate_timestamps: 1
2 | engine_apache_symfony_environment: dev
3 | develop: true
4 |
--------------------------------------------------------------------------------
/environments/vm/host_vars/192.168.66.99.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mongo_replication_role: primary
3 |
--------------------------------------------------------------------------------
/filter_plugins/depem.py:
--------------------------------------------------------------------------------
1 | # depem: Strip PEM headers and remove all whitespace from string
2 | # Usage: {{ foo | depem }}
3 |
4 | def depem(string):
5 | import re
6 |
7 | return re.sub(r'\s+|(-----(BEGIN|END).*-----)', '', string)
8 |
9 |
10 | class FilterModule(object):
11 | @staticmethod
12 | def filters():
13 | return {
14 | 'depem': depem,
15 | }
16 |
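17 | # Illustrative usage with a hypothetical variable "cert" holding a PEM certificate:
18 | #   {{ cert | depem }}
19 | # strips the "-----BEGIN/END CERTIFICATE-----" lines and all whitespace,
20 | # leaving only the bare base64 payload on a single line.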
--------------------------------------------------------------------------------
/group_vars/java-apps-common.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Settings that are identical in every environment (vm, test etc)
3 | tomcat:
4 | install_dir: /etc/tomcat
5 | share_dir: /usr/share/tomcat
6 | service_name: tomcat
7 | server_port: 8005
8 | redirect_port: 8443
9 | properties_dir: appconf
10 | lib_dir: /var/lib/tomcat
11 | app_base_dir_name: appBase
12 | cache_dir: /var/cache/tomcat
13 | config_file: /etc/tomcat/tomcat.conf
14 |
--------------------------------------------------------------------------------
/group_vars/local-certs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | base_domain: vm.openconext.org
3 |
4 | certs:
5 | subject: /O=OpenConext/commonName=*.{{ base_domain }}
6 |
7 | tls:
8 | key_name: star.{{ base_domain }}.key
9 | crt_name: star.{{ base_domain }}.pem
10 | bundle_name: star.{{ base_domain }}_ca_bundle.pem
11 |
--------------------------------------------------------------------------------
/group_vars/minimal.yml:
--------------------------------------------------------------------------------
1 | # Default settings to support the minimal installation setup
2 | apache_app_listen_address:
3 | all: 0.0.0.0
4 |
--------------------------------------------------------------------------------
/molecule/Dockerfile-Rocky8.j2:
--------------------------------------------------------------------------------
1 | FROM rockylinux:8
2 |
3 | ENV container docker
4 |
5 | RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
6 | rm -f /lib/systemd/system/multi-user.target.wants/*;\
7 | rm -f /etc/systemd/system/*.wants/*;\
8 | rm -f /lib/systemd/system/local-fs.target.wants/*; \
9 | rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
10 | rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
11 | rm -f /lib/systemd/system/basic.target.wants/*;\
12 | rm -f /lib/systemd/system/anaconda.target.wants/*;
13 |
14 | # install ansible
15 | RUN yum clean all && \
16 | yum -y update && \
17 | yum -y install epel-release && \
18 | yum -y install git ansible iproute
19 | RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts
20 |
21 | VOLUME [ "/sys/fs/cgroup" ]
22 |
23 | CMD ["/usr/sbin/init"]
24 |
--------------------------------------------------------------------------------
/molecule/Dockerfile-Rocky9.j2:
--------------------------------------------------------------------------------
1 | FROM rockylinux:9
2 | ENV container=docker
3 |
4 | RUN rm -f /lib/systemd/system/multi-user.target.wants/*;\
5 | rm -f /etc/systemd/system/*.wants/*;\
6 | rm -f /lib/systemd/system/local-fs.target.wants/*; \
7 | rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
8 | rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
9 | rm -f /lib/systemd/system/basic.target.wants/*;\
10 | rm -f /lib/systemd/system/anaconda.target.wants/*;
11 |
12 | RUN yum clean all && \
13 | yum -y update && \
14 | yum -y install epel-release && \
15 | yum -y install git ansible iproute systemd cronie procps-ng
16 | RUN echo -e '[local]\nlocalhost' > /etc/ansible/hosts
17 |
18 | VOLUME [ "/sys/fs/cgroup" ]
19 |
20 | CMD ["/usr/sbin/init"]
21 |
--------------------------------------------------------------------------------
/molecule/galera/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 | name: galaxy
4 | driver:
5 | name: docker
6 |
7 | platforms:
8 | - name: openconext-rocky8-mysql
9 | image: rocky8-ansible
10 | dockerfile: ../Dockerfile-Rocky8.j2
11 | command: /usr/sbin/init
12 | privileged: True
13 | networks:
14 | - name: mariadb
15 | volumes:
16 | - /sys/fs/cgroup:/sys/fs/cgroup:ro
17 | groups:
18 | - storage
19 | - dbcluster
20 | - dbcluster_nodes
21 | - name: openconext-rocky8-mysql-2
22 | image: rocky8-ansible
23 | dockerfile: ../Dockerfile-Rocky8.j2
24 | command: /usr/sbin/init
25 | privileged: True
26 | networks:
27 | - name: mariadb
28 | volumes:
29 | - /sys/fs/cgroup:/sys/fs/cgroup:ro
30 | groups:
31 | - storage
32 | - dbcluster
33 | - dbcluster_nodes
34 |
35 | provisioner:
36 | name: ansible
37 | env:
38 | ANSIBLE_REMOTE_TMP: /tmp/
39 | ANSIBLE_ROLES_PATH: ../../roles
40 | ANSIBLE_FILTER_PLUGINS: ../../filter_plugins
41 | inventory:
42 | links:
43 | group_vars: ../../group_vars
44 |
--------------------------------------------------------------------------------
/molecule/loadbalancer/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Converge
3 | hosts: all
4 |
5 | vars:
6 | inventory_dir: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/../../environments/vm"
7 |
8 | pre_tasks:
9 | - name: Read vars from secrets file
10 | include_vars: "../../environments/vm/secrets/vm.yml"
11 | - name: Read vars from group_vars file
12 | include_vars: "../../environments/vm/group_vars/vm.yml"
13 |
14 | - name: "Override some defaults"
15 | set_fact:
16 | base_domain: molecule.openconext.org
17 | use_selfsigned_certs: True
18 |
19 | roles:
20 | - role: haproxy
21 | - role: keepalived
22 | keepalived:
23 | state_master: MASTER
24 | state_backup: BACKUP
25 | masterprio: 101
26 | backupprio: 100
27 | keepalived_loadbalancer_vrrp_password: mysecret
28 | - role: bind
29 | haproxy_sni_ip_restricted:
30 | ipv4: 127.0.0.2
31 | ipv6: "::1"
32 | certs:
33 | - name: star
34 | key_content: "{{ https_star_private_key }}"
35 | crt_name: star.{{ base_domain }}.pem
36 |
--------------------------------------------------------------------------------
/molecule/loadbalancer/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 | name: galaxy
4 | driver:
5 | name: docker
6 |
7 | platforms:
8 | - name: openconext-centos7-loadbalancer
9 | image: centos7-ansible
10 | dockerfile: ../Dockerfile.j2
11 | command: /usr/sbin/init
12 | privileged: true
13 | volumes:
14 | - /sys/fs/cgroup:/sys/fs/cgroup:ro
15 | groups:
16 | - "loadbalancer-vm"
17 | - loadbalancer_ha
18 | - selfsigned_certs
19 | - loadbalancer
20 |
21 | provisioner:
22 | name: ansible
23 | env:
24 | ANSIBLE_REMOTE_TMP: /tmp/
25 | ANSIBLE_ROLES_PATH: ../../roles
26 | ANSIBLE_FILTER_PLUGINS: ../../filter_plugins
27 | inventory:
28 | links:
29 | group_vars: ../../group_vars
30 |
31 | scenario:
32 | test_sequence:
33 | - dependency
34 | - lint
35 | - cleanup
36 | - destroy
37 | - syntax
38 | - create
39 | - prepare
40 | - converge
41 | # No idempotence due to bind zone transfer (template with epoch seconds)
42 | # - idempotence
43 | - side_effect
44 | - verify
45 | - cleanup
46 | - destroy
47 | verifier:
48 | name: testinfra
49 |
--------------------------------------------------------------------------------
/molecule/loadbalancer/prepare.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Prepare
3 | hosts: all
4 | vars:
5 | inventory_dir: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/../../environments/vm"
6 |
7 | pre_tasks:
8 | - name: Read vars from secrets file
9 | include_vars: "../../environments/vm/secrets/vm.yml"
10 | - name: Read vars from group_vars file
11 | include_vars: "../../environments/vm/group_vars/vm.yml"
12 |
13 | - name: "Override some defaults"
14 | set_fact:
15 | base_domain: molecule.openconext.org
16 |
17 | - name: "Install package(s)"
18 | yum:
19 | name: crontabs
20 | state: present
21 | register: prepare_packages_installed
22 | until: prepare_packages_installed is succeeded
23 |
24 | roles:
25 | - role: selfsigned_certs
26 |
--------------------------------------------------------------------------------
/molecule/mongo/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Converge
3 | hosts: all
4 |
5 | vars:
6 | inventory_dir: "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}/../../environments/vm"
7 |
8 | pre_tasks:
9 | - name: Read vars from secrets file
10 | include_vars: "../../environments/vm/secrets/vm.yml"
11 | - name: Read vars from group_vars file
12 | include_vars: "../../environments/vm/group_vars/vm.yml"
13 |
14 | - name: "Set some facts"
15 | set_fact:
16 | mongo_replication_role: primary
17 | mongo_tls_host_altname_dnsorip: DNS
18 |
19 | roles:
20 | - role: mongo
21 |
--------------------------------------------------------------------------------
/molecule/mongo/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 | name: galaxy
4 | driver:
5 | name: docker
6 |
7 | platforms:
8 | - name: openconext-rocky9-mongod
9 | dockerfile: ../Dockerfile-Rocky9.j2
10 | image: rockylinux:9
11 | command: /usr/sbin/init
12 | privileged: true
13 | volumes:
14 | - /sys/fs/cgroup:/sys/fs/cgroup:ro
15 | groups:
16 | - mongo_servers
17 |
18 | provisioner:
19 | name: ansible
20 | env:
21 | ANSIBLE_REMOTE_TMP: /tmp/
22 | ANSIBLE_ROLES_PATH: ../../roles
23 | ANSIBLE_FILTER_PLUGINS: ../../filter_plugins
24 | inventory:
25 | links:
26 | group_vars: ../../group_vars
27 |
28 | verifier:
29 | name: testinfra
30 |
--------------------------------------------------------------------------------
/playbook_haproxy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: loadbalancer_ha
3 | become: false
4 | gather_facts: no
5 | roles:
6 | - { role: haproxy_mgnt, tags: ['haproxy_mgnt'] }
7 | - { role: haproxy_acls, tags: ['haproxy_acls'] }
8 |
--------------------------------------------------------------------------------
/roles/apachefpm/files/stepuplogging.conf:
--------------------------------------------------------------------------------
1 |
2 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" \"%{X-Forwarded-For}i\" \"%{x-stepup-request-id}o\"" stepup
3 |
4 |
5 |
--------------------------------------------------------------------------------
/roles/apachefpm/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart phpfpm
3 | service:
4 | name: php72-php-fpm
5 | state: restarted
6 |
7 | - name: restart httpd
8 | service:
9 | name: httpd
10 | state: restarted
11 |
--------------------------------------------------------------------------------
/roles/apachefpm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add group {{ appname }}
3 | group:
4 | name: "{{ appname }}"
5 | state: present
6 |
7 | - name: Add user {{ appname }}
8 | user:
9 | name: "{{ appname }}"
10 | group: "{{ appname }}"
11 | createhome: no
12 | state: present
13 |
14 | - name: Create directory for vhosts to store PHP sessions
15 | file:
16 | path: "{{ php_session_dir}}/{{ appname }}"
17 | state: directory
18 | owner: "{{ appname }}"
19 | group: root
20 | mode: 0770
21 |
22 | - name: Add stepup logformat config
23 | copy:
24 | src: stepuplogging.conf
25 | dest: /etc/httpd/conf.d/stepuplogging.conf
26 | notify:
27 | - "restart httpd"
28 |
29 | - name: Install Apache vhost
30 | template:
31 | src: "apachevhost.j2"
32 | dest: "/etc/httpd/conf.d/{{ appname }}.conf"
33 | notify:
34 | - "restart httpd"
35 |
36 | - name: Install php-fpm 72 config
37 | template:
38 | src: "phpfpm.j2"
39 | dest: "/etc/opt/remi/php72/php-fpm.d/{{ appname }}.conf"
40 | notify:
41 | - "restart phpfpm"
42 |
--------------------------------------------------------------------------------
/roles/apachefpm/templates/phpfpm.j2:
--------------------------------------------------------------------------------
1 | [{{ appname }}]
2 | listen = /var/run/php-fpm/{{ appname }}-pool-72.sock
3 | listen.allowed_clients = 127.0.0.1
4 | listen.owner = apache
5 | listen.group = apache
6 | listen.mode = 0640
7 | user = {{ appname }}
8 | group = {{ appname }}
9 | pm = dynamic
10 | pm.max_children = 40
11 | pm.start_servers = 5
12 | pm.min_spare_servers = 5
13 | pm.max_spare_servers = 35
14 | pm.status_path = /status
15 | php_admin_flag[log_errors] = on
16 | php_admin_value[memory_limit] = {{ fpmmemory | default('128M') }}
17 | php_value[session.save_handler] = files
18 | php_value[session.save_path] = {{ php_session_dir }}/{{ appname }}
19 | php_value[disable_functions] = {{ php_disabled_functions }}
20 | php_value[session.cookie_domain] = {{ vhost_name }}
21 | php_value[session.cookie_lifetime] = 0
22 | php_value[session.gc_maxlifetime] = {{ app_session_expiry_time }}
23 |
--------------------------------------------------------------------------------
/roles/attribute-aggregation/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | attribute_aggregation_pseudo_mail_postfix: demo.openconext.org
3 | attribute_aggregation_pseudo_emails_retention_days_period: 90
4 | aa_cronjobmaster: true
5 | attribute_aggregator_api_lifecycle_username: attribute_aggregator_api_lifecycle_user
6 | aa_oauth2_token_url: "https://connect.{{ base_domain }}/oidc/token"
7 | aa_manage_provision_oidcrp_name_en: "AA client credentials client for VOOT access"
8 | aa_manage_provision_oidcrp_description_en: "OAuth client to access VOOT for group information"
9 | aa_manage_provision_oidcrp_grants: "client_credentials"
10 | aa_manage_provision_oidcrp_allowed_resource_servers: '{"name": "{{ voot.oidcng_checkToken_clientId }}"}'
11 | aa_spring_flyway_enabled: true
12 | aa_docker_networks:
13 | - name: loadbalancer
14 |
--------------------------------------------------------------------------------
/roles/attribute-aggregation/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart attribute-aggregationserver
2 | community.docker.docker_container:
3 | name: aaserver
4 | state: started
5 | restart: true
6 | # avoid restarting: it creates unexpected data loss according to the docker_container module notes
7 | comparisons:
8 | '*': ignore
9 | when: aaservercontainer is success and aaservercontainer is not change
10 |
--------------------------------------------------------------------------------
/roles/attribute-aggregation/templates/apachelink.conf.j2:
--------------------------------------------------------------------------------
1 | ServerName https://${HTTPD_SERVERNAME}
2 | RewriteEngine on
3 | RewriteCond %{REQUEST_URI} !\.html$
4 | RewriteCond %{REQUEST_URI} !^/aa/
5 | RewriteCond %{REQUEST_URI} !^/internal/
6 | RewriteCond %{REQUEST_URI} !^/redirect
7 | RewriteCond %{REQUEST_URI} !^/fonts/
8 | RewriteCond %{REQUEST_URI} !^/orcid
9 | RewriteRule (.*) /index.html [L]
10 |
11 | Redirect /orcid https://link.{{ base_domain }}/aa/api/client/information.html
12 | ProxyPass /Shibboleth.sso !
13 |
14 | ProxyPass /redirect http://aaserver:8080/aa/api/redirect
15 | ProxyPass /internal/health http://aaserver:8080/aa/api/internal/health
16 | ProxyPass /internal/info http://aaserver:8080/aa/api/internal/info
17 |
18 | ProxyPass /aa/api http://aaserver:8080/aa/api
19 | ProxyPassReverse /aa/api http://aaserver:8080/aa/api
20 | ProxyPassReverse /aa/api/client http://aaserver:8080/aa/api/client
21 |
22 | Header always set X-Frame-Options "DENY"
23 | Header always set Referrer-Policy "strict-origin-when-cross-origin"
24 | Header always set X-Content-Type-Options "nosniff"
25 |
--------------------------------------------------------------------------------
/roles/attribute-aggregation/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 |
3 |
4 |
5 |
6 |
7 | %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 | {{ smtp_server }}
16 | {{ noreply_email }}
17 | {{ error_mail_to }}
18 | {{ error_subject_prefix }}Unexpected error attribute-aggregation
19 |
20 |
21 |
22 | ERROR
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
--------------------------------------------------------------------------------
/roles/attribute-aggregation/templates/serviceProviderConfig.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "https://mock-sp": {
3 | "baseUrl": "http://localhost:8989/",
4 | "userName": "api-user",
5 | "password": "secret"
6 | },
7 | "https://mock-rp": {
8 | "baseUrl": "http://localhost:8989",
9 | "userName": "api-user",
10 | "password": "secret"
11 | }
12 | }
--------------------------------------------------------------------------------
/roles/attribute-aggregation/vars/main.yml:
--------------------------------------------------------------------------------
1 | manage_provision_oidcrp_client_id: "{{ aa.authz_client_id }}"
2 | manage_provision_oidcrp_secret: "{{ aa.authz_secret }}"
3 | manage_provision_oidcrp_name_en: "{{ aa_manage_provision_oidcrp_name_en }}"
4 | manage_provision_oidcrp_description_en: "{{ aa_manage_provision_oidcrp_description_en }}"
5 | manage_provision_oidcrp_grants: "{{ aa_manage_provision_oidcrp_grants }}"
6 | manage_provision_oidcrp_allowed_resource_servers: "{{ aa_manage_provision_oidcrp_allowed_resource_servers }}"
7 |
--------------------------------------------------------------------------------
/roles/bind/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dns_create_keys: False
3 | dns_work_dir: "/var/cache/bind"
4 |
--------------------------------------------------------------------------------
/roles/bind/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart bind
3 | service:
4 | name: "named"
5 | state: "restarted"
6 |
7 | - name: reload bind
8 | service:
9 | name: "named"
10 | state: "reloaded"
11 |
--------------------------------------------------------------------------------
/roles/bind/tasks/create_keys.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create KSK
3 | ansible.builtin.command: |
4 | dnssec-keygen -K {{ dns_work_dir }}/keys -f KSK -3 -a RSASHA256 -b 2048 -n ZONE "hb.{{ base_domain }}"
5 | creates: "{{ dns_work_dir }}/keys/Khb.{{ base_domain }}.+*"
6 |
7 | - name: create ZSK
8 | ansible.builtin.command: |
9 | dnssec-keygen -K {{ dns_work_dir }}/keys -3 -a RSASHA256 -b 1024 -n ZONE "hb.{{ base_domain }}"
10 | creates: "{{ dns_work_dir }}/keys/Khb.{{ base_domain }}.+*"
11 |
12 | - name: chown the keys
13 | ansible.builtin.file:
14 | dest: "{{ dns_work_dir }}/keys"
15 | owner: "named"
16 | group: "named"
17 | mode: "u=rwX,go="
18 | recurse: true
19 |
--------------------------------------------------------------------------------
/roles/bind/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install bind packages
3 | ansible.builtin.apt:
4 | name:
5 | - "bind9"
6 | state: "present"
7 |
8 | - name: create bind directories
9 | ansible.builtin.file:
10 | dest: "{{ dns_work_dir }}/{{ item }}"
11 | state: "directory"
12 | owner: "bind"
13 | group: "bind"
14 | mode: "0750"
15 | loop:
16 | - "master"
17 | - "keys"
18 |
19 | - name: configure bind
20 | ansible.builtin.template:
21 | src: "{{ item }}.j2"
22 | dest: "/etc/bind/{{ item }}"
23 | owner: "root"
24 | group: "bind"
25 | mode: "0640"
26 | loop:
27 | - "named.conf.options"
28 | - "named.conf.local"
29 | notify:
30 | - "restart bind"
31 |
32 | - name: copy ha zone file
33 | ansible.builtin.template:
34 | src: "ha_zone_file.j2"
35 | dest: "{{ dns_work_dir }}/master/hb.{{ base_domain }}.db"
36 | owner: "bind"
37 | group: "bind"
38 | mode: "0640"
39 | notify:
40 | - "reload bind"
41 |
42 | - name: enable bind service
43 | ansible.builtin.service:
44 | name: "named"
45 | state: "started"
46 | enabled: true
47 |
--------------------------------------------------------------------------------
/roles/bind/templates/ha_zone_file.j2:
--------------------------------------------------------------------------------
1 | $TTL 4h
2 | $ORIGIN hb.{{ base_domain }}.
3 | @ IN SOA ns1hb.{{ base_domain }}. {{ admin_email | replace('@', '.') }}. (
4 | {{ ansible_date_time.epoch }} ; Serial
5 | 1d ; slave refresh (1 day)
6 | 2h ; slave retry time in case of a problem (2 hours)
7 | 2w ; slave expiration time (2 weeks)
8 | 2d ; minimum caching time in case of failed lookups (2 days)
9 | )
10 | IN NS ns1hb.{{ base_domain }}.
11 | IN NS ns2hb.{{ base_domain }}.
12 | IN MX 0 .
13 | IN TXT "v=spf1 -all"
14 | IN TXT "v=DMARC1; p=reject;"
15 | restricted 300 IN A {{ haproxy_sni_ip_restricted.ipv4 }}
16 | restricted 300 IN AAAA {{ haproxy_sni_ip_restricted.ipv6 }}
17 | unrestricted 300 IN A {{ haproxy_sni_ip.ipv4 }}
18 | unrestricted 300 IN AAAA {{ haproxy_sni_ip.ipv6 }}
19 |
--------------------------------------------------------------------------------
/roles/bind/templates/named.conf.local.j2:
--------------------------------------------------------------------------------
1 | zone "hb.{{ base_domain }}" {
2 | type master;
3 | file "master/hb.{{ base_domain }}.db";
4 | dnssec-policy default;
5 | inline-signing yes;
6 | };
7 |
--------------------------------------------------------------------------------
/roles/bind/templates/named.conf.options.j2:
--------------------------------------------------------------------------------
1 | options
2 | {
3 | directory "{{ dns_work_dir }}"; // "Working" directory
4 | dump-file "data/cache_dump.db";
5 | statistics-file "data/named_stats.txt";
6 | memstatistics-file "data/named_mem_stats.txt";
7 | listen-on port 53 { any; };
8 | listen-on-v6 port 53 { any; };
9 | allow-query { any; };
10 | allow-transfer { none; };
11 | recursion no;
12 | pid-file "/run/named/named.pid";
13 | session-keyfile "/run/named/session.key";
14 | key-directory "keys";
15 | bindkeys-file "/etc/bind/bind.keys";
16 | version "{{ instance_name }} DNS server";
17 | };
18 |
--------------------------------------------------------------------------------
/roles/common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | sendmail_smarthost: localhost
2 | sendmail_smarthost_port: 25
3 | handle_pseudo_email: false
4 | postfix_interfaces: all
5 |
6 |
--------------------------------------------------------------------------------
/roles/common/files/journald.conf:
--------------------------------------------------------------------------------
1 | # This file is part of systemd.
2 | #
3 | # systemd is free software; you can redistribute it and/or modify it
4 | # under the terms of the GNU Lesser General Public License as published by
5 | # the Free Software Foundation; either version 2.1 of the License, or
6 | # (at your option) any later version.
7 | #
8 | # Entries in this file show the compile time defaults.
9 | # You can change settings by editing this file.
10 | # Defaults can be restored by simply deleting this file.
11 | #
12 | # See journald.conf(5) for details.
13 |
14 | [Journal]
15 | #Storage=auto
16 | Compress=no
17 | #Seal=yes
18 | #SplitMode=uid
19 | #SyncIntervalSec=5m
20 | RateLimitInterval=30s
21 | RateLimitBurst=20000
22 | #SystemMaxUse=
23 | #SystemKeepFree=
24 | #SystemMaxFileSize=
25 | #RuntimeMaxUse=
26 | #RuntimeKeepFree=
27 | #RuntimeMaxFileSize=
28 | #MaxRetentionSec=
29 | #MaxFileSec=1month
30 | #ForwardToSyslog=no
31 | #ForwardToKMsg=no
32 | #ForwardToConsole=no
33 | #ForwardToWall=yes
34 | #TTYPath=/dev/console
35 | #MaxLevelStore=debug
36 | #MaxLevelSyslog=debug
37 | #MaxLevelKMsg=notice
38 | #MaxLevelConsole=info
39 | #MaxLevelWall=emerg
40 |
41 |
--------------------------------------------------------------------------------
/roles/common/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: restart iptables
4 | service:
5 | name: iptables
6 | state: restarted
7 |
8 | - name: restart ip6tables
9 | service:
10 | name: ip6tables
11 | state: restarted
12 |
13 | - name: restart journald
14 | service:
15 | name: systemd-journald
16 | state: restarted
17 |
18 | - name: reload postfix
19 | service:
20 | name: postfix
21 | state: reloaded
22 |
23 | - name: rebuild header_checks
24 | command: /usr/sbin/postmap header_checks
25 | args:
26 | chdir: /etc/postfix
27 |
--------------------------------------------------------------------------------
/roles/common/templates/header_checks.j2:
--------------------------------------------------------------------------------
1 | /^Received:/ IGNORE
2 | /^X-Originating-IP:/ IGNORE
3 | /^X-Mailer:/ IGNORE
4 | /^Mime-Version: ([\d.]+)/ REPLACE Mime-Version: $1
5 |
--------------------------------------------------------------------------------
/roles/dashboard/defaults/main.yml:
--------------------------------------------------------------------------------
1 | dashboard_organization: SURFconext
2 | dashboard_hide_tabs: none
3 |
--------------------------------------------------------------------------------
/roles/dashboard/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart dashboardserver
2 | community.docker.docker_container:
3 | name: dashboardserver
4 | state: started
5 | restart: true
6 | # avoid recreating the container, as that causes unexpected data loss according to the docker_container module notes
7 | comparisons:
8 | '*': ignore
9 | when: dashboardservercontainer is success and dashboardservercontainer is not change
10 |
--------------------------------------------------------------------------------
/roles/dashboard/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | <?xml version="1.0" encoding="UTF-8"?>
3 | <configuration>
4 | <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
5 | <encoder>
6 | <pattern>%d{ISO8601} %5p [%t] %logger{40}:%L - %m%n</pattern>
7 | </encoder>
8 | </appender>
9 | <appender name="EMAIL" class="ch.qos.logback.classic.net.SMTPAppender">
10 | <smtpHost>{{ smtp_server }}</smtpHost>
11 | <from>{{ noreply_email }}</from>
12 | <to>{{ error_mail_to }}</to>
13 | <subject>{{ error_subject_prefix }}Unexpected error dashboard</subject>
14 | <layout class="ch.qos.logback.classic.html.HTMLLayout"/>
15 | <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
16 | <level>ERROR</level>
17 | </filter>
18 | </appender>
19 | <root level="INFO">
20 | <appender-ref ref="CONSOLE"/>
21 | <appender-ref ref="EMAIL"/>
22 | </root>
23 | </configuration>
24 |
--------------------------------------------------------------------------------
/roles/diyidp/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | diyidp_domain: "diyidp.{{ base_domain }}"
3 | diyidp_cert: "diyidp.crt"
4 | diyidp:
5 | db_host: "{{ mariadb_host }}"
6 | db_name: diyidp
7 | db_user: diyidprw
8 | db_password: "{{ mysql_passwords.diyidp }}"
9 | secretsalt: "{{ diyidp_secret_salt }}"
10 | admin_password: "{{ diyidp_secret }}"
11 | diyidp_secret_salt: secretsecret
12 | diyidp_secret: secret
13 | diyidp_remotesp:
14 | - name: "{{ instance_name }} SP metadata"
15 | metadataurl: "https://engine.{{ base_domain }}/authentication/sp/metadata"
16 | acslocation: "https://engine.{{ base_domain }}/authentication/sp/consume-assertion"
17 | diyidp_docker_networks:
18 | - name: "loadbalancer"
19 |
--------------------------------------------------------------------------------
/roles/diyidp/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart diyidp
3 | command: docker restart diyidp
4 |
--------------------------------------------------------------------------------
/roles/diyidp/templates/000-default.conf.j2:
--------------------------------------------------------------------------------
1 | <VirtualHost *:80>
2 | DocumentRoot /var/simplesamlphp/public
3 | <Directory /var/simplesamlphp/public>
4 | Require all granted
5 | </Directory>
6 | </VirtualHost>
7 |
--------------------------------------------------------------------------------
/roles/diyidp/templates/authsources.php.j2:
--------------------------------------------------------------------------------
1 | array(
7 | // The default is to use core:AdminPassword, but it can be replaced with
8 | // any authentication source.
9 |
10 | 'core:AdminPassword',
11 | ),
12 |
13 |
14 | 'sql_user' => array(
15 | 'core:loginpage_links' => [
16 | 'users' => ['href' => '/showusers.php', 'text' => 'List of available users'],
17 | ],
18 | 'sqlauth:SQL',
19 | 'dsn' => 'mysql:host={{ diyidp.db_host}};port=3306;dbname={{ diyidp.db_name }}',
20 | 'username' => '{{ diyidp.db_user}}',
21 | 'password' => '{{ diyidp.db_password}}',
22 | 'query' => "SELECT uid,givenName,sn,cn, mail,displayName,schacHomeOrganization, CONCAT(uid, '@', schacHomeOrganization) as eduPersonPrincipalName,eduPersonEntitlement,eduPersonAffiliation,isMemberOf,schacPersonalUniqueCode,eduPersonScopedAffiliation
23 | FROM users WHERE username = :username AND password = :password",
24 | ),
25 | );
26 |
--------------------------------------------------------------------------------
/roles/diyidp/templates/config-override.php.j2:
--------------------------------------------------------------------------------
1 | '{{ remotesp.acslocation }}',
16 | 'IDPList' => array( 'sql_users', ),
17 | 'NameIDFormat' => 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent',
18 | );
19 | {% endfor %}
20 |
--------------------------------------------------------------------------------
/roles/docker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_apt_release_channel: stable
2 | docker_repo_url: https://download.docker.com/linux
3 | docker_apt_arch: "{{ 'arm64' if ansible_architecture == 'aarch64' else 'amd64' }}"
4 | docker_apt_repository: "deb [arch={{ docker_apt_arch }} signed-by=/etc/apt/trusted.gpg.d/docker.asc] {{ docker_repo_url }}/debian {{ ansible_distribution_release }} {{ docker_apt_release_channel }}"
5 | docker_apt_ignore_key_error: true
6 | docker_apt_gpg_key: "{{ docker_repo_url }}/{{ ansible_distribution | lower }}/gpg"
7 | docker_apt_gpg_key_checksum: "sha256:1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570"
8 | docker_apt_filename: "docker"
9 | docker_install_traefik: true
10 | docker_traefik_ldaps: false
11 | docker_traefik_ports:
12 | - 0.0.0.0:443:443
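# Note: when docker_traefik_ldaps is enabled, the published ports presumably also need the
# LDAPS entrypoint defined in traefik.yaml.j2, e.g. (illustrative override):
# docker_traefik_ports:
#   - 0.0.0.0:443:443
#   - 0.0.0.0:636:636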
13 |
--------------------------------------------------------------------------------
/roles/docker/files/iptablesdocker.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Restore iptables firewall rules
3 | Before=network-pre.target
4 |
5 | [Service]
6 | Type=oneshot
7 | ExecStart=/usr/local/sbin/ip4tables.sh
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
12 |
--------------------------------------------------------------------------------
/roles/docker/files/router.yaml:
--------------------------------------------------------------------------------
1 | tls:
2 | stores:
3 | default:
4 | defaultCertificate:
5 | certFile: /config/certs/backend.crt
6 | keyFile: /config/certs/backend.key
7 |
8 |
--------------------------------------------------------------------------------
/roles/docker/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart docker
3 | ansible.builtin.systemd:
4 | name: docker
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/roles/docker/tasks/setup-debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure old versions of Docker are not installed.
3 | ansible.builtin.package:
4 | name:
5 | - docker
6 | - docker-engine
7 | state: absent
8 |
9 | - name: Ensure dependencies are installed.
10 | ansible.builtin.apt:
11 | name:
12 | - apt-transport-https
13 | - ca-certificates
14 | - python3-requests
15 | state: present
16 |
17 | - name: Add Docker apt key.
18 | ansible.builtin.get_url:
19 | url: "{{ docker_apt_gpg_key }}"
20 | dest: /etc/apt/trusted.gpg.d/docker.asc
21 | mode: '0644'
22 | force: false
23 | checksum: "{{ docker_apt_gpg_key_checksum | default(omit) }}"
24 |
25 | - name: Add Docker repository.
26 | ansible.builtin.apt_repository:
27 | repo: "{{ docker_apt_repository }}"
28 | state: present
29 | filename: "{{ docker_apt_filename }}"
30 | update_cache: true
31 |
--------------------------------------------------------------------------------
/roles/docker/tasks/setup-rocky.yml:
--------------------------------------------------------------------------------
1 | - name: Add Docker GPG key.
2 | ansible.builtin.rpm_key:
3 | key: "https://download.docker.com/linux/centos/gpg"
4 | state: present
5 |
6 | - name: Add Docker repository.
7 | ansible.builtin.get_url:
8 | url: https://download.docker.com/linux/centos/docker-ce.repo
9 | dest: '/etc/yum.repos.d/docker-ce.repo'
10 | owner: root
11 | group: root
12 | mode: "0644"
13 |
--------------------------------------------------------------------------------
/roles/docker/templates/daemon.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "live-restore": true,
3 | "log-driver": "journald",
4 | "log-opts" : {
5 | "tag" : "{{ '{{' }}.Name{{ '}}' }}"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/roles/docker/templates/traefik.yaml.j2:
--------------------------------------------------------------------------------
1 | providers:
2 | docker:
3 | exposedByDefault: false
4 | network: loadbalancer
5 | # File provider configuration
6 | file:
7 | directory: /config/config/
8 | watch: true
9 | # EntryPoints configuration
10 | entryPoints:
11 | websecure:
12 | address: ":443"
13 | {% if engine_trusted_proxy_ips is defined %}
14 | forwardedHeaders:
15 | trustedIPs:
16 | {% for engine_trusted_proxy_ip in engine_trusted_proxy_ips %}
17 | - {{ engine_trusted_proxy_ip }}
18 | {% endfor %}
19 | {% endif %}
20 | {% if docker_traefik_ldaps %}
21 | ldaps:
22 | address: ":636"
23 | {% endif %}
24 | # Server transport configuration
25 | serversTransport:
26 | insecureSkipVerify: true
27 | # Enable access log
28 | accessLog: {}
29 | # Enable ping
30 | ping: {}
31 | # Global configuration
32 | global:
33 | checkNewVersion: false
34 | sendAnonymousUsage: false
35 |
--------------------------------------------------------------------------------
/roles/elk/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | elastic_cluster: false
3 | kibana_standalone: false
4 | elastic_data_node: true
5 | elastic_minimum_nodes: 1
6 | kibana_tls_cert_path: "/etc/pki/tls/certs/{{ kibana_server_name }}.crt"
7 | kibana_tls_key_path: "/etc/pki/tls/private/{{ kibana_server_name }}.key"
8 | kibana_tls_chain_path: "/etc/pki/tls/private/{{ kibana_server_name }}_chain.crt"
9 | kibana_oidc_url: "oidc.{{ base_domain }}"
10 | kibana_oidc_clientid: "https@//kibana.{{ base_domain }}"
11 | elastic_instance_name: surfconext
12 | logstash_elk: False
13 | logstash_stats: True
14 | logstash_tls: False
15 | kibana_server_name: kibana.{{ base_domain }}
16 |
--------------------------------------------------------------------------------
/roles/elk/files/elasticsearch.repo:
--------------------------------------------------------------------------------
1 | [elasticsearch-7.x]
2 | name=Elasticsearch repository for 7.x packages
3 | baseurl=https://artifacts.elastic.co/packages/7.x/yum
4 | gpgcheck=1
5 | gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
6 | enabled=1
7 | autorefresh=1
8 | type=rpm-md
9 |
10 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/11-core-haproxy.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | if [program] == "haproxy" {
3 | grok {
4 | patterns_dir => "/etc/logstash/patterns"
5 | match => { "message" => "%{HAPROXYHTTPBASE}" }
6 | }
7 | useragent {
8 | source => "request_header_user_agent"
9 | target => "ua"
10 | }
11 | date {
12 | match => [ "accept_date", "dd/MMM/yyyy:HH:mm:ss.SSS" ]
13 | }
14 | mutate {
15 | add_field => { "reverse_dns" => "%{client_ip}"}
16 | }
17 | dns {
18 | reverse => [ "reverse_dns" ]
19 | action => "replace"
20 | hit_cache_ttl => 600
21 | failed_cache_ttl => 600
22 | }
23 | mutate {
24 | remove_field => [ "source" ]
25 | remove_field => [ "request_header_user_agent" ]
26 | remove_field => [ "accept_date" ]
27 | remove_field => [ "haproxy_hour" ]
28 | remove_field => [ "haproxy_milliseconds" ]
29 | remove_field => [ "haproxy_minutes" ]
30 | remove_field => [ "haproxy_month" ]
31 | remove_field => [ "haproxy_monthday" ]
32 | remove_field => [ "haproxy_second" ]
33 | remove_field => [ "haproxy_time" ]
34 | remove_field => [ "haproxy_year" ]
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/12-core-eblog.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | if [program] == "EBLOG" {
3 | grok {
4 | patterns_dir => "/etc/logstash/patterns"
5 | match => { "message" => "\[%{EBDATETIME:datetime}\] %{DATA:ebfacility}\.%{DATA:eblevel}: %{DATA:ebmessage} \{%{EBSESSREQ:ebsessreq}?%{GREEDYDATA:ebrequest}" }
6 | }
7 | }
8 | mutate {
9 | gsub => [ "ebsessreq", "^", "{" ]
10 | gsub => [ "ebsessreq", "\[\]$", "" ]
11 | }
12 | if ([ebfacility] == "app" or [ebfacility] == "security" or [ebfacility] == "request") and [eblevel] != "ERROR" {
13 | json {
14 | source => "ebrequest"
15 | target => "ebdebug"
16 | }
17 | mutate {
18 | remove_field => [ "ebrequest" ]
19 | }
20 | }
21 | json {
22 | source => "ebsessreq"
23 | target => "ebid"
24 | }
25 | date {
26 | match => [ "datetime", "yyyy-MM-dd HH:mm:ss" ]
27 | }
28 | mutate {
29 | remove_field => [ "ebsessreq" ]
30 | remove_field => [ "ebrequest" ]
31 | remove_field => [ "timestamp" ]
32 | remove_field => [ "datetime" ]
33 | remove_field => [ "source" ]
34 | remove_field => [ "syslogbase" ]
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/14-core-pdp.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | if [program] == "PDPMAIN" {
3 | grok {
4 | match => { "message" => "%{TIMESTAMP_ISO8601:pdptimestamp} %{DATA:pdpdata}" }
5 | }
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/15-core-pdpstats.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | if [program] == "PDPANALYTICS" {
3 | grok {
4 | match => { "message" => "%{GREEDYDATA:pdpstats}" }
5 | }
6 | json {
7 | source => "pdpstats"
8 | target => "pdpstats"
9 | }
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/16-core-myconext.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | if [program] == "myconextjson" {
3 | json {
4 | source => "message"
5 | target => "myconext"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/17-core-oidcng.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | if [program] == "oidcngjson" {
3 | json {
4 | source => "message"
5 | target => "oidcng"
6 | }
7 | }
8 | }
9 |
10 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/31-core-elastic-search-output.conf:
--------------------------------------------------------------------------------
1 | filter {
2 | mutate {
3 | remove_field => [ "message" ]
4 | }
5 | }
6 |
7 | output {
8 | if [@metadata][beat] != "metricbeat" {
9 | elasticsearch { hosts => ["http://localhost:9200"]
10 | index => "conext-%{+YYYY.MM.dd}"
11 | manage_template => "true"
12 | template => "/etc/logstash/conf.d/mapping_core.json"
13 | }
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/core/32-core-elastic-search-output.conf:
--------------------------------------------------------------------------------
1 | output {
2 | if [@metadata][beat] == "metricbeat" {
3 | elasticsearch { hosts => ["http://localhost:9200"]
4 | index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
5 | }
6 | }
7 | }
8 |
9 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/patterns/ebrequest:
--------------------------------------------------------------------------------
1 | EBSESSREQ .{36,72}\} (\[?)(\]?)
2 | EBDATETIME %{YEAR}-%{MONTHNUM}-%{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND}
3 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/patterns/haproxy:
--------------------------------------------------------------------------------
1 | HAPROXYCAPTUREDREQUESTHEADERS %{DATA:request_header_user_agent}\|%{DATA:request_header_tls_cipher},%{DATA:request_header_tls_version},%{DATA:request_header_http_version}\|%{DATA:request_header_samesitesupport}\|%{DATA:http_req_rate_10s}\|%{DATA:http_req_rate_ip_path_1m}
2 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/sa/20-sa-mysqld.conf:
--------------------------------------------------------------------------------
1 | filter {
2 |
3 | # Parse messages output by mysqld (MariaDB)
4 |
5 | # format to parse:
6 | # 150713 13:26:08 [Note] WSREP: bla bla bla
7 | # 2nd line
8 |
9 | # Note: the multiline plugin is not thread safe! This means that logstash must not be configured to run multiple filter threads
10 | # I.e. Set "pipeline.workers: 1" in /etc/logstash/logstash.yml
11 |
12 | if [origin] == "stepup" and [program] == "mysqld" {
13 |
14 | # (Try) to group messages split over multiple lines in one line again
15 | multiline {
16 | # Pattern matches the start of a new log message
17 | pattern => "^[0-9]{6} [0-9]{2}:[0-9]{1,2}:[0-9]{2} \["
18 | what => "previous"
19 | negate => true
20 | }
21 |
22 | # Get the (non-standard) "YYMMdd (H)H:mm:ss" formatted timestamp and severity and replace the message
23 | grok {
24 | match => {
25 | message => "(?m)(?<mysql_timestamp>[0-9]{6} [0-9]{2}:[0-9]{1,2}:[0-9]{2}) \[(?<mysql_severity>[^\]]+)\] %{GREEDYDATA:message}"
26 | }
27 | overwrite => [ "message" ]
28 | }
29 |
30 | if "_grokparsefailure" not in [tags] {
31 | # Mysql syslog reports all messages at error, replace severity with severity from the actual mysql logline
32 | mutate {
33 | rename => [ "mysql_severity", "severity" ]
34 | }
35 | }
36 | }
37 |
38 | }
39 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/sa/20-sa-nginx.conf:
--------------------------------------------------------------------------------
1 | filter {
2 |
3 | if [origin] == "stepup" and [program] =~ /^nginx_access*/ {
4 | grok {
5 | match => [ "message" , "%{COMBINEDAPACHELOG}"]
6 | }
7 | mutate {
8 | convert => ["response", "integer"]
9 | convert => ["bytes", "integer"]
10 | convert => ["responsetime", "float"]
11 |
12 | }
13 | useragent {
14 | source => "agent"
15 | }
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/sa/20-sa-sendmail.conf:
--------------------------------------------------------------------------------
1 | filter {
2 |
3 | # Parse sendmail messages
4 |
5 | # format to parse:
6 | # w5KCT17T011546: from=;, size=945, class=0, nrcpts=1, msgid=, proto=ESMTP, daemon=MTA, relay=localhost [127.0.0.1]
7 | # STARTTLS=client, relay=outgoing.example.org., version=TLSv1/SSLv3, verify=FAIL, cipher=DHE-RSA-AES128-GCM-SHA256, bits=128/128
8 |
9 | if [program] == "sendmail" {
10 | grok {
11 | match => {
12 | "message" => "(%{WORD:sendmail.qid}: )?%{GREEDYDATA:fields}"
13 | }
14 | }
15 | if "_grokparsefailure" not in [tags] {
16 | # Split stats in keys and values, and prefix keys with "sendmail_"
17 | kv {
18 | source => "fields"
19 | target => "sendmail"
20 | field_split => ", "
21 | }
22 | }
23 | mutate {
24 | remove_field => "fields"
25 | }
26 | }
27 |
28 | }
29 |
30 |
--------------------------------------------------------------------------------
/roles/elk/files/logstash/sa/30-sa-elastic-search-output.conf:
--------------------------------------------------------------------------------
1 | output {
2 |
3 | if [origin] == "stepup" {
4 |
5 | elasticsearch {
6 | hosts => ["http://localhost:9200"]
7 | index => "stepup-%{+YYYY.MM.dd}"
8 | manage_template => false
9 | }
10 |
11 | }
12 |
13 | }
14 |
15 |
--------------------------------------------------------------------------------
/roles/elk/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart httpd
2 | service: name=httpd state=restarted
3 |
4 | - name: reload httpd
5 | service: name=httpd state=reloaded
6 |
7 | - name: restart elasticsearch
8 | service: name=elasticsearch state=restarted
9 |
10 | - name: restart logstash
11 | service: name=logstash state=restarted
12 |
13 | - name: restart kibana
14 | service: name=kibana state=restarted
15 |
--------------------------------------------------------------------------------
/roles/elk/tasks/elastic.yml:
--------------------------------------------------------------------------------
1 | - name: Install elasticsearch
2 | yum:
3 | name: elasticsearch
4 | state: present
5 |
6 | - name: Create datadirectory
7 | file:
8 | path: /data/elasticsearch
9 | state: directory
10 | owner: elasticsearch
11 | mode: 0775
12 |
13 | - name: Make sure java 8 is used
14 | lineinfile:
15 | dest: /etc/sysconfig/elasticsearch
16 | line: 'JAVA_HOME=/usr/lib/jvm/jre-1.8.0/'
17 | state: present
18 |
19 | - name: Install elasticsearch configuration file
20 | template:
21 | src: "{{ item }}.j2"
22 | dest: "/etc/elasticsearch/{{ item }}"
23 | with_items:
24 | - elasticsearch.yml
25 | notify: restart elasticsearch
26 |
27 | - name: Enable and start elasticsearch service
28 | service:
29 | name: elasticsearch
30 | state: started
31 | enabled: yes
32 |
33 |
--------------------------------------------------------------------------------
/roles/elk/tasks/logstash_stats.yml:
--------------------------------------------------------------------------------
1 | - name: Install logstash plugins
2 | logstash_plugin:
3 | state: present
4 | name: "logstash-filter-fingerprint logstash-output-influxdb"
5 |
6 | - name: Copy stats specific files
7 | template:
8 | src: "logstash/stats/33-core-influxdb-output.conf"
9 | dest: "/etc/logstash/conf.d/core/33-core-influxdb-output.conf"
10 |
11 | - name: Copy common specific files
12 | template:
13 | src: "logstash/core/{{ item }}.j2"
14 | dest: "/etc/logstash/conf.d/core/{{ item }}"
15 | with_items:
16 | - 02-filebeat-input.conf
17 | - 13-core-ebauth.conf
18 |
--------------------------------------------------------------------------------
/roles/elk/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Add Elasticsearch GPG key.
2 | rpm_key:
3 | key: https://packages.elasticsearch.org/GPG-KEY-elasticsearch
4 | state: present
5 |
6 | - name: Add elasticsearch yum repo
7 | copy:
8 | src: elasticsearch.repo
9 | dest: /etc/yum.repos.d/elasticsearch.repo
10 | mode: 0644
11 |
12 | - name: Install java
13 | yum:
14 | name: java
15 | state: present
16 |
17 | - name: Create datadirectory
18 | file:
19 | path: /data
20 | state: directory
21 | owner: root
22 | mode: 0775
23 |
24 | - name: Include logstash specific tasks
25 | include_tasks: logstash.yml
26 | when: "'logstash' in group_names"
27 |
28 | - name: Include elastic specific tasks
29 | include_tasks: elastic.yml
30 | when: "'elasticsearch' in group_names"
31 |
32 | - name: Include kibana specific tasks
33 | include_tasks: kibana.yml
34 | when: "'kibana' in group_names"
35 |
--------------------------------------------------------------------------------
/roles/elk/templates/elasticsearch.yml.j2:
--------------------------------------------------------------------------------
1 | cluster.name: {{ elastic_instance_name }}
2 | path.data: /data/elasticsearch
3 | path.logs: /var/log/elasticsearch
4 | {% if elastic_cluster %}
5 | node.name: {{ ansible_hostname }}
6 | node.data: {{ elastic_data_node }}
7 | discovery.zen.ping.unicast.hosts: [{% for host in groups['elasticsearch'] %}{{ hostvars[host]['backend_ipv4'] }}{% if not loop.last %},{% endif %}{% endfor %}]
8 | discovery.zen.minimum_master_nodes: {{ elastic_minimum_nodes }}
9 | network.host: ["{{ backend_ipv4 }}","_local_"]
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/roles/elk/templates/kibana.yml.j2:
--------------------------------------------------------------------------------
1 | elasticsearch.hosts: "http://localhost:9200"
2 |
3 |
--------------------------------------------------------------------------------
/roles/elk/templates/logstash.yml.j2:
--------------------------------------------------------------------------------
1 | pipeline.workers: 1
2 | queue.type: persisted
3 | queue.max_bytes: 4gb
4 | dead_letter_queue.enable: true
5 | dead_letter_queue.max_bytes: 1024mb
6 | path.logs: /var/log/logstash
7 | path.data: /data/logstash
8 |
--------------------------------------------------------------------------------
/roles/elk/templates/logstash/core/02-filebeat-input.conf.j2:
--------------------------------------------------------------------------------
1 | #jinja2: trim_blocks: True, lstrip_blocks: True
2 | input {
3 | beats {
4 | port => 5044
5 | type => "log"
6 | {% if logstash_tls %}
7 | ssl => true
8 | ssl_certificate => "/etc/pki/tls/certs/logstash.{{ base_domain}}.pem"
9 | ssl_key => "/etc/pki/tls/private/logstash.{{ base_domain }}.key"
10 | {% endif %}
11 | }
12 | }
13 |
14 | filter {
15 | grok {
16 | match => { "message" => "%{SYSLOGBASE}?%{GREEDYDATA:message}" }
17 | overwrite => [ "message" ]
18 | }
19 | # We overwrite the @timestamp with the syslogtimestamp in stead of the timestamp when the log message arrives at Logstash
20 | date {
21 | match => [ "timestamp" , "MMM d HH:mm:ss" , "MMM dd HH:mm:ss" ]
22 | }
23 | }
24 |
25 |
--------------------------------------------------------------------------------
/roles/elk/templates/logstash/core/13-core-ebauth.conf.j2:
--------------------------------------------------------------------------------
1 | filter {
2 | if [program] == "EBAUTH" {
3 | grok {
4 | patterns_dir => "/etc/logstash/patterns"
5 | match => { "message" => "%{GREEDYDATA:ebauth}" }
6 | }
7 | }
8 | json {
9 | source => "ebauth"
10 | target => "ebauth"
11 | }
12 | date {
13 | match => [ "[ebauth][context][login_stamp]", "yyyy-MM-dd'T'HH:mm:ss.SSSSSSZZ" ]
14 | }
15 | de_dot {
16 | }
17 | mutate {
18 | remove_field => [ "source" ]
19 | remove_field => [ "syslogbase" ]
20 | }
21 | {% if logstash_stats %}
22 | mutate {
23 | add_field => {"[month]" => "%{+MM}"}
24 | add_field => {"[year]" => "%{+YYYY}"}
25 | }
26 | fingerprint {
27 | key => "{{ logstash_ebauth_sha_secret }}"
28 | source => [ "[ebauth][context][user_id]" ]
29 | method => SHA256
30 | target => [ "[ebauth][context][user_id_hashed]" ]
31 | }
32 | ruby {
33 | code => 'quarter = (event.get("month").to_i / 3.0).ceil;
34 | event.set("quarter",quarter)'
35 | }
36 | {% endif %}
37 | }
38 |
--------------------------------------------------------------------------------
/roles/elk/templates/logstash/sa/02-sa-filebeat-input.conf.j2:
--------------------------------------------------------------------------------
1 | #jinja2: trim_blocks: True, lstrip_blocks: True
2 | input {
3 | beats {
4 | port => 5055
5 | {% if logstash_tls %}
6 | ssl => true
7 | ssl_certificate => "/etc/pki/tls/certs/logstash.{{ base_domain }}.pem"
8 | ssl_key => "/etc/pki/tls/private/logstash.{{ base_domain }}.key"
9 | {% endif %}
10 | add_field => { "origin" => "stepup" }
11 |
12 | }
13 | }
14 | filter {
15 | grok {
16 | match => { "message" => "%{SYSLOGBASE}?%{GREEDYDATA:message}" }
17 | overwrite => [ "message" ]
18 | }
19 | date {
20 | match => [ "timestamp" , "MMM d HH:mm:ss" , "MMM dd HH:mm:ss" ]
21 | }
22 | mutate {
23 | remove_field => "agent" # Agent is added by filebeat, and clashes with the nginx agent field
24 | remove_field => "timestamp"
25 | }
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/roles/elk/templates/pipelines.yml.j2:
--------------------------------------------------------------------------------
1 | - pipeline.id: core
2 | path.config: "/etc/logstash/conf.d/core"
3 | pipeline.workers: 2
4 | queue.type: persisted
5 | - pipeline.id: sa
6 | path.config: "/etc/logstash/conf.d/sa"
7 | pipeline.workers: 2
8 | queue.type: persisted
9 |
10 |
--------------------------------------------------------------------------------
/roles/engineblock/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart engine
3 | community.docker.docker_container:
4 | name: engineblock
5 | state: started
6 | restart: true
7 | # avoid recreating the container, as that causes unexpected data loss according to the docker_container module notes
8 | comparisons:
9 | '*': ignore
10 | when: ebcontainer is success and ebcontainer is not change
11 |
--------------------------------------------------------------------------------
/roles/engineblock/vars/main.yml:
--------------------------------------------------------------------------------
1 | current_release_config_dir_name: /opt/openconext/engine
2 | engine_config_dir: /var/www/html/app/config
3 | engine_certs_dir: /var/www/html/certs
4 |
--------------------------------------------------------------------------------
/roles/filebeat/defaults/main.yml:
--------------------------------------------------------------------------------
1 | filebeat_tls: False
2 | filebeat_eblog: False
3 |
--------------------------------------------------------------------------------
/roles/filebeat/files/elasticsearch.repo:
--------------------------------------------------------------------------------
1 | [elasticsearch-6.x]
2 | name=Elasticsearch repository for 6.x packages
3 | baseurl=https://artifacts.elastic.co/packages/6.x/yum
4 | gpgcheck=1
5 | gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
6 | enabled=1
7 | autorefresh=1
8 | type=rpm-md
9 |
10 |
--------------------------------------------------------------------------------
/roles/filebeat/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart filebeat
3 | service: name=filebeat state=restarted
4 |
--------------------------------------------------------------------------------
/roles/filebeat/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add Elasticsearch GPG key.
3 | rpm_key:
4 | key: https://packages.elasticsearch.org/GPG-KEY-elasticsearch
5 | state: present
6 |
7 | - name: Add elasticsearch yum repo
8 | copy:
9 | src: elasticsearch.repo
10 | dest: /etc/yum.repos.d/elasticsearch.repo
11 | mode: 0644
12 |
13 | - name: Install Filebeat
14 | yum: name=filebeat state=present
15 |
16 | #- name: Install cacert certificate
17 | #copy: src={{ inventory_dir }}/files/certs/elastic_ca.pem dest=/etc/pki/elastic/
18 |
19 | - name: Install filebeat.yml
20 | template: src=filebeat.yml.j2 dest=/etc/filebeat/filebeat.yml
21 | notify:
22 | - restart filebeat
23 |
24 | - name: Enable and start filebeat
25 | service: name=filebeat state=started enabled=yes
26 |
--------------------------------------------------------------------------------
/roles/filebeat/templates/filebeat.yml.j2:
--------------------------------------------------------------------------------
1 | filebeat.prospectors:
2 |
3 | {% if filebeat_eblog %}
4 | - type: log
5 | paths:
6 | - /var/log/messages
7 | include_lines: ["EBLOG"]
8 | fields:
9 | prog: EBLOG
10 | env: {{ env }}
11 | fields_under_root: true
12 | {% endif %}
13 |
14 | - type: log
15 | paths:
16 | - /var/log/messages
17 | include_lines: ['EBAUTH']
18 | exclude_lines: ['influxd']
19 | fields:
20 | prog: EBAUTH
21 | env: {{ env }}
22 | fields_under_root: true
23 |
24 |
25 | output.logstash:
26 | hosts: ["logstash.{{ base_domain }}:5044"]
27 | {% if filebeat_tls %}
28 | ssl.certificate_authorities: ["/etc/pki/filebeat/filebeat_ca.pem"]
29 | {% endif %}
30 |
--------------------------------------------------------------------------------
/roles/galera/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Set galera_bootstrap_node to the inventory_hostname of the node to allow
2 | # that node to be bootstrapped.
3 | # Only one node of the cluster must be bootstrapped.
4 | # It is suggested you specify the node to bootstrap when running the playbook. E.g.:
5 | # ansible-playbook ... -e "galera_bootstrap_node=<inventory_hostname>"
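# For example (playbook name and hostname are illustrative):
#   ansible-playbook deploy_mariadb_playbook.yml -e "galera_bootstrap_node=db1.example.org"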
6 | galera_bootstrap_node: ""
7 | mariadb_cluster_user: galera_sst_user
8 | mariadb_backup_user: backup_user
9 |
10 | percona_url_rpm: https://repo.percona.com/yum/percona-release-latest.noarch.rpm
11 |
12 | # Settings for galera TLS
13 | galera_ssl: true
14 | galera_tls_cert_path: /etc/pki/mysql
15 | galera_tls_ca: galera_ca.pem
16 | galera_tls:
17 | - name: "server"
18 | key_name: "{{ galera_server_key_name }}"
19 | crt_name: "{{ galera_server_crt_name }}"
20 | key_content: "{{ galera_server_key }}"
21 | - name: "client"
22 | key_name: "{{ galera_client_key_name }}"
23 | crt_name: "{{ galera_client_crt_name }}"
24 | key_content: "{{ galera_client_key }}"
25 |
26 | galera_server_key_name: galera_server.key
27 | galera_server_crt_name: galera_server.pem
28 | galera_client_key_name: galera_client.key
29 | galera_client_crt_name: galera_client.pem
30 | galera_sst_crt_name: galera_sst.pem
31 | galera_gmcast_segment: 1
32 | galera_handler_restart: True
33 |
34 | #Install memcached on the cluster nodes
35 | memcached_cluster: False
36 |
--------------------------------------------------------------------------------
/roles/galera/files/mysql_reboot_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Reboot a MariaDB Galera cluster node
4 | #
5 | # Usage: mysql_reboot_cluster.sh "<mariadb_root_password>"
6 | #
7 | # When a node is the only (i.e. first) node in a cluster it must
8 | # be bootstrapped. For MariaDB-Galera-server this is done by using "--wsrep-new-cluster" instead of "start"
9 |
10 | root_password=$1
11 |
12 | if [ -z "${root_password}" ]; then
13 | echo "Usage $0 <mariadb_root_password>"
14 | exit 1
15 | fi
16 |
17 |
18 | # Get cluster size
19 | echo -n "Getting cluster size... "
20 | cluster_size=`mysql -u root -p"${root_password}" -e 'SHOW STATUS LIKE "wsrep_cluster_size";' -B -N | cut -f2`
21 | res=$?
22 | if [ ${res} == 0 ]; then
23 | echo "OK. wsrep_cluster_size=${cluster_size}"
24 | else
25 | echo "Failed"
26 | exit 1
27 | fi
28 |
29 |
30 | # Choose restart method based on cluster size
31 | if [ "${cluster_size}" -gt "1" ]; then
32 | echo -n "Restarting mysql... "
33 | /sbin/service mysql restart
34 | res=$?
35 | elif [ "${cluster_size}" -eq "0" ]; then
36 | echo -n "Restarting mysql with the 'bootstrap' option... "
37 | /sbin/service mysql stop
38 | /sbin/service mysql --wsrep-new-cluster
39 | res=$?
40 | else
41 | echo "Expected cluster size >= 1";
42 | exit 1
43 | fi
44 |
45 | if [ ${res} == 0 ]; then
46 | echo "OK"
47 | else
48 | echo "Failed"
49 | exit 1
50 | fi
51 |
52 | exit 0
53 |
--------------------------------------------------------------------------------
/roles/galera/files/plugins.cnf:
--------------------------------------------------------------------------------
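# Load MariaDB's SQL_ERROR_LOG plugin (logs statements that return an error) and rotate its log once it reaches 100M.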
1 | [mysqld]
2 | plugin_load_add = sql_errlog
3 | sql_error_log_size_limit = 100M
4 |
--------------------------------------------------------------------------------
/roles/galera/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # MariaDB service from RedHat is called "mariadb", MariaDB service from MariaDB is called "mysql"
4 | - name: restart mariadb
5 | script: mysql_reboot_cluster.sh "{{ mariadb_root_password }}"
6 | when:
7 | - galera_bootstrap_node is not defined
8 | - galera_handler_restart
9 |
10 | - name: restart garb
11 | service:
12 | name: garb
13 | state: restarted
14 | when:
15 | - galera_handler_restart
16 |
17 | - name: restart mysql
18 | service:
19 | name: mysql
20 | state: restarted
21 | when:
22 | - galera_handler_restart
23 |
--------------------------------------------------------------------------------
/roles/galera/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Include arbiter installation
3 | include_tasks: arbiter_node.yml
4 | when: arbiter_node is defined
5 |
6 | - name: Include cluster node installation
7 | include_tasks: cluster_nodes.yml
8 | when: arbiter_node is not defined
9 |
--------------------------------------------------------------------------------
/roles/galera/templates/garb.j2:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2012 Codership Oy
2 | # This config file is to be sourced by garb service script.
3 |
4 | # A comma-separated list of node addresses (address[:port]) in the cluster
5 | GALERA_NODES="{% for host in groups['dbcluster'] %}{{ hostvars[host]['backend_ipv4'] }}:4567{% if not loop.last %},{% endif %}{% endfor %}"
6 |
7 |
8 | # Galera cluster name, should be the same as on the rest of the nodes.
9 | GALERA_GROUP="{{ mariadb_cluster_name }}"
10 |
11 | # Optional Galera internal options string (e.g. SSL settings)
12 | # see http://galeracluster.com/documentation-webpages/galeraparameters.html
13 | {% if galera_ssl %}
14 | GALERA_OPTIONS="socket.ssl_key={{ galera_tls_cert_path }}/{{ galera_server_key_name }};socket.ssl_cert={{ galera_tls_cert_path }}/{{ galera_server_crt_name }};socket.ssl_ca={{ galera_tls_cert_path }}/{{ galera_tls_ca }};socket.ssl_cipher=AES128-SHA;gmcast.segment={{ galera_gmcast_segment }}"
15 | {% endif %}
16 | # Log file for garbd. Optional, by default logs to syslog
17 | # LOG_FILE=""
18 |
19 |
--------------------------------------------------------------------------------
/roles/galera/templates/mariadb.repo.j2:
--------------------------------------------------------------------------------
1 | # http://mariadb.org/mariadb/repositories/
2 | [mariadb]
3 | name = MariaDB
4 | baseurl = http://yum.mariadb.org/10.6/centos7-amd64
5 | module_hotfixes=1
6 | gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
7 | gpgcheck=1
8 |
--------------------------------------------------------------------------------
/roles/galera/templates/mariadb.repo.rocky8.j2:
--------------------------------------------------------------------------------
1 | # http://mariadb.org/mariadb/repositories/
2 | [mariadb]
3 | name = MariaDB
4 | baseurl = http://yum.mariadb.org/10.6/rhel8-amd64
5 | module_hotfixes=1
6 | gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
7 | gpgcheck=1
8 |
--------------------------------------------------------------------------------
/roles/galera/templates/mariadb.repo.rocky9.j2:
--------------------------------------------------------------------------------
1 | # http://mariadb.org/mariadb/repositories/
2 | [mariadb]
3 | name = MariaDB
4 | baseurl = http://yum.mariadb.org/10.6/rhel9-amd64
5 | module_hotfixes=1
6 | gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
7 | gpgcheck=1
8 |
--------------------------------------------------------------------------------
/roles/galera/templates/my.cnf.j2:
--------------------------------------------------------------------------------
1 | [client]
2 | user=root
3 | password={{ mariadb_root_password }}
4 | socket = /var/lib/mysql/mysql.sock
5 |
--------------------------------------------------------------------------------
/roles/galera/templates/mysql-clients.cnf.j2:
--------------------------------------------------------------------------------
1 | #
2 | # These groups are read by MariaDB command-line tools
3 | # Use it for options that affect only one utility
4 | #
5 |
6 | [mysql]
7 | ssl-ca = {{ galera_tls_cert_path }}/{{ galera_tls_ca }}
8 | ssl-key = {{ galera_tls_cert_path }}/{{ galera_client_key_name }}
9 | ssl-cert = {{ galera_tls_cert_path }}/{{ galera_client_crt_name }}
10 | [mysql_upgrade]
11 |
12 | [mysqladmin]
13 |
14 | [mysqlbinlog]
15 |
16 | [mysqlcheck]
17 |
18 | [mysqldump]
19 |
20 | [mysqlimport]
21 |
22 | [mysqlshow]
23 |
24 | [mysqlslap]
25 |
26 |
--------------------------------------------------------------------------------
/roles/galera/templates/timeoutstartsec.conf.j2:
--------------------------------------------------------------------------------
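# systemd drop-in for the MariaDB/Galera service: allow up to 10 minutes for startup,
# e.g. when a node has to perform a (potentially long) state transfer before it is ready.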
1 | [Service]
2 | TimeoutStartSec=600
3 |
--------------------------------------------------------------------------------
/roles/galera_create_users/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create database user
2 | mysql_user:
3 | name: "{{ item[0].name }}"
4 | host: "{{ item[1] }}"
5 | password: "{{ item[0].password }}"
6 | priv: "{{ item[0].db_name }}.*:{{item[0].privilege}}"
7 | state: present
8 | login_unix_socket: /var/lib/mysql/mysql.sock
9 | append_privs: true
10 | with_nested:
11 | - "{{ databases.users }}"
12 | - "{{ database_clients }}"
13 | run_once: true
14 | no_log: true
15 |
--------------------------------------------------------------------------------
/roles/haproxy/files/lbops:
--------------------------------------------------------------------------------
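# Allow the unprivileged lbops user to reload haproxy without a password,
# e.g. so updated ACL/map files can be activated without full root access.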
1 | lbops ALL=(ALL) NOPASSWD: /usr/bin/systemctl reload haproxy
2 |
--------------------------------------------------------------------------------
/roles/haproxy/files/nosamesitebrowsers.lst:
--------------------------------------------------------------------------------
1 | Chrom(e|ium)\/(5[1-9]|6[0-6])
2 | UCBrowser
3 | iPhone OS 12_
4 | iPad; CPU OS 12_
5 | Outlook-iOS
6 | \(Macintosh; Intel Mac OS X 10_14(_\d|)\) AppleWebKit\/[\.\d]+ \(KHTML, like Gecko\)$
7 | \(Macintosh; Intel Mac OS X 10_14(_\d+|)\) AppleWebKit/[.\d]+ \(KHTML, like Gecko\) Version\/.* Safari\/
8 | ownCloud-iOS
9 |
--------------------------------------------------------------------------------
/roles/haproxy/files/sysconfig_haproxy:
--------------------------------------------------------------------------------
1 | # Add extra options to the haproxy daemon here. This can be useful for
2 | # specifying multiple configuration files with multiple -f options.
3 | # See haproxy(1) for a complete list of options.
4 | # This overrides the default haproxy.cfg config file
5 | CONFIG="/etc/haproxy/haproxy_global.cfg -f /etc/haproxy/haproxy_frontend.cfg -f /etc/haproxy/haproxy_backend.cfg -f /etc/haproxy/haproxy_stick_table_backend.cfg"
6 |
--------------------------------------------------------------------------------
/roles/haproxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart haproxy
3 | ansible.builtin.service:
4 | name: haproxy
5 | state: restarted
6 |
7 | - name: reload haproxy
8 | ansible.builtin.service:
9 | name: haproxy
10 | state: reloaded
11 |
12 | - name: restart rsyslog
13 | ansible.builtin.service:
14 | name: rsyslog
15 | state: restarted
16 |
17 | - name: reload systemd
18 | ansible.builtin.systemd:
19 | daemon_reload: true
20 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/allowedips.acl.j2:
--------------------------------------------------------------------------------
1 | {% for ip in haproxy_allowlistips %}
2 | {{ ip }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/backends.map.j2:
--------------------------------------------------------------------------------
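{# Renders one "vhost_name backend_be" pair per application; haproxy presumably uses this map in the frontend to pick a backend per Host header. #}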
1 | {% for application in haproxy_applications %}
2 | {{ application.vhost_name }} {{ application.name }}_be
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/backendsstaging.map.j2:
--------------------------------------------------------------------------------
1 | {% for application in haproxy_applications %}
2 | {% if application.stagingservers is defined %}
3 | {{ application.vhost_name }} {{ application.name }}_staging_be
4 | {% endif %}
5 | {% endfor %}
6 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/blockedips.acl.j2:
--------------------------------------------------------------------------------
1 | {% for ip in haproxy_blocklistips %}
2 | {{ ip }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/certlist.lst.j2:
--------------------------------------------------------------------------------
1 | {% if haproxy_acme | bool %}
2 | {% for host in haproxy_ssl_hosts %}
3 | /etc/haproxy/certs/{{ host }}.pem [ocsp-update on]
4 | {% endfor %}
5 | {% endif %}
6 | {% if haproxy_sni_ip.certs is defined %}
7 | {% for cert in haproxy_sni_ip.certs %}
8 | /etc/haproxy/certs/{{ cert.name }}_haproxy.pem [ocsp-update on]
9 | {% endfor %}
10 | {% endif %}
11 | {% if haproxy_extra_certs is defined %}
12 | {% for cert in haproxy_extra_certs %}
13 | {{ cert }} [ocsp-update on]
14 | {% endfor %}
15 | {% endif %}
16 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/haproxy_stick_table_backend.cfg.j2:
--------------------------------------------------------------------------------
1 | # Measure number of http requests per 10 seconds
2 | # ipv4 addresses are mapped to ipv6 for type ipv6, and work for both
3 | backend st_httpreqs_per_ip
4 | stick-table type ipv6 size 1m expire 10s store http_req_rate(10s)
5 |
6 | # Measure the unique ip and path request rates per minute
7 | backend st_httpreqs_per_ip_and_path
8 | stick-table type binary len 20 size 1m expire 1m store http_req_rate(1m)
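# Illustrative only -- how a frontend would typically consume these tables
# (the actual tracking/deny rules live in the frontend configuration):
#   http-request track-sc0 src table st_httpreqs_per_ip
#   http-request track-sc1 base32+src table st_httpreqs_per_ip_and_path
#   http-request deny deny_status 429 if { sc_http_req_rate(0) gt <limit> }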
9 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/internalips.acl.j2:
--------------------------------------------------------------------------------
1 | {% for ip in haproxy_internalips %}
2 | {{ ip }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/ratelimits.map.j2:
--------------------------------------------------------------------------------
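{# Per-path overrides of the default per-IP(+path) request-rate limit; these OIDC endpoints are typically called machine-to-machine and need a higher threshold. #}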
1 | /oidc/introspect {{ haproxy_max_request_rate_ip_path_exceptions }}
2 | /oidc/token {{ haproxy_max_request_rate_ip_path_exceptions }}
3 | /oidc/userinfo {{ haproxy_max_request_rate_ip_path_exceptions }}
4 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/redirects.map.j2:
--------------------------------------------------------------------------------
1 | {%if haproxy_redirects is defined %}
2 | {% for application in haproxy_redirects %}
3 | {{ application.url }} {{ application.redirecturl }}
4 | {% endfor %}
5 | {% endif %}
6 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/stagingips.acl.j2:
--------------------------------------------------------------------------------
1 | {% for ip in haproxy_stagingips %}
2 | {{ ip }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/update_ocsp.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Call hapos-upd to update OCSP stapling info foreach of our haproxy certificates
3 |
4 | # probably we want to continue even if one fails
5 | set -e
6 |
7 | {% for cert in haproxy_sni_ip.certs %}
8 | /usr/local/sbin/hapos-upd --partial-chain --good-only --socket /var/lib/haproxy/haproxy.stats \
9 | --VAfile /etc/pki/haproxy/{{ cert.name }}_haproxy.pem \
10 | --cert /etc/pki/haproxy/{{ cert.name }}_haproxy.pem
11 | {% endfor %}
12 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/validvhostsrestricted.acl.j2:
--------------------------------------------------------------------------------
1 | {% for application in haproxy_applications %}
2 | {%if application.restricted is defined %}
3 | {{ application.vhost_name }}
4 | {% endif %}
5 | {% endfor %}
6 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/validvhostsunrestricted.acl.j2:
--------------------------------------------------------------------------------
1 | {% for application in haproxy_applications %}
2 | {%if application.restricted is not defined %}
3 | {{ application.vhost_name }}
4 | {% endif %}
5 | {% endfor %}
6 | {%if haproxy_redirects is defined %}
7 | {% for application in haproxy_redirects %}
8 | {%if application.hostname is defined %}
9 | {{ application.hostname }}
10 | {% endif %}
11 | {% endfor %}
12 | {% endif %}
--------------------------------------------------------------------------------
/roles/haproxy_acls/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: reload haproxy
3 | command: "sudo systemctl reload haproxy"
4 |
--------------------------------------------------------------------------------
/roles/haproxy_acls/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create block- and allowlists
3 | template:
4 | src: "{{ item }}.j2"
5 | dest: "/etc/haproxy/acls/{{ item }}"
6 | with_items:
7 | - allowedips.acl
8 | - blockedips.acl
9 | - internalips.acl
10 | notify:
11 | - reload haproxy
12 |
--------------------------------------------------------------------------------
/roles/haproxy_acls/templates/allowedips.acl.j2:
--------------------------------------------------------------------------------
1 | {% for ip in haproxy_allowlistips %}
2 | {{ ip }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/haproxy_acls/templates/blockedips.acl.j2:
--------------------------------------------------------------------------------
1 | {% for ip in haproxy_blocklistips %}
2 | {{ ip }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/haproxy_acls/templates/internalips.acl.j2:
--------------------------------------------------------------------------------
1 | {% for ip in haproxy_internalips %}
2 | {{ ip }}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/roles/hosts/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hosts_ip_address: 192.168.66.98
3 |
--------------------------------------------------------------------------------
/roles/hosts/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set hosts
3 | lineinfile:
4 | dest: /etc/hosts
5 | line: "{{ hosts_ip_address }} {{ item }}"
6 | unsafe_writes: yes
7 | with_items:
8 | - "static.vm.openconext.org"
9 | - "metadata.vm.openconext.org"
10 | - "engine.vm.openconext.org"
11 | - "profile.vm.openconext.org"
12 | - "mujina-sp.vm.openconext.org"
13 | - "mujina-idp.vm.openconext.org"
14 | - "voot.vm.openconext.org"
15 | - "lb.vm.openconext.org"
16 | - "apps.vm.openconext.org"
17 | - "db.vm.openconext.org"
18 | - "pdp.vm.openconext.org"
19 | - "engine-api.vm.openconext.org"
20 | - "aa.vm.openconext.org"
21 | - "link.vm.openconext.org"
22 | - "connect.vm.openconext.org"
23 | - "teams.vm.openconext.org"
24 | - "manage.vm.openconext.org"
25 |
26 | - name: Set logstash in hostsfile
27 | lineinfile: dest=/etc/hosts line="192.168.66.99 {{ item }}" unsafe_writes=yes
28 | with_items:
29 | - "logstash.vm.openconext.org"
30 |
--------------------------------------------------------------------------------
/roles/influxdb/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | influx_data_dir: /data/influxdb
3 |
4 |
--------------------------------------------------------------------------------
/roles/influxdb/files/influx.repo:
--------------------------------------------------------------------------------
1 | [influxdb]
2 | name = InfluxDB Repository - RHEL $releasever
3 | baseurl = https://repos.influxdata.com/rhel/$releasever/$basearch/stable
4 | enabled = 1
5 | gpgcheck = 1
6 | gpgkey = https://repos.influxdata.com/influxdb.key
7 |
--------------------------------------------------------------------------------
/roles/influxdb/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart influxdb
3 | service: name=influxdb state=restarted
4 |
--------------------------------------------------------------------------------
/roles/invite/defaults/main.yml:
--------------------------------------------------------------------------------
1 | invite_manage_provision_oidcrp_client_id: "{{ invite.oidc_client_id }}"
2 | invite_manage_provision_oidcrp_name_en: "{{ instance_name }} invite"
3 | invite_manage_provision_oidcrp_description_en: "{{ instance_name }} invite"
4 | invite_manage_provision_oidcrp_secret: "{{ invite.oidc_secret }}"
5 | invite_manage_provision_oidcrp_redirecturls: "https://invite.{{ base_domain }}/login/oauth2/code/oidcng"
6 | invite_manage_provision_oidcrp_grants: "authorization_code"
7 | invite_manage_provision_oidcrp_allowed_resource_servers: '{"name": "{{ invite.resource_server_id }}"}'
8 | invite_manage_provision_oidcrp_is_public_client: false
9 |
10 | invite_manage_provision_oauth_rs_name_en: "{{ instance_name }} invite Resource Server"
11 | invite_manage_provision_oauth_rs_description_en: "{{ instance_name }} invite Resource Server"
12 | invite_manage_provision_oauth_rs_client_id: "{{ invite.resource_server_id }}"
13 | invite_manage_provision_oauth_rs_rp_secret: "{{ invite.resource_server_secret }}"
14 | invite_manage_provision_oauth_rs_scopes: "openid"
15 | invite_mock_install: false
16 | # Override is in the dockerX.env host_var files
17 | invite_cronjobmaster: true
18 | invite_docker_networks:
19 | - name: loadbalancer
20 |
--------------------------------------------------------------------------------
/roles/invite/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart inviteserver
2 | community.docker.docker_container:
3 | name: inviteserver
4 | state: started
5 | restart: true
6 | when: inviteservercontainer is success and inviteservercontainer is not change
7 |
8 | - name: restart inviteprovisioningmock
9 | community.docker.docker_container:
10 | name: inviteprovisioningmock
11 | state: started
12 | restart: true
13 |
--------------------------------------------------------------------------------
/roles/invite/templates/mockapplication.yml.j2:
--------------------------------------------------------------------------------
1 | logging:
2 | level:
3 | org.springframework: INFO
4 | org.springframework.security: INFO
5 |
6 | environment: {{ env }}
7 | mock-server-base-url: "https://mock.{{ base_domain }}"
8 | invite-server-ms-redirect-url: https://invite.{{ base_domain }}/api/v1/users/ms-accept-return
9 |
10 | spring:
11 | jpa:
12 | properties:
13 | hibernate:
14 | naming-strategy: org.hibernate.cfg.ImprovedNamingStrategy
15 | dialect: org.hibernate.dialect.MariaDBDialect
16 | open-in-view: false
17 | datasource:
18 | driver-class-name: com.mysql.cj.jdbc.Driver
19 | url: jdbc:mysql://{{ invite.db_host }}/invite
20 | username: {{ invite.db_user }}
21 | password: "{{ invite.db_secret }}"
22 |
23 | server:
24 | port: 8081
25 | error:
26 | whitelabel:
27 | enabled: false
28 |
29 | management:
30 | endpoints:
31 | web:
32 | exposure:
33 | include: "health,info,mappings"
34 | base-path: "/internal"
35 | endpoint:
36 | info:
37 | enabled: true
38 | health:
39 | enabled: true
40 | mappings:
41 | enabled: true
42 | info:
43 | git:
44 | mode: full
45 |
--------------------------------------------------------------------------------
/roles/invite/vars/main.yml:
--------------------------------------------------------------------------------
1 | manage_provision_oidcrp_client_id: "{{ invite_manage_provision_oidcrp_client_id }}"
2 | manage_provision_oidcrp_name_en: "{{ invite_manage_provision_oidcrp_name_en }}"
3 | manage_provision_oidcrp_description_en: "{{ invite_manage_provision_oidcrp_description_en }}"
4 | manage_provision_oidcrp_secret: "{{ invite_manage_provision_oidcrp_secret }}"
5 | manage_provision_oidcrp_redirecturls: "{{ invite_manage_provision_oidcrp_redirecturls }}"
6 | manage_provision_oidcrp_grants: "{{ invite_manage_provision_oidcrp_grants }}"
7 | manage_provision_oidcrp_allowed_resource_servers: "{{ invite_manage_provision_oidcrp_allowed_resource_servers }}"
8 | manage_provision_oidcrp_is_public_client: "{{ invite_manage_provision_oidcrp_is_public_client }}"
9 | manage_provision_oauth_rs_name_en: "{{ invite_manage_provision_oauth_rs_name_en }}"
10 | manage_provision_oauth_rs_description_en: "{{ invite_manage_provision_oauth_rs_description_en }}"
11 | manage_provision_oauth_rs_client_id: "{{ invite_manage_provision_oauth_rs_client_id }}"
12 | manage_provision_oauth_rs_secret: "{{ invite_manage_provision_oauth_rs_rp_secret }}"
13 | manage_provision_oauth_rs_scopes: "{{ invite_manage_provision_oauth_rs_scopes }}"
14 |
--------------------------------------------------------------------------------
/roles/iptables/defaults/main.yml:
--------------------------------------------------------------------------------
1 | iptables_enable: True
2 |
--------------------------------------------------------------------------------
/roles/iptables/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart iptables
3 | service:
4 | name: iptables
5 | state: restarted
6 |
7 | - name: restart ip6tables
8 | service:
9 | name: ip6tables
10 | state: restarted
11 |
--------------------------------------------------------------------------------
/roles/iptables/vars/Debian.yml:
--------------------------------------------------------------------------------
1 | iptables_location: /etc/sysconfig/iptables
2 | iptables_location6: /etc/sysconfig/ip6tables
3 |
--------------------------------------------------------------------------------
/roles/keepalived/defaults/main.yml:
--------------------------------------------------------------------------------
1 | keepalived_config_name: loadbalancer
2 |
--------------------------------------------------------------------------------
/roles/keepalived/files/keepalived_check_maintenance:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #Check if there is a maintenance file
3 | if [ -f "/var/lib/keepalived/maintenance" ]; then
4 | exit 1
5 | else
6 | exit 0
7 | fi
8 |
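9 | # Usage note: this check exits 1 (failure) while the flag file exists.
10 | # To flag this node for maintenance:
11 | #   touch /var/lib/keepalived/maintenance
12 | # To put it back in service:
13 | #   rm /var/lib/keepalived/maintenance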
--------------------------------------------------------------------------------
/roles/keepalived/files/keepalived_notify:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | TYPE=$1
4 | NAME=$2
5 | STATE=$3
6 |
7 | case $NAME in
8 | "ipv6") FILENAME=/var/lib/keepalived/keepalived6.state ;;
9 |
10 | *) FILENAME=/var/lib/keepalived/keepalived.state ;;
11 |
12 | esac
13 |
14 | case $STATE in
15 | "MASTER") /bin/echo master > $FILENAME
16 | exit 0
17 | ;;
18 | "BACKUP") /bin/echo backup > $FILENAME
19 | exit 0
20 | ;;
21 | "FAULT") /bin/echo fault > $FILENAME
22 | exit 0
23 | ;;
24 | *) echo "unknown state"
25 | exit 1
26 | ;;
27 | esac
28 |
--------------------------------------------------------------------------------
/roles/keepalived/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: reload keepalived
3 | service:
4 | name: keepalived
5 | state: reloaded
6 |
7 | - name: restart keepalived
8 | service:
9 | name: keepalived
10 | state: restarted
11 |
--------------------------------------------------------------------------------
/roles/keepalived/templates/keepalived_dbcluster.conf.j2:
--------------------------------------------------------------------------------
1 | global_defs {
2 | vrrp_garp_master_refresh 60
3 | enable_script_security true
4 | script_user keepalived_script mysqlusers
5 | }
6 | vrrp_script chk_cluster {
7 | script "/usr/local/bin/clustercheck clustercheck {{ galera_clustercheck_password }} 1"
8 | interval 5
9 | fall 3
10 | rise 1
11 | }
12 |
13 | vrrp_instance galera {
14 | interface {{ ansible_default_ipv4.interface }} # interface to monitor
15 | state {{ keepalived.state }}
16 | virtual_router_id {{ keepalived_vrid }} # Assign one ID for this route
17 | priority {{ keepalived.prio }} # 101 on master, 100 on backup
18 | advert_int 1
19 | authentication {
20 | auth_type PASS
21 | auth_pass {{ keepalived_dbcluster_vrrp_password }}
22 | }
23 | virtual_ipaddress {
24 | {{ dbcluster_ip }}
25 |
26 | }
27 | track_script {
28 | chk_cluster
29 | }
30 | notify /usr/local/bin/keepalived_notify
31 | }
32 |
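33 | {#
34 | Example host_vars/group_vars driving this template (hypothetical values,
35 | variable names taken from the references above):
36 |   keepalived:
37 |     state: MASTER      # BACKUP on the standby node
38 |     prio: 101          # 101 on master, 100 on backup
39 |   keepalived_vrid: 51
40 |   dbcluster_ip: 192.168.66.100
41 | #}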
--------------------------------------------------------------------------------
/roles/lifecycle/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | appname: lifecycle
3 | lifecycle_version: ''
4 | lifecycle_user: lifecycle
5 | lifecycle_symfony_env: prod
6 | lifecycle_eb_logins_db: eb_logins
7 | lifecycle_db_host: localhost
8 | lifecycle_user_quota: 1500
9 | lifecycle_inactivity_period: 38
10 | lifecycle_api_enabled: true
11 | lifecycle_api_password: secret
12 | lifecycle_api_username: lifecycle
13 | current_release_config_dir_name: /opt/openconext/{{ appname }}
14 | lifecycle_docker_networks:
15 | - name: loadbalancer
16 |
--------------------------------------------------------------------------------
/roles/lifecycle/files/env:
--------------------------------------------------------------------------------
1 | APP_ENV=prod
2 |
--------------------------------------------------------------------------------
/roles/lifecycle/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart lifecycle
3 | community.docker.docker_container:
4 | name: lifecycle
5 | state: started
6 | restart: true
7 | # avoid recreating the container; per the docker_container module notes, recreation can cause unexpected data loss
8 | comparisons:
9 | '*': ignore
10 | when: lifecyclecontainer is success and lifecyclecontainer is not change
11 |
--------------------------------------------------------------------------------
/roles/lifecycle/vars/main.yml:
--------------------------------------------------------------------------------
1 | appname: lifecycle
2 | current_release_config_dir_name: /opt/openconext/{{ appname }}
3 |
--------------------------------------------------------------------------------
/roles/manage/files/__cacert_entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # Converted to POSIX shell to avoid the need for bash in the image
3 |
4 | set -e
5 |
6 | # Opt-in is only activated if the environment variable is set
7 | if [ -n "$USE_SYSTEM_CA_CERTS" ]; then
8 |
9 | # Copy certificates from /certificates to the system truststore, but only if the directory exists and is not empty.
10 | # The reason why this is not part of the opt-in is because it leaves open the option to mount certificates at the
11 | # system location, for whatever reason.
12 | if [ -d /certificates ] && [ -n "$(ls -A /certificates 2>/dev/null)" ]; then
13 | cp -a /certificates/* /usr/local/share/ca-certificates/
14 | fi
15 |
16 | CACERT="$JAVA_HOME/lib/security/cacerts"
17 |
18 | # JDK8 puts its JRE in a subdirectory
19 | if [ -f "$JAVA_HOME/jre/lib/security/cacerts" ]; then
20 | CACERT="$JAVA_HOME/jre/lib/security/cacerts"
21 | fi
22 |
23 | # OpenJDK images used to create a hook for `update-ca-certificates`. Since we are using an entrypoint anyway, we
24 | # might as well just generate the truststore and skip the hooks.
25 | update-ca-certificates
26 |
27 | trust extract --overwrite --format=java-cacerts --filter=ca-anchors --purpose=server-auth "$CACERT"
28 | fi
29 |
30 | exec "$@"
31 |
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/oauth20_rs.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "entityid": "",
3 | "state": "testaccepted",
4 | "metaDataFields": {
5 | "name:en": "",
6 | "secret": "",
7 | "scopes": [
8 | "openid"
9 | ],
10 | "NameIDFormat": "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent",
11 | "OrganizationName:en": ""
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/oidc10_rp.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "entityid": "",
3 | "state": "testaccepted",
4 | "allowedall": true,
5 | "arp": {
6 | "enabled": false,
7 | "attributes": {}
8 | },
9 | "metaDataFields": {
10 | "name:en": "",
11 | "OrganizationName:en": "",
12 | "secret": "",
13 | "redirectUrls": [],
14 | "grants": ["authorization_code"],
15 | "NameIDFormat": "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"
16 | },
17 | "allowedEntities": [],
18 | "allowedResourceServers": []
19 | }
20 |
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/policy.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "metaDataFields": {},
3 | "name": "",
4 | "entityid": "",
5 | "description": "",
6 | "serviceProviderIds": [],
7 | "identityProviderIds": [],
8 | "attributes": [],
9 | "loas": [],
10 | "denyAdvice": "",
11 | "denyRule": false,
12 | "allAttributesMustMatch": false,
13 | "userDisplayName": "",
14 | "authenticatingAuthorityName": "",
15 | "denyAdviceNl": "",
16 | "active": true,
17 | "type": "reg"
18 | }
19 |
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/provisioning.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "entityid": "",
3 | "state": "prodaccepted",
4 | "metaDataFields": {
5 | "name:en": "",
6 | "provisioning_type": "scim"
7 | },
8 | "applications": []
9 | }
10 |
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/saml20_idp.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "entityid": "",
3 | "state": "testaccepted",
4 | "allowedall": true,
5 | "allowedEntities": [],
6 | "disableConsent": [],
7 | "stepupEntities": [],
8 | "metaDataFields": {
9 | "name:en": "",
10 | "OrganizationName:en": "",
11 | "SingleSignOnService:0:Binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect",
12 | "SingleSignOnService:0:Location": "",
13 | "coin:disable_scoping": true,
14 | "DiscoveryName:0:en": ""
15 | },
16 | "autoRefresh": {
17 | "enabled": true,
18 | "allowAll": true,
19 | "fields": {
20 | }
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/saml20_sp.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "entityid": "",
3 | "state": "testaccepted",
4 | "allowedall": true,
5 | "arp": {
6 | "enabled": false,
7 | "attributes": {}
8 | },
9 | "metaDataFields": {
10 | "name:en": "",
11 | "OrganizationName:en": "",
12 | "AssertionConsumerService:0:Binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST",
13 | "AssertionConsumerService:0:Location": "",
14 | "NameIDFormat": "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent",
15 | "coin:signature_method": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"
16 | },
17 | "allowedEntities": [],
18 | "autoRefresh": {
19 | "enabled": false,
20 | "allowAll": false,
21 | "fields": {
22 | }
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/single_tenant_template.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "entityid": "",
3 | "state": "testaccepted",
4 | "allowedall": true,
5 | "arp": {
6 | "enabled": false,
7 | "attributes": {}
8 | },
9 | "metaDataFields": {
10 | "name:en": "Single Tenant Template for ....",
11 | "OrganizationName:en": ""
12 | },
13 | "allowedEntities": []
14 | }
--------------------------------------------------------------------------------
/roles/manage/files/metadata_templates/sram.template.json:
--------------------------------------------------------------------------------
1 | {
2 | "entityid": "",
3 | "state": "testaccepted",
4 | "allowedall": true,
5 | "arp": {
6 | "enabled": false,
7 | "attributes": {}
8 | },
9 | "metaDataFields": {
10 | "name:en": "",
11 | "OrganizationName:en": "",
12 | "AssertionConsumerService:0:Binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST",
13 | "AssertionConsumerService:0:Location": "https://trusted.proxy.acs.location.rules",
14 | "NameIDFormat": "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent",
15 | "coin:signature_method": "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
16 | "connection_type": "oidc_rp",
17 | "secret": "",
18 | "redirectUrls": [],
19 | "grants": ["authorization_code"],
20 | "coin:collab_enabled": true
21 | },
22 | "allowedEntities": [],
23 | "autoRefresh": {
24 | "enabled": false,
25 | "allowAll": false,
26 | "fields": {
27 | }
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/roles/manage/files/policies/allowed_attributes.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "value": "urn:mace:terena.org:attribute-def:schacHomeOrganization",
4 | "label": "Schac home organization"
5 | },
6 | {
7 | "value": "urn:mace:terena.org:attribute-def:schacHomeOrganizationType",
8 | "label": "Schac home organization type"
9 | },
10 | {
11 | "value": "urn:mace:dir:attribute-def:eduPersonAffiliation",
12 | "label": "Edu person affiliation"
13 | },
14 | {
15 | "value": "urn:mace:dir:attribute-def:eduPersonScopedAffiliation",
16 | "label": "Edu person scoped affiliation"
17 | },
18 | {
19 | "value": "urn:mace:dir:attribute-def:eduPersonEntitlement",
20 | "label": "Edu person entitlement"
21 | },
22 | {
23 | "value": "urn:mace:dir:attribute-def:isMemberOf",
24 | "label": "Is-member-of"
25 | },
26 | {
27 | "value": "urn:collab:group:surfteams.nl",
28 | "label": "SURFteams group name (fully qualified)"
29 | },
30 | {
31 | "value": "urn:collab:sab:surfnet.nl",
32 | "label": "SAB role"
33 |
34 | },
35 | {
36 | "value": "urn:mace:dir:attribute-def:mail",
37 | "label": "Mail address"
38 |
39 | }
40 | ]
--------------------------------------------------------------------------------
/roles/manage/files/policies/extra_saml_attributes.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "value": "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified",
4 | "label": "Unspecified name ID"
5 | },
6 | {
7 | "value": "urn:mace:surfnet.nl:collab:xacml-attribute:ip-address",
8 | "label": "IP Address"
9 | }
10 |
11 | ]
--------------------------------------------------------------------------------
/roles/manage/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart manageserver
2 | community.docker.docker_container:
3 | name: manageserver
4 | state: started
5 | restart: true
6 | # avoid recreating the container; per the docker_container module notes, recreation can cause unexpected data loss
7 | comparisons:
8 | '*': ignore
9 | when: manageservercontainer is success and manageservercontainer is not change
10 |
--------------------------------------------------------------------------------
/roles/manage/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 |
3 |
4 |
5 |
6 |
7 | %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n
8 |
9 |
10 |
11 |
12 | {{ smtp_server }}
13 | {{ noreply_email }}
14 | {{ error_mail_to }}
15 | {{ error_subject_prefix }}Unexpected error manage
16 |
17 |
18 |
19 | org.everit.json.schema.ValidationException
20 | ERROR
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/roles/manage/templates/manage-api-users.yml.j2:
--------------------------------------------------------------------------------
1 | # Valid scopes are manage.api.Scope.values(); =>
2 | # ADMIN, //Standard scope for all GUI related endpoint (e.g. /manage/api/client/** endpoints)
3 | # CHANGE_REQUEST_IDP, //Allowed to create change requests for IdP
4 | # CHANGE_REQUEST_SP, //Allowed to create change requests for SP
5 | # POLICIES, //Allowed to CRUD policies scoped for the real user
6 | # PUSH, //Allowed to push changes to EB & OIDC-NG
7 | # READ, //Allowed to read entities
8 | # SYSTEM, //Allowed everything including Attribute Manipulation
9 | # WRITE_SP, //Allowed to CRUD SP / RP /RS
10 | # WRITE_IDP //Allowed to CRUD IdP
11 |
12 | apiUsers:
13 | {% for user in manage.apiUsers %}
14 | - {
15 | name: "{{ user.name }}",
16 | password: "{{ user.password }}",
17 | scopes: [ {{ user.scopes|join(', ') }} ]
18 | }
19 | {% endfor %}
20 |
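21 | # Example (hypothetical) shape of the `manage.apiUsers` variable this template
22 | # iterates over, typically set in group_vars or the secrets file:
23 | #
24 | # manage:
25 | #   apiUsers:
26 | #     - name: "sysadmin"
27 | #       password: "changeme"
28 | #       scopes: [ "SYSTEM" ]
29 | #     - name: "readonly"
30 | #       password: "changeme"
31 | #       scopes: [ "READ" ]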
--------------------------------------------------------------------------------
/roles/manage_provision_entities/defaults/main.yml:
--------------------------------------------------------------------------------
1 | manage_provision_state: "prodaccepted"
2 | manage_provision_samlsp_sign: false
3 | manage_provision_samlsp_trusted_proxy: false
4 | manage_provision_samlsp_sp_cert: ""
5 |
--------------------------------------------------------------------------------
/roles/manage_provision_entities/templates/client_credentials_client.j2:
--------------------------------------------------------------------------------
1 | {
2 | "version": 0,
3 | "type": "oidc10_rp",
4 | "data": {
5 | "entityid": "{{ manage_provision_cc_client_id }}",
6 | "state": "{{ manage_provision_state }}",
7 | "allowedall": false,
8 | "arp": {
9 | "enabled": true,
10 | "attributes": {}
11 | },
12 | "metaDataFields": {
13 | "name:en": "{{ manage_provision_cc_name_en }}",
14 | "name:nl": "{{ manage_provision_cc_name_en }}",
15 | "description:en": "{{ manage_provision_cc_description_en }}",
16 | "OrganizationName:en": "{{ instance_name }}",
17 | "secret": "{{ manage_provision_cc_rp_secret }}",
18 | "scopes": [ "{{ manage_provision_cc_scopes }}" ],
19 | "grants": [ "client_credentials" ],
20 | "isResourceServer": {{ manage_provision_cc_is_resource_server }}
21 | },
22 | "allowedEntities": [],
23 | "allowedResourceServers": [],
24 | "revisionnote": "Initial import"
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/roles/manage_provision_entities/templates/oauth20_rs.j2:
--------------------------------------------------------------------------------
1 | {
2 | "version": 0,
3 | "type": "oauth20_rs",
4 | "data": {
5 | "entityid": "{{ manage_provision_oauth_rs_client_id }}",
6 | "state": "{{ manage_provision_state }}",
7 | "metaDataFields": {
8 | "name:en": "{{ manage_provision_oauth_rs_name_en }}",
9 | "name:nl": "{{ manage_provision_oauth_rs_name_en }}",
10 | "description:en": "{{ manage_provision_oauth_rs_description_en }}",
11 | "OrganizationName:en": "{{ instance_name }}",
12 | "scopes": [ " {{ manage_provision_oauth_rs_scopes }}" ],
13 | "secret": "{{ manage_provision_oauth_rs_secret }}"
14 | },
15 | "revisionnote": "Initial import"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/roles/manage_provision_entities/templates/oidc10_rp.j2:
--------------------------------------------------------------------------------
1 | {
2 | "version": 0,
3 | "type": "oidc10_rp",
4 | "data": {
5 | "entityid": "{{ manage_provision_oidcrp_client_id }}",
6 | "state": "{{ manage_provision_state }}",
7 | "allowedall": true,
8 | "arp": {
9 | "enabled": true,
10 | "attributes": {}
11 | },
12 | "metaDataFields": {
13 | "name:en": "{{ manage_provision_oidcrp_name_en }}",
14 | "name:nl": "{{ manage_provision_oidcrp_name_en }}",
15 | "description:en": "{{ manage_provision_oidcrp_description_en }}",
16 | "OrganizationName:en": "{{ instance_name }}",
17 | "secret": "{{ manage_provision_oidcrp_secret }}",
18 | {% if manage_provision_oidcrp_grants != "client_credentials" %}
19 | "redirectUrls": [ "{{ manage_provision_oidcrp_redirecturls }}" ],
20 | "isPublicClient": "{{ manage_provision_oidcrp_is_public_client }}",
21 | {% endif %}
22 | "grants": [ "{{ manage_provision_oidcrp_grants }}" ]
23 | },
24 | "allowedEntities": [],
25 | "allowedResourceServers": [ {{ manage_provision_oidcrp_allowed_resource_servers }} ],
26 | "revisionnote": "Initial import"
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/roles/manage_provision_entities/templates/saml20_idp.j2:
--------------------------------------------------------------------------------
1 | {
2 | "version": 0,
3 | "type": "saml20_idp",
4 | "data": {
5 | "entityid": "{{ manage_provision_samlidp_entity_id }}",
6 | "state": "{{ manage_provision_state }}",
7 | "type": "saml20-idp",
8 | "allowedall": true,
9 | "metaDataFields": {
10 | "description:en": "{{ manage_provision_samlidp_description_en }}",
11 | "name:en": "{{ manage_provision_samlidp_name_en }}",
12 | "name:nl": "{{ manage_provision_samlidp_name_en }}",
13 | "OrganizationName:en": "{{ instance_name }}",
14 | "certData": "{{ manage_provision_samlidp_idp_cert }}",
15 | "SingleSignOnService:0:Binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST",
16 | "SingleSignOnService:0:Location": "{{ manage_provision_samlidp_idp_sso_location }}"
17 | },
18 | "allowedEntities": [],
19 | "disableConsent": [],
20 | "stepupEntities": [],
21 | "mfaEntities": []
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/roles/mariadbdocker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_mariadb_network_range: "172.21.21.0/24"
2 | mysql_backup_user: backup_user
3 | backup_node: True
4 |
--------------------------------------------------------------------------------
/roles/mariadbdocker/files/settings.cnf:
--------------------------------------------------------------------------------
1 | [mariadb]
2 | sql_mode=NO_ENGINE_SUBSTITUTION
3 |
--------------------------------------------------------------------------------
/roles/maven_artifact_requirements/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install dependencies for lxml
3 | yum:
4 | name:
5 | - python-lxml
6 | state: present
7 |
--------------------------------------------------------------------------------
/roles/metadata/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata_dir: "/opt/openconext/metadata/www"
3 | metadata:
4 | sync_user: root
5 |
--------------------------------------------------------------------------------
/roles/metadata/files/alive.txt:
--------------------------------------------------------------------------------
1 | Dummy file to serve as an endpoint for haproxy health checks
2 |
--------------------------------------------------------------------------------
/roles/metadata/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create sync user
3 | user: name={{ metadata.sync_user }} system=yes
4 |
5 | - name: create metadata dir
6 | file: path={{ metadata_dir }} state=directory owner={{ metadata.sync_user }}
7 |
8 | - name: copy dummy files
9 | copy: src=alive.txt dest={{ metadata_dir }}
10 |
11 | - name: copy apache config
12 | template: src=metadata.conf.j2 dest=/etc/httpd/conf.d/metadata.conf
13 | notify: reload httpd
14 |
--------------------------------------------------------------------------------
/roles/mongo/README.md:
--------------------------------------------------------------------------------
1 | # Mongo installation
2 | This role installs MongoDB in either standalone or cluster mode. It is intended to be used in the OpenConext platform.
3 |
4 | You need to set the role of each mongo host in its host_vars.
5 |
6 | The key is `mongo_replication_role` and it can have the values "primary", "secondary" or "arbiter"; an example is shown below.
7 |
8 | Please review the official MongoDB documentation for more information.
9 |
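10 | A minimal host_vars example (hostnames are placeholders):
11 |
12 | ```yaml
13 | # host_vars/mongo1.example.com.yml
14 | mongo_replication_role: primary
15 |
16 | # host_vars/mongo2.example.com.yml
17 | mongo_replication_role: secondary
18 | ```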
--------------------------------------------------------------------------------
/roles/mongo/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # The global variables file for the mongodb installation
2 |
3 | # In the current mongo role only one cluster per environment
4 | # is possible; that works for now.
5 | mongo_servers: [] # Set this in group_vars
6 | # - mongo1.example.com
7 | # - mongo2.example.com
8 |
9 | # The port for mongo server
10 | mongod_port: 27017
11 |
12 | # The password for admin user
13 | mongo_admin_pass: "{{ mongo_admin_password }}" # Set this in secrets
14 |
15 | # The name of the replication set
16 | replica_set_name: "{{ instance_name }}" # Set this in group_vars
17 |
18 | # Add a database
19 | mongo:
20 | users:
21 | - { name: managerw, db_name: metadata, password: "{{ mongo_passwords.manage }}" }
22 | - { name: oidcsrw, db_name: oidc, password: "{{ mongo_passwords.oidcng }}" }
23 | - { name: myconextrw, db_name: myconext, password: "{{ mongo_passwords.myconext }}" }
24 |
25 | # Listen on all addresses by default
26 | mongo_bind_listen_address: "0.0.0.0"
27 |
--------------------------------------------------------------------------------
/roles/mongo/files/mongo.repo:
--------------------------------------------------------------------------------
1 | [mongodb-org-6.0]
2 | name=MongoDB Repository
3 | baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/6.0/x86_64/
4 | gpgcheck=1
5 | enabled=1
6 | gpgkey=https://www.mongodb.org/static/pgp/server-6.0.asc
7 |
--------------------------------------------------------------------------------
/roles/mongo/files/mongo_kernel_settings.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
3 | echo never > /sys/kernel/mm/transparent_hugepage/enabled
4 | fi
5 | if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
6 | echo never > /sys/kernel/mm/transparent_hugepage/defrag
7 | fi
8 |
--------------------------------------------------------------------------------
/roles/mongo/files/mongodb.logrotate:
--------------------------------------------------------------------------------
1 | /var/log/mongodb/*.log {
2 | daily
3 | rotate 14
4 | copytruncate
5 | delaycompress
6 | compress
7 | notifempty
8 | missingok
9 | dateext
10 | sharedscripts
11 | postrotate
12 | /usr/bin/pkill -USR1 mongod
13 | endscript
14 | }
15 |
--------------------------------------------------------------------------------
/roles/mongo/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart mongod
3 | throttle: 1
4 | service:
5 | name: mongod
6 | state: restarted
7 |
--------------------------------------------------------------------------------
/roles/mongo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Temporarily use python3 as the remote interpreter; this fixes pymongo
3 | ansible.builtin.set_fact:
4 | ansible_python_interpreter: "/usr/bin/python3"
5 | tags: mongo_users
6 |
7 | - name: Include CA tasks
8 | ansible.builtin.include_tasks:
9 | file: ca.yml
10 | apply:
11 | delegate_to: localhost
12 | run_once: true
13 | become: false
14 |
15 | - name: Include Certificate tasks
16 | ansible.builtin.include_tasks:
17 | file: certs.yml
18 |
19 | - name: Include installation tasks
20 | ansible.builtin.include_tasks:
21 | file: install.yml
22 |
23 | - name: Include cluster installation tasks
24 | ansible.builtin.include_tasks:
25 | file: cluster.yml
26 |
27 | - name: Include user creation
28 | ansible.builtin.include_tasks:
29 | file: users.yml
30 |
31 | - name: Include postinstallation tasks
32 | ansible.builtin.include_tasks:
33 | file: postinstall.yml
34 |
35 | - name: Use python2 again as remote interpreter
36 | ansible.builtin.set_fact:
37 | ansible_python_interpreter: "/usr/bin/python"
38 | when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7'
39 |
--------------------------------------------------------------------------------
/roles/mongo/tasks/postinstall.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add logrotate snippet
3 | ansible.builtin.copy:
4 | src: "mongodb.logrotate"
5 | dest: "/etc/logrotate.d/mongodb"
6 | mode: "0644"
7 | owner: root
8 | group: root
9 |
10 | - name: Create the backup directory
11 | ansible.builtin.file:
12 | path: /home/backup
13 | state: directory
14 | owner: root
15 | group: root
16 | mode: "0700"
17 |
18 | - name: Install the backup script
19 | ansible.builtin.template:
20 | src: "backup_mongo.pl.j2"
21 | dest: "/usr/local/sbin/backup_mongo.pl"
22 | mode: "0700"
23 | owner: root
24 | group: root
25 | when: mongo_replication_role != 'arbiter'
26 |
27 |
28 | - name: Create cron symlink for backup script
29 | ansible.builtin.file:
30 | src: "/usr/local/sbin/backup_mongo.pl"
31 | dest: "/etc/cron.daily/mongodb_backup"
32 | state: link
33 | mode: "0700"
34 | owner: root
35 | when: mongo_replication_role != 'arbiter'
36 |
37 | # TODO: this template gets mongo_servers from
38 | # the inventory; maybe change that to group_vars.
39 | # This is not on a per-app basis: these are mongo servers
40 | # in the same cluster.
41 | - name: Create mongosh config file
42 | ansible.builtin.template:
43 | src: mongoshrc.js.j2
44 | dest: /root/.mongoshrc.js
45 | owner: root
46 | group: root
47 | mode: "0640"
48 |
--------------------------------------------------------------------------------
/roles/mongo/tasks/users.yml:
--------------------------------------------------------------------------------
1 | - name: Create mongo database users
2 | mongodb_user:
3 | login_database: admin
4 | database: "{{ item.db_name }}"
5 | login_user: admin
6 | login_password: "{{ mongo_admin_pass }}"
7 | name: "{{ item.name }}"
8 | password: "{{ item.password }}"
9 | roles: readWrite
10 | replica_set: "{{ replica_set_name }}"
11 | no_log: true
12 | run_once: true
13 | with_items: "{{ mongo.users }}"
14 | changed_when: false
15 | tags: mongo_users
16 |
--------------------------------------------------------------------------------
/roles/mongo/templates/backup_mongo.pl.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | # Variables
3 |
4 | $backupdir = "/home/backup";
5 | $username = "admin";
6 | $password = "{{ mongo_admin_password }}";
7 |
8 | umask 0077;
9 |
10 | # Determine current day
11 | $day = `/bin/date +'%a'`;
12 | chomp($day);
13 |
14 | # Remove the old backup for this day if it exists
15 | if ( -e "$backupdir/mongo-dump-$day/") {
16 | `rm -rf $backupdir/mongo-dump-$day/`;
17 | }
18 |
19 | # Dump databases
20 | `mongodump --username $username --password $password --authenticationDatabase admin --out $backupdir/mongo-dump-$day`;
21 |
22 | # Gzip dumps
23 | opendir(BDIR, "$backupdir/mongo-dump-$day/");
24 | my @files = readdir(BDIR);
25 | closedir(BDIR);
26 | chdir("$backupdir/mongo-dump-$day/");
27 | foreach $dir (@files) {
28 | if ($dir !~ /^\.+$/) {
29 | if ($dir !~ /\.\./g) {
30 | if ( -d "$backupdir/mongo-dump-$day/$dir") {
31 | `tar -cvzf $backupdir/mongo-dump-$day/$dir.tar.gz $dir/`;
32 | `rm -rf $backupdir/mongo-dump-$day/$dir/`;
33 | }
34 | }
35 | }
36 | }
37 | umask 0022;
38 |
--------------------------------------------------------------------------------
/roles/mongo/templates/mongod.conf.j2:
--------------------------------------------------------------------------------
1 | systemLog:
2 | destination: file
3 | logRotate: reopen
4 | logAppend: true
5 | path: /var/log/mongodb/mongod.log
6 |
7 | net:
8 | bindIp: {{ mongo_bind_listen_address }}
9 | port: 27017
10 | tls:
11 | mode: preferTLS
12 | certificateKeyFile: /etc/pki/mongo/keyandcert.pem
13 | CAFile: /etc/pki/mongo/mongoca.pem
14 | allowConnectionsWithoutCertificates: true
15 |
16 | storage:
17 | dbPath: /var/lib/mongo
18 |
19 | replication:
20 | replSetName: {{ replica_set_name }}
21 |
22 | security:
23 | authorization: enabled
24 | clusterAuthMode: x509
25 |
--------------------------------------------------------------------------------
/roles/mongo/templates/mongoshrc.js.j2:
--------------------------------------------------------------------------------
1 | db = connect("mongodb://admin:{{ mongo_admin_password }}@{% for mongo_server in mongo_servers %}{{ mongo_server }}:{{ mongod_port }}{% if not loop.last %},{% endif %}{% endfor %}/?ssl=true&tlsCAFile=/etc/pki/mongo/mongoca.pem")
2 |
3 |
--------------------------------------------------------------------------------
/roles/mongodbdocker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | replica_set_name: "{{ instance_name }}"
2 | docker_mongodb_network_range: "172.21.22.0/24"
3 |
--------------------------------------------------------------------------------
/roles/mongodbdocker/templates/backup_mongo.pl.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | # Variables
3 |
4 | $backupdir = "/home/backup";
5 | $username = "admin";
6 | $password = "{{ mongo_admin_password }}";
7 |
8 | umask 0077;
9 |
10 | # Determine current day
11 | $day = `/bin/date +'%a'`;
12 | chomp($day);
13 |
14 | # Remove the old backup for this day if it exists
15 | if ( -e "$backupdir/mongo-dump-$day/") {
16 | `rm -rf $backupdir/mongo-dump-$day/`;
17 | }
18 |
19 | # Dump databases
20 | `docker exec openconext_mongodb mongodump --username $username --password $password --authenticationDatabase admin --out $backupdir/mongo-dump-$day`;
21 |
22 | # Gzip dumps
23 | opendir(BDIR, "$backupdir/mongo-dump-$day/");
24 | my @files = readdir(BDIR);
25 | closedir(BDIR);
26 | chdir("$backupdir/mongo-dump-$day/");
27 | foreach $dir (@files) {
28 | if ($dir !~ /^\.+$/) {
29 | if ($dir !~ /\.\./g) {
30 | if ( -d "$backupdir/mongo-dump-$day/$dir") {
31 | `tar -cvzf $backupdir/mongo-dump-$day/$dir.tar.gz $dir/`;
32 | `rm -rf $backupdir/mongo-dump-$day/$dir/`;
33 | }
34 | }
35 | }
36 | }
37 | umask 0022;
38 |
--------------------------------------------------------------------------------
/roles/monitoring-tests/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | monitoring_tests_dir: /opt/monitoring-tests
3 | monitoring_tests_version: ''
4 | monitoring_tests_snapshot_timestamp: ''
5 | monitoring_tests_jar: monitoring-tests-current.jar
6 | monitoring_tests_metadata_sp_url: https://engine.{{ base_domain }}/sp-metadata.xml
7 | monitoring_tests_metadata_idp_url: https://engine.{{ base_domain }}/idp-metadata.xml
8 | monitoring_tests_min_heapsize: "128m"
9 | monitoring_tests_max_heapsize: "128m"
10 |
11 |
--------------------------------------------------------------------------------
/roles/monitoring-tests/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart monitoring-tests
3 | systemd:
4 | name: "{{ springapp_service_name }}"
5 | state: restarted
6 | daemon_reload: yes
7 |
8 |
9 |
10 | - name: restart monitoring-tests-acc
11 | systemd:
12 | name: monitoring-tests-acc
13 | state: restarted
14 | daemon_reload: yes
15 |
16 | - name: restart monitoring-tests-prd
17 | systemd:
18 | name: monitoring-tests-prd
19 | state: restarted
20 | daemon_reload: yes
21 |
--------------------------------------------------------------------------------
/roles/monitoring-tests/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependencies:
3 | - java
4 | - httpd
5 |
--------------------------------------------------------------------------------
/roles/monitoring-tests/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy logging config
3 | template:
4 | src: "logback.xml.j2"
5 | dest: "{{ monitoring_tests_dir }}/logback.xml"
6 | owner: "{{ springapp_user }}"
7 | group: monitoring-tests
8 | mode: 0740
9 | notify:
10 | - "restart monitoring-tests"
11 |
12 | - name: Copy application config
13 | template:
14 | src: "application.yml.j2"
15 | dest: "{{ monitoring_tests_dir }}/application.yml"
16 | owner: "{{ springapp_user }}"
17 | group: monitoring-tests
18 | mode: 0740
19 | notify:
20 | - "restart monitoring-tests"
21 |
--------------------------------------------------------------------------------
/roles/monitoring-tests/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | /var/log/{{ springapp_service_name }}/monitoring-tests.log
6 |
7 |
8 | /var/log/{{ springapp_service_name }}/monitoring-tests-%d{yyyy-MM-dd}.log.gz
9 | {{ logback_max_history }}
10 |
11 |
12 | %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/roles/monitoring-tests/vars/main.yml:
--------------------------------------------------------------------------------
1 | springapp_artifact_id: monitoring-tests
2 | springapp_artifact_type: jar
3 | springapp_artifact_group_dir: org/openconext
4 | springapp_version: "{{ monitoring_tests_version }}"
5 | springapp_dir: "{{ monitoring_tests_dir }}"
6 | springapp_user: monitoring-tests
7 | springapp_service_name: monitoring-tests
8 | springapp_jar: "{{ monitoring_tests_jar }}"
9 | springapp_tcpport: 9392
10 | springapp_min_heapsize: "{{ monitoring_tests_min_heapsize }}"
11 | springapp_max_heapsize: "{{ monitoring_tests_max_heapsize }}"
12 | springapp_random_source: "file:///dev/urandom"
13 |
--------------------------------------------------------------------------------
/roles/mujina-idp/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mujina_idp_dir: /opt/mujina-idp
3 | mujina_idp_dir_docker: /opt/openconext/mujina-idp
4 | mujina_idp_version: ""
5 | mujina_idp_snapshot_timestamp: ""
6 | mujina_idp_jar: mujina-idp-current.jar
7 | mujina_manage_provision_samlidp_entity_id: "{{ mujina_idp.entity_id }}"
8 | mujina_manage_provision_samlidp_description_en: "{{ instance_name }} Mujina IdP"
9 | mujina_manage_provision_samlidp_name_en: "The {{ instance_name }} Mujina Mock IdP"
10 | mujina_manage_provision_samlidp_idp_cert: "{{ mujina_idp.certificate }}"
11 | mujina_manage_provision_samlidp_idp_sso_location: "https://mujina-idp.{{ base_domain }}/SingleSignOnService"
12 |
--------------------------------------------------------------------------------
/roles/mujina-idp/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart mujina-idp-docker
3 | community.docker.docker_container:
4 | name: mujina-idp
5 | state: started
6 | restart: true
7 | when: mujinaidpcontainer is success and mujinaidpcontainer is not change
8 |
--------------------------------------------------------------------------------
/roles/mujina-idp/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/mujina-idp/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/roles/mujina-idp/vars/main.yml:
--------------------------------------------------------------------------------
1 | springapp_artifact_id: mujina-idp
2 | springapp_artifact_type: jar
3 | springapp_artifact_group_dir: org/openconext
4 | springapp_version: "{{ mujina_version }}"
5 | springapp_dir: "{{ mujina_idp_dir }}"
6 | springapp_user: mujina-idp
7 | springapp_service_name: mujina-idp
8 | springapp_jar: "{{ mujina_idp_jar }}"
9 | springapp_tcpport: 9390
10 | springapp_random_source: "file:///dev/urandom"
11 | manage_provision_samlidp_entity_id: "{{ mujina_manage_provision_samlidp_entity_id }}"
12 | manage_provision_samlidp_description_en: "{{ mujina_manage_provision_samlidp_description_en }}"
13 | manage_provision_samlidp_name_en: "{{ mujina_manage_provision_samlidp_name_en }}"
14 | manage_provision_samlidp_idp_cert: "{{ mujina_manage_provision_samlidp_idp_cert }}"
15 | manage_provision_samlidp_idp_sso_location: "{{ mujina_manage_provision_samlidp_idp_sso_location }}"
16 |
--------------------------------------------------------------------------------
/roles/mujina-sp/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mujina_sp_dir: /opt/mujina-sp
3 | mujina_sp_jar: mujina-sp-current.jar
4 | mujina_sp_dir_docker: /opt/openconext/mujina-sp
5 | mujinasp_manage_provision_samlsp_client_id: "https://mujina-sp.{{ base_domain }}/shibboleth"
6 | mujinasp_manage_provision_samlsp_name_en: "{{ instance_name }} Mujina SP"
7 | mujinasp_manage_provision_samlsp_description_en: "{{ instance_name }} Mujina mock SP"
8 | mujinasp_manage_provision_samlsp_acs_location: "https://mujina-sp.{{ base_domain }}/Shibboleth.sso/SAML2/POST"
9 | mujinasp_manage_provision_samlsp_metadata_url: "https://mujina-sp.{{ base_domain }}/Shibboleth.sso/Metadata"
10 | mujinasp_manage_provision_samlsp_sp_cert: ""
11 | mujinasp_manage_provision_samlsp_trusted_proxy: false
12 | mujinasp_manage_provision_samlsp_sign: false
13 |
--------------------------------------------------------------------------------
/roles/mujina-sp/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart mujina-sp-docker
3 | community.docker.docker_container:
4 | name: mujina-sp
5 | state: started
6 | restart: true
7 | when: mujinaspcontainer is success and mujinaspcontainer is not change
8 |
--------------------------------------------------------------------------------
/roles/mujina-sp/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/mujina-sp/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/roles/mujina-sp/vars/main.yml:
--------------------------------------------------------------------------------
1 | springapp_artifact_id: mujina-sp
2 | springapp_artifact_type: jar
3 | springapp_artifact_group_dir: org/openconext
4 | springapp_version: "{{ mujina_version }}"
5 | springapp_dir: "{{ mujina_sp_dir }}"
6 | springapp_user: mujina-sp
7 | springapp_service_name: mujina-sp
8 | springapp_jar: "{{ mujina_sp_jar }}"
9 | springapp_random_source: "file:///dev/urandom"
10 | manage_provision_samlsp_client_id: "{{ mujinasp_manage_provision_samlsp_client_id }}"
11 | manage_provision_samlsp_name_en: "{{ mujinasp_manage_provision_samlsp_name_en }}"
12 | manage_provision_samlsp_description_en: "{{ mujinasp_manage_provision_samlsp_description_en }}"
13 | manage_provision_samlsp_acs_location: "{{ mujinasp_manage_provision_samlsp_acs_location }}"
14 | manage_provision_samlsp_metadata_url: "{{ mujinasp_manage_provision_samlsp_metadata_url }}"
15 | manage_provision_samlsp_sp_cert: "{{ mujinasp_manage_provision_samlsp_sp_cert }}"
16 | manage_provision_samlsp_trusted_proxy: "{{ mujinasp_manage_provision_samlsp_trusted_proxy }}"
17 | manage_provision_samlsp_sign: "{{ mujinasp_manage_provision_samlsp_sign }}"
18 |
--------------------------------------------------------------------------------
/roles/myconext/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | myconext_cronjobmaster: true
3 | myconext_docker_networks:
4 | - name: loadbalancer
5 |
--------------------------------------------------------------------------------
/roles/myconext/files/__cacert_entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # Converted to POSIX shell to avoid the need for bash in the image
3 |
4 | set -e
5 |
6 | # Opt-in is only activated if the environment variable is set
7 | if [ -n "$USE_SYSTEM_CA_CERTS" ]; then
8 |
9 | # Copy certificates from /certificates to the system truststore, but only if the directory exists and is not empty.
10 | # The reason why this is not part of the opt-in is because it leaves open the option to mount certificates at the
11 | # system location, for whatever reason.
12 | if [ -d /certificates ] && [ -n "$(ls -A /certificates 2>/dev/null)" ]; then
13 | cp -a /certificates/* /usr/local/share/ca-certificates/
14 | fi
15 |
16 | CACERT="$JAVA_HOME/lib/security/cacerts"
17 |
18 | # JDK8 puts its JRE in a subdirectory
19 | if [ -f "$JAVA_HOME/jre/lib/security/cacerts" ]; then
20 | CACERT="$JAVA_HOME/jre/lib/security/cacerts"
21 | fi
22 |
23 | # OpenJDK images used to create a hook for `update-ca-certificates`. Since we are using an entrypoint anyway, we
24 | # might as well just generate the truststore and skip the hooks.
25 | update-ca-certificates
26 |
27 | trust extract --overwrite --format=java-cacerts --filter=ca-anchors --purpose=server-auth "$CACERT"
28 | fi
29 |
30 | exec "$@"
31 |
--------------------------------------------------------------------------------
/roles/myconext/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart myconextserver
2 | community.docker.docker_container:
3 | name: myconextserver
4 | state: started
5 | restart: true
6 | # avoid recreating the container; per the docker_container module notes, recreation can cause unexpected data loss
7 | comparisons:
8 | '*': ignore
9 | when: myconextservercontainer is success and myconextservercontainer is not change
10 |
--------------------------------------------------------------------------------
/roles/myconext/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 |
3 |
4 |
5 |
6 |
7 | %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/roles/myconext/templates/tiqr.configuration.yml.j2:
--------------------------------------------------------------------------------
1 | encryptionSecret: "{{ myconext_tiqr_encryption }}"
2 | baseUrl: "https://login.{{ myconext_base_domain }}"
3 | identifier: "{{ myconext_base_domain }}"
4 | displayName: "{{ myconext_base_domain }}"
5 | version: "2"
6 | logoUrl: "https://static.surfconext.nl/media/idp/eduid.png"
7 | infoUrl: "https://{{ myconext_base_domain }}/help"
8 | pushNotificationsEnabled: true
9 | eduIdAppBaseUrl: "https://eduid.nl"
10 | rateLimitThreshold: 5
11 | rateLimitResetMinutes: 30
12 | smsRateLimitThreshold: 5
13 | smsRateLimitResetMinutes: 1440
14 | smsSendingDelayInMillis: 2500
15 |
16 | apns:
17 | serverHost: "api.push.apple.com"
18 | port: 443
19 | signingKey: "file:///config/apns.p8"
20 | # Leave empty for non-local development
21 | serverCertificateChain: ""
22 | topic: "nl.eduid"
23 | teamId: "{{ myconext.apns_teamid }}"
24 | keyId: "{{ myconext.apns_keyid }}"
25 |
26 | gcm:
27 | firebaseServiceAccount: "file:///config/firebase.json"
28 | appName: "tiqr"
29 |
--------------------------------------------------------------------------------
/roles/mysql/defaults/main.yml:
--------------------------------------------------------------------------------
1 | mysql_backup_user: backup_user
2 | backup_node: false
3 |
--------------------------------------------------------------------------------
/roles/mysql/templates/mariadb.repo.j2:
--------------------------------------------------------------------------------
1 | # http://mariadb.org/mariadb/repositories/
2 | [mariadb]
3 | name = MariaDB
4 | baseurl = http://yum.mariadb.org/10.6/centos7-amd64
5 | gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
6 | gpgcheck=1
7 |
--------------------------------------------------------------------------------
/roles/mysql/templates/my.cnf.j2:
--------------------------------------------------------------------------------
1 | [client]
2 | user=root
3 | password={{ mysql_root_password }}
4 |
--------------------------------------------------------------------------------
/roles/oidc-playground/defaults/main.yml:
--------------------------------------------------------------------------------
1 | oidc_playground_dir: /opt/openconext/oidc-playground
2 |
--------------------------------------------------------------------------------
/roles/oidc-playground/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart oidc-playground-docker
2 | community.docker.docker_container:
3 | name: oidcplaygroundserver
4 | state: started
5 | restart: true
6 | # avoid recreating the container; per the docker_container module notes, recreation can cause unexpected data loss
7 | comparisons:
8 | '*': ignore
9 | when: oidcplaygroundservercontainer is success and oidcplaygroundservercontainer is not change
10 |
--------------------------------------------------------------------------------
/roles/oidc-playground/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 |
3 |
4 |
5 |
6 |
7 | %d{ISO8601} %5p [%t] %logger{40}:%L - %m%n
8 |
9 |
10 |
11 |
12 |
13 | {{ smtp_server }}
14 | {{ noreply_email }}
15 | {{ error_mail_to }}
16 | {{ error_subject_prefix }}Unexpected error oidc-playground
17 |
18 |
19 |
20 | ERROR
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/roles/oidcng/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | oidcng_dir: /opt/openconext/oidcng
3 | oidcng_config_dir: /config
4 | oidcng_version: ""
5 | oidcng_snapshot_timestamp: ""
6 | oidcng_cronjobmaster: true
7 | oidcng_saml_sp_entityid: https://connect.{{ base_domain }}
8 | oidcng_idp_metadata_url: https://engine.{{ base_domain }}/authentication/idp/metadata
9 | oidcng_base_hostname: connect.{{ base_domain }}
10 | oidcng_logback_email: true
11 | oidcng_logback_json: true
12 | oidcng_device_flow: false
13 | oidcng_idp_sso_location: https://engine.{{ base_domain }}/authentication/idp/single-sign-on
14 | oidcng_manage_provision_samlsp_client_id: "https://connect.{{ base_domain }}"
15 | oidcng_manage_provision_samlsp_name_en: "{{ instance_name }} OIDC Gateway"
16 | oidcng_manage_provision_samlsp_description_en: "Trusted proxy that handles the OIDC protocol"
17 | oidcng_manage_provision_samlsp_acs_location: "https://connect.{{ base_domain }}/oidc/SSO/alias/oidc-proxy"
18 | oidcng_manage_provision_samlsp_metadata_url: "https://connect.{{ base_domain }}/oidc/metadata"
19 | oidcng_manage_provision_samlsp_sp_cert: "{{ lookup('file', '{{ inventory_dir }}/files/certs/oidc/oidcsaml.crt') | depem }}"
20 | oidcng_manage_provision_samlsp_sign: "True"
21 | oidcng_manage_provision_samlsp_trusted_proxy: "True"
22 | oidcng_docker_networks:
23 | - name: loadbalancer
24 |
--------------------------------------------------------------------------------
/roles/oidcng/files/__cacert_entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # Converted to POSIX shell to avoid the need for bash in the image
3 |
4 | set -e
5 |
6 | # Opt-in is only activated if the environment variable is set
7 | if [ -n "$USE_SYSTEM_CA_CERTS" ]; then
8 |
9 | # Copy certificates from /certificates to the system truststore, but only if the directory exists and is not empty.
10 | # The reason why this is not part of the opt-in is because it leaves open the option to mount certificates at the
11 | # system location, for whatever reason.
12 | if [ -d /certificates ] && [ -n "$(ls -A /certificates 2>/dev/null)" ]; then
13 | cp -a /certificates/* /usr/local/share/ca-certificates/
14 | fi
15 |
16 | CACERT="$JAVA_HOME/lib/security/cacerts"
17 |
18 | # JDK8 puts its JRE in a subdirectory
19 | if [ -f "$JAVA_HOME/jre/lib/security/cacerts" ]; then
20 | CACERT="$JAVA_HOME/jre/lib/security/cacerts"
21 | fi
22 |
23 | # OpenJDK images used to create a hook for `update-ca-certificates`. Since we are using an entrypoint anyway, we
24 | # might as well just generate the truststore and skip the hooks.
25 | update-ca-certificates
26 |
27 | trust extract --overwrite --format=java-cacerts --filter=ca-anchors --purpose=server-auth "$CACERT"
28 | fi
29 |
30 | exec "$@"
31 |
--------------------------------------------------------------------------------
/roles/oidcng/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "restart oidcng"
3 | community.docker.docker_container:
4 | name: oidcngserver
5 | state: started
6 | restart: true
7 | # avoid recreating the container; per the docker_container module notes, recreation can cause unexpected data loss
8 | comparisons:
9 | '*': ignore
10 | when: oidcngservercontainer is success and oidcngservercontainer is not change
11 |
--------------------------------------------------------------------------------
/roles/oidcng/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/oidcng/templates/secret_keyset.json.j2:
--------------------------------------------------------------------------------
1 | {{ oidcng_secret_keyset }}
2 |
3 |
--------------------------------------------------------------------------------
/roles/oidcng/vars/main.yml:
--------------------------------------------------------------------------------
1 | springapp_artifact_id: oidcng
2 | springapp_artifact_type: jar
3 | springapp_artifact_group_dir: org/openconext
4 | springapp_version: "{{ oidcng_version }}"
5 | springapp_dir: "{{ oidcng_dir }}"
6 | springapp_user: oidcng
7 | springapp_service_name: oidcng
8 | springapp_jar: "{{ oidcng_jar }}"
9 | springapp_tcpport: 9195
10 | springapp_random_source: "file:///dev/urandom"
11 | manage_provision_samlsp_client_id: "{{ oidcng_manage_provision_samlsp_client_id }}"
12 | manage_provision_samlsp_name_en: "{{ oidcng_manage_provision_samlsp_name_en }}"
13 | manage_provision_samlsp_description_en: "{{ oidcng_manage_provision_samlsp_description_en }}"
14 | manage_provision_samlsp_acs_location: "{{ oidcng_manage_provision_samlsp_acs_location }}"
15 | manage_provision_samlsp_metadata_url: "{{ oidcng_manage_provision_samlsp_metadata_url }}"
16 | manage_provision_samlsp_sp_cert: "{{ oidcng_manage_provision_samlsp_sp_cert }}"
17 | manage_provision_samlsp_sign: "{{ oidcng_manage_provision_samlsp_sign }}"
18 | manage_provision_samlsp_trusted_proxy: "{{ oidcng_manage_provision_samlsp_trusted_proxy }}"
19 |
--------------------------------------------------------------------------------
/roles/openconext-common/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # OpenConext global variables.
3 | openconext_configs_dir: "/etc/openconext"
4 |
5 |
--------------------------------------------------------------------------------
/roles/openconext-common/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure the directories exist
3 | file:
4 | path: "{{ item }}"
5 | state: directory
6 | with_items:
7 | - "{{ openconext_builds_dir }}"
8 | - "{{ openconext_configs_dir }}"
9 | - "{{ openconext_releases_dir }}"
10 |
--------------------------------------------------------------------------------
/roles/pdp/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart pdpserver
2 | community.docker.docker_container:
3 | name: pdpserver
4 | state: started
5 | restart: true
6 | # avoid recreating the container; per the docker_container module notes, recreation can cause unexpected data loss
7 | comparisons:
8 | '*': ignore
9 | when: pdpservercontainer is success and pdpservercontainer is not change
10 |
--------------------------------------------------------------------------------
/roles/pdp/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <configuration>
3 |
4 | <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
5 | <encoder>
6 | <pattern>%d{ISO8601} %5p [%t] %logger{40}:%L - %m%n</pattern>
7 | </encoder>
8 | </appender>
9 |
10 | <appender name="SYSLOG" class="ch.qos.logback.classic.net.SyslogAppender">
11 | <syslogHost>localhost</syslogHost>
12 | <facility>USER</facility>
13 | <suffixPattern>%logger{40}:%L %d{ISO8601} %5p [%t] - %m%n</suffixPattern>
14 | </appender>
15 |
16 | <appender name="EMAIL" class="ch.qos.logback.classic.net.SMTPAppender">
17 | <smtpHost>{{ smtp_server }}</smtpHost>
18 | <from>{{ noreply_email }}</from>
19 | <to>{{ error_mail_to }}</to>
20 | <subject>{{ error_subject_prefix }}Unexpected error pdp</subject>
21 | <layout class="ch.qos.logback.classic.html.HTMLLayout"/>
22 | <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
23 | <level>ERROR</level>
24 | </filter>
25 | </appender>
26 |
27 | <root level="INFO">
28 | <appender-ref ref="CONSOLE"/>
29 | <appender-ref ref="SYSLOG"/>
30 | <appender-ref ref="EMAIL"/>
31 | </root>
32 | </configuration>
--------------------------------------------------------------------------------
/roles/pdp/templates/xacml.conext.properties.j2:
--------------------------------------------------------------------------------
1 | #
2 | # Properties that the embedded PDP engine uses to configure and load
3 | #
4 | # Standard API Factories
5 | #
6 | xacml.dataTypeFactory=org.apache.openaz.xacml.std.StdDataTypeFactory
7 | xacml.pdpEngineFactory=pdp.xacml.OpenConextPDPEngineFactory
8 | xacml.pepEngineFactory=org.apache.openaz.xacml.std.pep.StdEngineFactory
9 | xacml.pipFinderFactory=org.apache.openaz.xacml.std.pip.StdPIPFinderFactory
10 |
11 | #
12 | # OpenAZ PDP Implementation Factories
13 | #
14 | xacml.openaz.evaluationContextFactory=pdp.xacml.OpenConextEvaluationContextFactory
15 | xacml.openaz.combiningAlgorithmFactory=org.apache.openaz.xacml.pdp.std.StdCombiningAlgorithmFactory
16 | xacml.openaz.functionDefinitionFactory=pdp.xacml.CustomFunctionDefinitionFactory
17 |
18 | xacml.openaz.policyFinderFactory.combineRootPolicies=urn:oasis:names:tc:xacml:3.0:policy-combining-algorithm:deny-overrides
19 |
20 | #
21 | # PIP Engine Definition
22 | #
23 | # xacml.pip.engines=teams
24 | # teams.classname=pdp.teams.TeamsPIP
25 |
26 | # Uncomment the following and comment the lines above this to activate SAB
27 | xacml.pip.engines=teams,sab
28 | teams.classname=pdp.teams.TeamsPIP
29 | sab.classname=pdp.sab.SabPIP
30 |
--------------------------------------------------------------------------------
/roles/pdp/vars/main.yml:
--------------------------------------------------------------------------------
1 | manage_provision_oidcrp_client_id: "{{ pdp_oauth2_clientid }}"
2 | manage_provision_oidcrp_secret: "{{ pdp_client_secret }}"
3 | manage_provision_oidcrp_name_en: "{{ pdp_manage_provision_oidcrp_name_en }}"
4 | manage_provision_oidcrp_description_en: "{{ pdp_manage_provision_oidcrp_description_en }}"
5 | manage_provision_oidcrp_grants: "{{ pdp_manage_provision_oidcrp_grants }}"
6 | manage_provision_oidcrp_allowed_resource_servers: "{{ pdp_manage_provision_oidcrp_allowed_resource_servers }}"
7 | manage_provision_samlsp_client_id: "{{ pdp_manage_provision_samlsp_client_id }}"
8 | manage_provision_samlsp_name_en: "{{ pdp_manage_provision_samlsp_name_en }}"
9 | manage_provision_samlsp_description_en: "{{ pdp_manage_provision_samlsp_description_en }}"
10 | manage_provision_samlsp_acs_location: "{{ pdp_manage_provision_samlsp_acs_location }}"
11 | manage_provision_samlsp_metadata_url: "{{ pdp_manage_provision_samlsp_metadata_url }}"
12 | manage_provision_samlsp_sp_cert: "{{ pdp_manage_provision_samlsp_sp_cert }}"
13 | manage_provision_samlsp_trusted_proxy: "{{ pdp_manage_provision_samlsp_trusted_proxy }}"
14 | manage_provision_samlsp_sign: "{{ pdp_manage_provision_samlsp_sign }}"
15 |
--------------------------------------------------------------------------------
/roles/profile/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart profile
3 | community.docker.docker_container:
4 | name: profile
5 | state: started
6 | restart: true
7 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
8 | comparisons:
9 | '*': ignore
10 | when: profilecontainer is success and profilecontainer is not changed
11 |
--------------------------------------------------------------------------------
/roles/profile/templates/global_view_parameters.yml.j2:
--------------------------------------------------------------------------------
1 | # This file is auto-generated during the composer install
2 | parameters:
3 | help_url:
4 | en: 'https://support.surfconext.nl/help-profile-en'
5 | nl: 'https://support.surfconext.nl/help-profile-nl'
6 | pt: 'https://support.surfconext.nl/help-profile-en'
7 | privacy_url:
8 | en: 'https://support.surfconext.nl/privacy-en'
9 | nl: 'https://support.surfconext.nl/privacy-nl'
10 | pt: 'https://support.surfconext.nl/privacy-en'
11 | terms_of_service_url:
12 | en: 'https://support.surfconext.nl/terms-en'
13 | nl: 'https://support.surfconext.nl/terms-nl'
14 | pt: 'https://support.surfconext.nl/terms-en'
15 | platform_url:
16 | en: 'https://www.surfconext.nl/en'
17 | nl: 'https://www.surfconext.nl'
18 | pt: 'https://www.surfconext.nl/en'
19 | profile_explanation_image_path:
20 | en: build/images/profile_home_en.png
21 | nl: build/images/profile_home_nl.png
22 | pt: build/images/profile_home_pt.png
23 | attribute_information_url:
24 | en: 'https://support.surfconext.nl/attributes-en'
25 | nl: 'https://support.surfconext.nl/attributes-nl'
26 | pt: 'https://support.surfconext.nl/attributes-en'
27 |
--------------------------------------------------------------------------------
/roles/profile/vars/main.yml:
--------------------------------------------------------------------------------
1 | appname: "profile"
2 | current_release_config_dir_name: "/opt/openconext/{{ appname }}"
3 |
4 | manage_provision_samlsp_client_id: "{{ profile_manage_provision_samlsp_client_id }}"
5 | manage_provision_samlsp_name_en: "{{ profile_manage_provision_samlsp_name_en }}"
6 | manage_provision_samlsp_description_en: "{{ profile_manage_provision_samlsp_description_en }}"
7 | manage_provision_samlsp_acs_location: "{{ profile_manage_provision_samlsp_acs_location }}"
8 | manage_provision_samlsp_metadata_url: "{{ profile_manage_provision_samlsp_metadata_url }}"
9 | manage_provision_samlsp_sp_cert: "{{ profile_manage_provision_samlsp_sp_cert }}"
10 | manage_provision_samlsp_trusted_proxy: "{{ profile_manage_provision_samlsp_trusted_proxy }}"
11 | manage_provision_samlsp_sign: "{{ profile_manage_provision_samlsp_sign }}"
12 |
--------------------------------------------------------------------------------
/roles/remove-java-app/defaults/main.yml:
--------------------------------------------------------------------------------
1 | java_apps_to_remove: []
2 |
--------------------------------------------------------------------------------
/roles/remove-java-app/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: daemon_reload
3 | systemd:
4 | daemon_reload: yes
5 |
6 | - name: restart httpd
7 | systemd:
8 | name: httpd
9 | state: restarted
10 |
--------------------------------------------------------------------------------
/roles/rsyslog/defaults/main.yml:
--------------------------------------------------------------------------------
1 | rsyslog_ca: /etc/pki/rsyslog/rsyslog_ca.pem
2 | rsyslog_dir: /opt/openconext/logs
3 | rsyslog_read_group: adm
4 | loglogins_max_age: 6
5 | stepupapps:
6 | - gateway
7 | - selfservice
8 | - ra
9 | - middleware
10 | - webauthn
11 | - tiqr
12 | - azure-mfa
13 | - azuremfa
14 | rsyslog_dir_file_modes: 'dirCreateMode="0755" fileCreateMode="0640" FileGroup="surfsudo"'
15 |
--------------------------------------------------------------------------------
/roles/rsyslog/files/lastseen.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS `last_login` (
2 | `userid` varchar(255) NOT NULL,
3 | `lastseen` date DEFAULT NULL,
4 | PRIMARY KEY (`userid`),
5 | UNIQUE KEY `idx_user` (`userid`),
6 | KEY `idx_lastseen` (`lastseen`)
7 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
8 |
--------------------------------------------------------------------------------
/roles/rsyslog/files/log_logins.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS `log_logins` (
2 | `loginstamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
3 | `userid` varchar(1000) NOT NULL,
4 | `spentityid` varchar(1000) DEFAULT NULL,
5 | `idpentityid` varchar(1000) DEFAULT NULL,
6 | `trustedproxyentityid` varchar(1000) DEFAULT NULL,
7 | `keyid` varchar(50) DEFAULT NULL,
8 | `id` int(11) NOT NULL AUTO_INCREMENT,
9 | `sessionid` varchar(50) DEFAULT NULL,
10 | `requestid` varchar(50) DEFAULT NULL,
11 | PRIMARY KEY (`id`),
12 | UNIQUE KEY `unique_index` (`sessionid`,`requestid`),
13 | KEY `loginstamp_index` (`loginstamp`),
14 | KEY `idpentityid_index` (`idpentityid`(255)),
15 | KEY `spentityid_index` (`spentityid`(255)),
16 | KEY `keyid_index` (`keyid`,`loginstamp`,`spentityid`(255)),
17 | KEY `userid_idp_index` (`userid`(128),`idpentityid`(64))
18 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
19 |
--------------------------------------------------------------------------------
/roles/rsyslog/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart rsyslog
3 | service:
4 | name: rsyslog
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/roles/rsyslog/templates/centralsyslog.j2:
--------------------------------------------------------------------------------
1 | {{ rsyslog_dir }}/hosts/*/*/*
2 | {{ rsyslog_dir }}/apps/*/*/*
3 | {{ rsyslog_dir }}/apps/*/auditd/*/*
4 | {{ rsyslog_dir }}/apps/*/bash/*/*
5 | {{ rsyslog_dir }}/apps/*/secure/*/*
6 |
7 | {
8 | missingok
9 | daily
10 | rotate 90
11 | sharedscripts
12 | olddir archive
13 | createolddir 0750 root {{ rsyslog_read_group }}
14 | create 0640 root {{ rsyslog_read_group }}
15 | dateext
16 | dateyesterday
17 | compress
18 | postrotate
19 | systemctl kill -s HUP rsyslog.service
20 | endscript
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/roles/rsyslog/templates/listener.conf.j2:
--------------------------------------------------------------------------------
1 | input(type="imrelp" port="{{ item.port }}"
2 | ruleset="{{ item.name }}"
3 | tls="on"
4 | tls.mycert="/etc/pki/rsyslog/rsyslogserver.crt"
5 | tls.myprivkey="/etc/pki/rsyslog/rsyslogserver.key"
6 | tls.caCert="{{ rsyslog_ca }}"
7 | tls.authmode="name"
8 | tls.permittedpeer="{{ item.permittedpeer }}")
9 |
10 |
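This listener template is rendered once per item of a list of RELP listeners; each item must provide name, port and permittedpeer. An illustrative shape for such a list (the variable name rsyslog_relp_listeners and all values here are assumptions, not taken from this repository):

    rsyslog_relp_listeners:
      - name: apps
        port: 10514
        permittedpeer: "*.example.org"
      - name: hosts
        port: 10515
        permittedpeer: "*.example.org"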
--------------------------------------------------------------------------------
/roles/rsyslog/templates/logrotate_ebauth.j2:
--------------------------------------------------------------------------------
1 | {{ rsyslog_dir }}/log_logins/{{ item.name }}/eb-authentication.log
2 | {
3 | missingok
4 | daily
5 | rotate 180
6 | sharedscripts
7 | dateext
8 | dateyesterday
9 | compress
10 | delaycompress
11 | create 0640 root {{ rsyslog_read_group }}
12 | postrotate
13 | /usr/local/sbin/parse_ebauth_to_mysql_{{ item.name }}.py > /dev/null
14 | systemctl kill -s HUP rsyslog.service
15 | endscript
16 | }
17 |
--------------------------------------------------------------------------------
/roles/rsyslog/templates/rsyslog_onlyforward.conf.j2:
--------------------------------------------------------------------------------
1 | # This rsyslog configuration takes logs from journald and forwards them to a remote log server
2 | module(load="imuxsock") # provides support for local system logging
3 | module(load="imklog") # provides kernel logging support
4 | module(load="immark" interval="600" ) # provides --MARK-- message capability
5 |
6 | $PreserveFQDN on
7 |
8 | *.emerg :omusrmsg:*
9 |
10 | {% if rsyslog_remote_server_relp is defined and 'sysloghost' not in group_names %}
11 | # Forward all logs to the central logging server using relp
12 | module(load="omrelp")
13 | action(type="omrelp"
14 | target="{{ rsyslog_remote_server_relp }}"
15 | port="{{ rsyslog_remote_relp_port }}"
16 | tls="on"
17 | tls.caCert="/etc/pki/rsyslog/rsyslogclientca.crt"
18 | tls.MyCert="/etc/pki/rsyslog/rsyslogclient.crt"
19 | tls.MyPrivKey="/etc/pki/rsyslog/rsyslogclient.key"
20 | tls.authmode="name"
21 | tls.permittedpeer=["{{ rsyslog_remote_server_relp }}"]
22 | queue.type="LinkedList"
23 | queue.filename="rsyslog_relp_q"
24 | queue.maxdiskspace="1G"
25 | queue.saveonshutdown="on"
26 | action.resumeRetryCount="-1"
27 | action.resumeInterval="5"
28 | action.writeAllMarkMessages="on")
29 | {% endif %}
30 |
--------------------------------------------------------------------------------
/roles/spdashboard/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create directory to keep the config file
3 | file:
4 | dest: "/opt/openconext/spdashboard"
5 | state: directory
6 | owner: root
7 | group: root
8 | mode: 0770
9 |
10 | - name: Place the config file
11 | template:
12 | src: env.j2
13 | dest: /opt/openconext/spdashboard/env
14 | owner: root
15 | group: root
16 | mode: 0644
17 |
18 | - name: Add the MariaDB docker network to the list of networks when MariaDB runs in Docker
19 | ansible.builtin.set_fact:
20 | spdashboard_docker_networks:
21 | - name: loadbalancer
22 | - name: openconext_mariadb
23 | when: mariadb_in_docker | default(false) | bool
24 |
25 | - name: Create the container
26 | docker_container:
27 | name: spdashboard
28 | image: ghcr.io/surfnet/sp-dashboard/spdashboard:{{ spdashboard_version }}
29 | env_file: "/opt/openconext/spdashboard/env"
30 | pull: true
31 | restart_policy: "always"
32 | networks: "{{ spdashboard_docker_networks }}"
33 | labels:
34 | traefik.http.routers.spdashboard.rule: "Host(`{{ spdashboard_domain }}`)"
35 | traefik.http.routers.spdashboard.tls: "true"
36 | traefik.enable: "true"
37 | traefik.port: "8080"
38 | healthcheck:
39 | test: ["CMD", "curl", "--fail", "http://localhost"]
40 | interval: 10s
41 | timeout: 10s
42 | retries: 3
43 | start_period: 10s
44 |
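The set_fact above only overrides spdashboard_docker_networks when MariaDB runs in Docker; otherwise the variable has to be defined elsewhere (defaults or group_vars, not shown here). A plausible default, mirroring the *_docker_networks defaults used by the stepup roles later in this repository, would be:

    spdashboard_docker_networks:
      - name: loadbalancer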
--------------------------------------------------------------------------------
/roles/static/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | static_dir: "/var/www/static"
3 | static:
4 | sync_user: root
5 |
--------------------------------------------------------------------------------
/roles/static/files/media/alive.txt:
--------------------------------------------------------------------------------
1 | Dummy file that serves as an endpoint for haproxy health checks
2 |
--------------------------------------------------------------------------------
/roles/static/files/media/conext_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/static/files/media/conext_logo.png
--------------------------------------------------------------------------------
/roles/static/files/media/feide_logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/static/files/media/feide_logo.jpg
--------------------------------------------------------------------------------
/roles/static/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create static sync user
3 | user:
4 | name: "{{ static.sync_user }}"
5 | createhome: no
6 | state: present
7 |
8 | - name: create static dir
9 | file:
10 | path: "{{ static_dir }}"
11 | state: directory
12 |
13 | - name: create static idp image directory
14 | file:
15 | path: "{{ static_dir }}/logos/idp"
16 | state: directory
17 | owner: "{{ static.sync_user }}"
18 |
19 | - name: create static sp image directory
20 | file:
21 | path: "{{ static_dir }}/logos/sp"
22 | state: directory
23 | owner: "{{ static.sync_user }}"
24 |
25 | - name: create static aa image directory
26 | file:
27 | path: "{{ static_dir }}/logos/aa"
28 | state: directory
29 | owner: "{{ static.sync_user }}"
30 |
31 | - name: copy media files
32 | copy:
33 | src: "media"
34 | dest: "{{ static_dir }}"
35 |
36 | - name: symlink legacy idp and sp logo locations
37 | file:
38 | src: "../logos/{{ item }}"
39 | dest: "{{ static_dir }}/media/{{ item }}"
40 | state: link
41 | with_items:
42 | - "idp"
43 | - "sp"
44 | - "aa"
45 |
46 | - name: copy apache config
47 | template:
48 | src: "static.conf.j2"
49 | dest: "/etc/httpd/conf.d/static.conf"
50 | notify:
51 | - "reload httpd"
52 |
--------------------------------------------------------------------------------
/roles/stats/defaults/main.yml:
--------------------------------------------------------------------------------
1 | openconext_releases_dir: "/opt/openconext"
2 | influx_ebauth_measurement: ebauth
3 | stats_manage_url: https://manage.{{ base_domain }}
4 | stats_manage_api_user: stats
5 | stats_base_domain: stats.{{ base_domain }}
6 | stats_oidc_metadata_url: "https://connect.{{ base_domain }}/.well-known/openid-configuration"
7 | stats_oidc_client_id: "stats.{{ base_domain }}"
8 | stats_domain: "stats.{{ base_domain }}"
9 |
--------------------------------------------------------------------------------
/roles/stats/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart statsserver
2 | community.docker.docker_container:
3 | name: statsserver
4 | state: started
5 | restart: true
6 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
7 | comparisons:
8 | '*': ignore
9 | when: statsservercontainer is success and statsservercontainer is not changed
10 |
11 |
12 | - name: restart statsgui
13 | community.docker.docker_container:
14 | name: statsgui
15 | state: started
16 | restart: true
17 |
--------------------------------------------------------------------------------
/roles/stats/templates/config.yml.j2:
--------------------------------------------------------------------------------
1 | database:
2 | name: {{ influx_stats_db }}
3 | host: {{ influx_stats_dbhost }}
4 | port: 8086
5 | username: {{ influxdb_stats_user }}
6 | password: {{ influxdb_stats_password }}
7 |
8 | syslog:
9 | address: "/dev/log"
10 |
11 | log:
12 | measurement: {{ influx_ebauth_measurement }}
13 | user_id: user_id
14 | sp_id: sp_entity_id
15 | idp_id: idp_entity_id
16 |
17 | secret_key: {{ stats_api_secret }}
18 | product: {{ instance_name }}
19 | supported_language_codes: {{ supported_language_codes }}
20 |
21 | api_users:
22 | - name: "dashboard"
23 | password: "{{ stats_dashboard_api_password }}"
24 | scope: "read"
25 | - name: "sysadmin"
26 | password: "{{ stats_sysadmin_api_password }}"
27 | scope: "read, write"
28 |
29 | manage:
30 | url: {{ stats_manage_url }}
31 | user: {{ stats_manage_api_user }}
32 | password: {{ manage_stats_api_password }}
33 |
34 | feature:
35 | high_scores:
36 | True
37 |
38 | base_url: https://{{ stats_base_domain }}
39 |
--------------------------------------------------------------------------------
/roles/stats_backfill_cq/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create Continuous Queries and backfill the measurements
3 | uri:
4 | url: "https://stats.{{ base_domain }}/api/stats/admin/reinitialize_measurements_and_cq"
5 | method: PUT
6 | user: sysadmin
7 | password: "{{ stats_sysadmin_api_password }}"
8 | force_basic_auth: yes
9 | changed_when: false
10 |
11 |
--------------------------------------------------------------------------------
/roles/stepupapp/tasks/copygsspidpcerts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy gateway SP certificate | {{ appname }}
3 | copy:
4 | src: "{{ inventory_dir }}/files/certs/stepup/gateway_gssp_sp.crt"
5 | dest: "{{ current_release_config_file_dir_name }}/gateway.crt"
6 | mode: 0640
7 | owner: root
8 | group: "{{ appname }}"
9 |
10 | - name: Copy GSSP IdP certificate | {{ appname }}
11 | copy:
12 | src: "{{ inventory_dir }}/files/certs/stepup/{{ appname }}_idp.crt"
13 | dest: "{{ current_release_config_file_dir_name }}/cert.pem"
14 | mode: 0640
15 | group: "{{ appname }}"
16 | owner: root
17 |
18 | - name: Write GSSP IdP private key | {{ appname }}
19 | copy:
20 | content: "{{ gssp_idp_private_key }}"
21 | dest: "{{ current_release_config_file_dir_name }}/key.pem"
22 | owner: "{{ appname }}"
23 | mode: "0400"
24 |
--------------------------------------------------------------------------------
/roles/stepupapp/tasks/copygsspspcerts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy GSSP SP certificate | {{ appname }}
3 | copy:
4 | src: "{{ inventory_dir }}/files/certs/stepup/{{ appname }}_gssp_sp.crt"
5 | dest: "{{ current_release_config_file_dir_name }}/sp_gssp.crt"
6 | mode: 0640
7 | group: "{{ appname }}"
8 | owner: root
9 |
10 | - name: Write GSSP SP private key | {{ appname }}
11 | copy:
12 | content: "{{ gssp_sp_private_key }}"
13 | dest: "{{ current_release_config_file_dir_name }}/sp_gssp.key"
14 | owner: "{{ appname }}"
15 | mode: "0400"
16 |
--------------------------------------------------------------------------------
/roles/stepupapp/tasks/copyimages.yml:
--------------------------------------------------------------------------------
1 | - name: Put images from /files/stepup-app/images into public/images | {{ appname }}
2 | copy:
3 | src: "{{ item }}"
4 | dest: "{{ current_release_appdir }}/public/images"
5 | mode: "444"
6 | with_fileglob:
7 | - "{{ inventory_dir }}/files/stepup-app/images/*"
8 |
--------------------------------------------------------------------------------
/roles/stepupapp/tasks/copysfimages.yml:
--------------------------------------------------------------------------------
1 | - name: Copy second factor images from /files/stepup-app/images/second-factor into public/images/second-factor | {{ appname }}
2 | copy:
3 | src: "{{ item }}"
4 | dest: "{{ current_release_appdir }}/public/images/second-factor/"
5 | mode: "444"
6 | with_fileglob:
7 | - "{{ inventory_dir }}/files/stepup-app/images/second-factor/*"
8 |
--------------------------------------------------------------------------------
/roles/stepupapp/tasks/copyspcerts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Write SP private key | {{ appname }}
3 | copy:
4 | content: "{{ stepup_saml_sp_privatekey }}"
5 | dest: "{{ current_release_config_file_dir_name }}/sp.key"
6 | owner: "{{ appname }}"
7 | group: "{{ appname }}"
8 | mode: "0440"
9 |
10 | - name: Write SP certificate | {{ appname }}
11 | copy:
12 | src: "{{ inventory_dir }}/files/certs/stepup/{{ appname }}_saml_sp.crt"
13 | dest: "{{ current_release_config_file_dir_name }}/sp.crt"
14 | group: "{{ appname }}"
15 | mode: "0440"
16 |
--------------------------------------------------------------------------------
/roles/stepupapp/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install extra php packages
2 | yum:
3 | name:
4 | - php72-php-intl
5 | - php72-php-gmp
6 | state: present
7 |
8 | - name: Create the appdirs | {{ appname }}
9 | file:
10 | state: directory
11 | dest: "{{ item }}"
12 | with_items:
13 | - "{{ current_release_appdir }}"
14 | - "{{ current_release_config_file_dir_name }}"
15 |
16 | - name: Download and extract the release | {{ appname }}
17 | unarchive:
18 | src: https://github.com/OpenConext/Stepup-{{ stepup_gh_appname }}/releases/download/{{ appversion }}/Stepup-{{ stepup_gh_appname }}-{{ appversion }}-{{ appversion_sha }}.tar.bz2
19 | dest: "{{ current_release_appdir }}"
20 | remote_src: yes
21 | creates: "{{ current_release_appdir }}/component_info"
22 |
--------------------------------------------------------------------------------
/roles/stepupapp/tasks/postinstall.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Chown the cache dir {{ appname }}
3 | file:
4 | dest: "{{ current_release_symlink }}/var/cache/prod"
5 | owner: "{{ appname }}"
6 | group: "{{ appname }}"
7 | recurse: yes
8 |
9 | # Remove old release dirs, keeping the current version and, of the rest, the most recent one.
10 | - name: Clean up old releases {{ appname }}
11 | shell: ls -td {{ current_release_symlink }}-* | grep -v $(readlink {{ current_release_symlink }}) | tail -n +2 | xargs --no-run-if-empty rm -rv
12 | register: clean_stepup_appdirs
13 | changed_when: '"removed" in clean_stepup_appdirs.stdout'
14 |
--------------------------------------------------------------------------------
/roles/stepupazuremfa/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: clear cache {{ appname }}
3 | shell: "php72 {{ current_release_symlink }}/bin/console cache:clear --env=prod"
4 |
5 | - name: reload php72-fpm {{ appname }}
6 | service:
7 | name: php72-php-fpm
8 | state: reloaded
9 |
10 | - name: restart azuremfa
11 | community.docker.docker_container:
12 | name: azuremfa
13 | state: started
14 | restart: true
15 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
16 | comparisons:
17 | '*': ignore
18 | when: azuremfacontainer is success and azuremfacontainer is not changed
19 |
--------------------------------------------------------------------------------
/roles/stepupazuremfa/templates/env.j2:
--------------------------------------------------------------------------------
1 | KERNEL_CLASS='App\Kernel'
2 | APP_ENV=prod
3 | APP_SECRET={{ azuremfa_secret }}
4 | TRUSTED_PROXIES='{{ engine_trusted_proxy_ips|join(',') }}'
5 |
--------------------------------------------------------------------------------
/roles/stepupazuremfa/vars/docker.yml:
--------------------------------------------------------------------------------
1 | current_release_appdir: /opt/openconext/azuremfa
2 | current_release_config_file_dir_name: /opt/openconext/azuremfa
3 | current_release_config_file_dir_name_in_config: /var/www/html/config/openconext
4 | current_release_config_dir_name: /opt/openconext/azuremfa
5 |
--------------------------------------------------------------------------------
/roles/stepupazuremfa/vars/main.yml:
--------------------------------------------------------------------------------
1 | appname: "azuremfa"
2 | vhost_name: "{{ appname }}.{{ base_domain }}"
3 | loadbalancingport: "411"
4 | appversion: "{{ azuremfa_version }}"
5 | appversion_sha: "{{ azuremfa_version_sha }}"
6 | stepup_gh_appname: "Azure-MFA"
7 | current_release_symlink: "/opt/openconext/OpenConext-{{ appname }}"
8 | current_release_appdir: "{{ current_release_symlink }}-{{ appversion }}"
9 | current_release_config_file_dir_name: "{{ current_release_appdir }}/app/files"
10 | #current_release_config_file_dir_name_in_config: "{{ current_release_config_file_dir_name }}"
11 | current_release_config_dir_name: "{{ current_release_appdir }}/config/packages"
12 | gssp_idp_private_key: "{{ lookup('file', inventory_dir+'/files/certs/stepup/azuremfa_idp.key') }}"
13 | fpmmemory: 128M
14 |
--------------------------------------------------------------------------------
/roles/stepupgateway/defaults/main.yml:
--------------------------------------------------------------------------------
1 | gateway_docker_networks:
2 | - name: loadbalancer
3 |
--------------------------------------------------------------------------------
/roles/stepupgateway/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: clear cache {{ appname }}
3 | shell: "php72 {{ current_release_symlink }}/bin/console cache:clear --env=prod"
4 |
5 | - name: reload php72-fpm {{ appname }}
6 | service:
7 | name: php72-php-fpm
8 | state: reloaded
9 |
10 | - name: restart gateway
11 | community.docker.docker_container:
12 | name: gateway
13 | state: started
14 | restart: true
15 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
16 | comparisons:
17 | '*': ignore
18 | when: gatewaycontainer is success and gatewaycontainer is not changed
19 |
--------------------------------------------------------------------------------
/roles/stepupgateway/templates/env.j2:
--------------------------------------------------------------------------------
1 | KERNEL_CLASS='App\Kernel'
2 | APP_ENV=prod
3 | APP_SECRET={{ gateway_secret }}
4 | TRUSTED_PROXIES='{{ engine_trusted_proxy_ips|join(',') }}'
5 |
--------------------------------------------------------------------------------
/roles/stepupgateway/templates/global_view_parameters.yml.j2:
--------------------------------------------------------------------------------
1 | # These parameters are to be rendered into the view according to a specific locale
2 | # A typical example would be showing locale-dependent external URLs
3 | # Note that a '%' in strings must be escaped by a '%'
4 | # http://symfony.com/doc/2.7/service_container/parameters.html#parameters-in-configuration-files
5 |
6 | parameters:
7 | support_url: # Link to the support pages
8 | {% for locale, url in gateway_support_url.items() %}
9 | {{ locale }}: "{{ url | replace('%', '%%') }}"
10 | {% endfor %}
11 |
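gateway_support_url is expected to be a mapping from locale to URL, which the loop above renders one locale at a time. An illustrative value (URLs are placeholders, not taken from this repository):

    gateway_support_url:
      en: "https://support.example.org/stepup-en"
      nl: "https://support.example.org/stepup-nl"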
--------------------------------------------------------------------------------
/roles/stepupgateway/vars/docker.yml:
--------------------------------------------------------------------------------
1 | current_release_appdir: /opt/openconext/gateway
2 | current_release_config_file_dir_name: /opt/openconext/gateway
3 | current_release_config_file_dir_name_in_config: /var/www/html/config/openconext
4 | current_release_config_dir_name: /opt/openconext/gateway/
5 |
6 |
--------------------------------------------------------------------------------
/roles/stepupgateway/vars/main.yml:
--------------------------------------------------------------------------------
1 | appname: "gateway"
2 | vhost_name: "sa-gw.{{ base_domain }}"
3 | loadbalancingport: "414"
4 | appversion: "{{ gateway_version }}"
5 | appversion_sha: "{{ gateway_version_sha }}"
6 | stepup_gh_appname: "Gateway"
7 | current_release_symlink: "/opt/openconext/OpenConext-{{ appname }}"
8 | current_release_appdir: "{{ current_release_symlink }}-{{ appversion }}"
9 | current_release_config_file_dir_name: "{{ current_release_appdir }}/app/files"
10 | current_release_config_dir_name: "{{ current_release_appdir }}/config/openconext"
11 | current_release_config_file_dir_name_in_config: "{{ current_release_config_file_dir_name }}"
12 | gateway_saml_sp_privatekey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_saml_sp.key') }}"
13 | gateway_gssp_sp_privatekey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_gssp_sp.key') }}"
14 | gateway_gssp_idp_privatekey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_gssp_idp.key') }}"
15 | gateway_saml_idp_privatekey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_saml_idp.key') }}"
16 | spryng_api_key: "{{ lookup('file', inventory_dir+'/secrets/stepup/spryng_api') }}"
17 | fpmmemory: 128M
18 |
--------------------------------------------------------------------------------
/roles/stepupmiddleware/defaults/main.yml:
--------------------------------------------------------------------------------
1 | middelware_docker_networks:
2 | - name: loadbalancer
3 | middleware_mem_limit: "256M"
4 |
--------------------------------------------------------------------------------
/roles/stepupmiddleware/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: clear cache {{ appname }}
3 | shell: "php72 {{ current_release_symlink }}/bin/console cache:clear --env=prod"
4 |
5 | - name: reload php72-fpm {{ appname }}
6 | service:
7 | name: php72-php-fpm
8 | state: reloaded
9 |
10 | - name: restart middleware
11 | community.docker.docker_container:
12 | name: middleware
13 | state: started
14 | restart: true
15 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
16 | comparisons:
17 | '*': ignore
18 | when: middlewarecontainer is success and middlewarecontainer is not changed
19 |
--------------------------------------------------------------------------------
/roles/stepupmiddleware/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include docker tasks when running docker
2 | import_tasks: docker.yml
3 | when: "'docker' in group_names
4 | and ('push_mw_config' in ansible_run_tags
5 | or 'push_mw_institution' in ansible_run_tags
6 | or 'push_mw_whitelist' in ansible_run_tags
7 | or 'stepupmiddleware' in ansible_run_tags
8 | or 'stepup' in ansible_run_tags)"
9 |
10 | - name: Include vm tasks when running on a vm
11 | import_tasks: vm.yml
12 | when: "'docker' not in group_names
13 | and ('push_mw_config' in ansible_run_tags
14 | or 'push_mw_institution' in ansible_run_tags
15 | or 'push_mw_whitelist' in ansible_run_tags
16 | or 'stepupmiddleware' in ansible_run_tags
17 | or 'stepup' in ansible_run_tags)"
18 |
--------------------------------------------------------------------------------
/roles/stepupmiddleware/templates/01-middleware-db_migrate.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Run database migrations of the stepup-middleware component.
4 | # This updates both the middleware and the gateway database schemas
5 | # to the latest versions supported by the currently active version
6 | # of the middleware.
7 | #
8 | # You must run this command/script manually; it is not invoked from
9 | # the Ansible site.yml or deploy.yml playbooks.
10 | #
11 | # This action is idempotent, i.e. it will have no effect when the database
12 | # schemas are already up to date.
13 | #
14 | # A separate "deploy" user is used to update the schemas, as the middleware
15 | # and gateway users do not have sufficient privileges to update them.
16 |
17 | pushd {{ current_release_symlink }}
18 |
19 | # Before Symfony 4 there was a separate console command to run the migrations:
20 | # console middleware:migrations:migrate --env=prod
21 |
22 | # Run the middleware and gateway database migrations
23 | echo "{{ php_cli }} bin/console doctrine:migrations:migrate --env=prod --em=deploy"
24 | {{ php_cli }} bin/console doctrine:migrations:migrate --env=prod --em=deploy
25 |
26 | popd
27 |
--------------------------------------------------------------------------------
/roles/stepupmiddleware/templates/06-middleware-bootstrap-sraa-users.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Add SRAA users with their YubiKey ID without using the RA interface.
3 | # The NameIDs of these users must also be listed in the sraa array in the middleware configuration.
4 | # This must be run once from an app server after setting up a new Stepup environment.
5 |
6 | echo "WARNING: About to run middleware:bootstrap:identity-with-yubikey action(s)"
7 | echo "These actions must not be run more than once for each identity."
8 |
9 | pushd {{ current_release_symlink }}
10 |
11 | # Format: php app/console middleware:bootstrap:identity-with-yubikey --env=prod
12 |
13 | {% for sraa in middleware_sraa %}
14 | if [ "$1" == "--always-yes" ]; then
15 | REPLY="y"
16 | else
17 | read -p "Add identity {{ sraa.nameid }} (y/n)? " -r
18 | fi
19 | if [[ ${REPLY} =~ ^[Yy]$ ]]
20 | then
21 | {{ php_cli }} bin/console middleware:bootstrap:identity-with-yubikey --env=prod {{ sraa.nameid }} {{ sraa.institution }} "{{ sraa.cn }}" {{ sraa.email }} {{ sraa.lang }} {{ sraa['yubikey_id'] }}
22 |
23 | fi
24 | {% endfor %}
25 | popd
26 |
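The loop above expects middleware_sraa to be a list of identities carrying the fields it references (nameid, institution, cn, email, lang, yubikey_id). An illustrative entry, with placeholder values only:

    middleware_sraa:
      - nameid: "urn:collab:person:example.org:admin"
        institution: "example.org"
        cn: "Example Admin"
        email: "admin@example.org"
        lang: "en"
        yubikey_id: "01234567"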
--------------------------------------------------------------------------------
/roles/stepupmiddleware/vars/docker.yml:
--------------------------------------------------------------------------------
1 | current_release_config_dir_name: /opt/openconext/middleware
2 |
--------------------------------------------------------------------------------
/roles/stepupmiddleware/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | appname: "middleware"
3 | vhost_name: "{{ appname }}.{{ base_domain }}"
4 | loadbalancingport: "413"
5 | appversion: "{{ middleware_version }}"
6 | appversion_sha: "{{ middleware_version_sha }}"
7 | stepup_gh_appname: Middleware
8 | current_release_symlink: "/opt/openconext/OpenConext-{{ appname }}"
9 | current_release_appdir: "{{ current_release_symlink }}-{{ appversion }}"
10 | current_release_config_dir_name: "{{ current_release_appdir }}/config/legacy"
11 | lifecycle_application_name: "OpenConext Stepup"
12 | fpmmemory: 512M
13 |
--------------------------------------------------------------------------------
/roles/stepupra/defaults/main.yml:
--------------------------------------------------------------------------------
1 | sms_otp_expiry_interval: 900
2 | sms_maximum_otp_requests: 3
3 | ss_gssp_webauthn_app_android_url: "https://play.google.com/store/apps/details?id=not_used"
4 |
--------------------------------------------------------------------------------
/roles/stepupra/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: clear cache {{ appname }}
3 | shell: "php72 {{ current_release_symlink }}/bin/console cache:clear --env=prod"
4 |
5 | - name: reload php72-fpm {{ appname }}
6 | service:
7 | name: php72-php-fpm
8 | state: reloaded
9 |
10 | - name: restart ra
11 | community.docker.docker_container:
12 | name: ra
13 | state: started
14 | restart: true
15 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
16 | comparisons:
17 | '*': ignore
18 | when: racontainer is success and racontainer is not changed
19 |
--------------------------------------------------------------------------------
/roles/stepupra/templates/global_view_parameters.yml.j2:
--------------------------------------------------------------------------------
1 | # These parameters are to be rendered into the view according to a specific locale
2 | # A typical example would be showing locale-dependent external URLs
3 | # Note that a '%' in strings must be escaped by a '%'
4 | # http://symfony.com/doc/2.7/service_container/parameters.html#parameters-in-configuration-files
5 |
6 | parameters:
7 | support_url: # Link to the RA manual
8 | {% for locale, url in ra_manual_url.items() %}
9 | {{ locale }}: "{{ url | replace('%', '%%') }}"
10 | {% endfor %}
11 |
--------------------------------------------------------------------------------
/roles/stepupra/templates/samlstepupproviders.yml.j2:
--------------------------------------------------------------------------------
1 | imports:
2 | - { resource: samlstepupproviders_parameters.yaml }
3 |
4 | surfnet_stepup_ra_saml_stepup_provider:
5 | routes:
6 | consume_assertion: ra_vetting_gssf_verify
7 | metadata: ra_vetting_gssf_metadata
8 |
9 | providers:
10 | {% for key, value in stepup_enabled_generic_second_factors.items() %}
11 | {{ key }}:
12 | hosted:
13 | service_provider:
14 | public_key: "%gssp_{{ key }}_sp_publickey%"
15 | private_key: "%gssp_{{ key }}_sp_privatekey%"
16 | metadata:
17 | public_key: "%gssp_{{ key }}_metadata_publickey%"
18 | private_key: "%gssp_{{ key }}_metadata_privatekey%"
19 | remote:
20 | entity_id: "%gssp_{{ key }}_remote_entity_id%"
21 | sso_url: "%gssp_{{ key }}_remote_sso_url%"
22 | certificate: "%gssp_{{ key }}_remote_certificate%"
23 | view_config:
24 | title: "%gssp_{{ key }}_title%"
25 | page_title: "%gssp_{{ key }}_page_title%"
26 | explanation: "%gssp_{{ key }}_explanation%"
27 | initiate: "%gssp_{{ key }}_initiate%"
28 | gssf_id_mismatch: "%gssp_{{ key }}_gssf_id_mismatch%"
29 | {% endfor %}
30 |
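Only the keys of stepup_enabled_generic_second_factors matter to this template; each key becomes a provider whose settings are resolved from the corresponding %gssp_<key>_...% parameters. An illustrative value (the nested loa entries are an assumed shape, not taken from this file):

    stepup_enabled_generic_second_factors:
      tiqr:
        loa: 3
      webauthn:
        loa: 3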
--------------------------------------------------------------------------------
/roles/stepupra/vars/docker.yml:
--------------------------------------------------------------------------------
1 | current_release_appdir: /opt/openconext/ra
2 | current_release_config_file_dir_name: /opt/openconext/ra
3 | current_release_config_file_dir_name_in_config: /var/www/html/config/openconext
4 | current_release_config_dir_name: /opt/openconext/ra
5 |
--------------------------------------------------------------------------------
/roles/stepupra/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | appname: "ra"
3 | vhost_name: "{{ appname }}.{{ base_domain }}"
4 | loadbalancingport: "416"
5 | appversion: "{{ ra_version }}"
6 | appversion_sha: "{{ ra_version_sha }}"
7 | stepup_gh_appname: "RA"
8 | current_release_symlink: "/opt/openconext/OpenConext-{{ appname }}"
9 | current_release_appdir: "{{ current_release_symlink }}-{{ appversion }}"
10 | current_release_config_file_dir_name: "{{ current_release_appdir }}/app/files"
11 | current_release_config_dir_name: "{{ current_release_appdir }}/config/legacy"
12 | current_release_config_file_dir_name_in_config: "{{ current_release_config_file_dir_name }}"
13 | gssp_sp_private_key: "{{ lookup('file', inventory_dir+'/files/certs/stepup/ra_gssp_sp.key') }}"
14 | stepup_saml_sp_privatekey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/ra_saml_sp.key') }}"
15 | gateway_saml_idp_publickey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_saml_idp.crt') }}"
16 | gateway_gssp_idp_publickey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_gssp_idp.crt') }}"
17 | fpmmemory: 128M
18 |
--------------------------------------------------------------------------------
/roles/stepupselfservice/defaults/main.yml:
--------------------------------------------------------------------------------
1 | sms_otp_expiry_interval: 900
2 | sms_maximum_otp_requests: 3
3 | ss_gssp_webauthn_app_android_url: "https://play.google.com/store/apps/details?id=not_used"
4 |
--------------------------------------------------------------------------------
/roles/stepupselfservice/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: clear cache {{ appname }}
3 | shell: "php72 {{ current_release_symlink }}/bin/console cache:clear --env=prod"
4 |
5 | - name: reload php72-fpm {{ appname }}
6 | service:
7 | name: php72-php-fpm
8 | state: reloaded
9 |
10 | - name: restart selfservice
11 | community.docker.docker_container:
12 | name: selfservice
13 | state: started
14 | restart: true
15 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
16 | comparisons:
17 | '*': ignore
18 | when: selfservicecontainer is success and selfservicecontainer is not changed
19 |
--------------------------------------------------------------------------------
/roles/stepupselfservice/templates/global_view_parameters.yml.j2:
--------------------------------------------------------------------------------
1 | # These parameters are to be rendered into the view according to a specific locale
2 | # A typical example would be showing locale-dependent external URLs
3 | # Note that a '%' in strings must be escaped by a '%'
4 | # http://symfony.com/doc/2.7/service_container/parameters.html#parameters-in-configuration-files
5 |
6 | parameters:
7 | support_url: # Link to the RA manual
8 | {% for locale, url in ss_support_url.items() %}
9 | {{ locale }}: "{{ url | replace('%', '%%') }}"
10 | {% endfor %}
11 |
--------------------------------------------------------------------------------
/roles/stepupselfservice/vars/docker.yml:
--------------------------------------------------------------------------------
1 | current_release_appdir: /opt/openconext/selfservice
2 | current_release_config_file_dir_name: /opt/openconext/selfservice
3 | current_release_config_file_dir_name_in_config: /var/www/html/config/openconext
4 | current_release_config_dir_name: /opt/openconext/selfservice
5 |
--------------------------------------------------------------------------------
/roles/stepupselfservice/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | appname: "selfservice"
3 | vhost_name: "sa.{{ base_domain }}"
4 | loadbalancingport: "415"
5 | appversion: "{{ selfservice_version }}"
6 | appversion_sha: "{{ selfservice_version_sha }}"
7 | stepup_gh_appname: "SelfService"
8 | current_release_symlink: "/opt/openconext/OpenConext-{{ appname }}"
9 | current_release_appdir: "{{ current_release_symlink }}-{{ appversion }}"
10 | current_release_config_file_dir_name: "{{ current_release_appdir }}/app/files"
11 | current_release_config_dir_name: "{{ current_release_appdir }}/config/legacy"
12 | current_release_config_file_dir_name_in_config: "{{ current_release_config_file_dir_name }}"
13 | gssp_sp_private_key: "{{ lookup('file', inventory_dir+'/files/certs/stepup/selfservice_gssp_sp.key') }}"
14 | stepup_saml_sp_privatekey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/selfservice_saml_sp.key') }}"
15 | gateway_saml_idp_publickey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_saml_idp.crt') }}"
16 | gateway_gssp_idp_publickey: "{{ lookup('file', inventory_dir+'/files/certs/stepup/gateway_gssp_idp.crt') }}"
17 | fpmmemory: 128M
18 |
--------------------------------------------------------------------------------
/roles/stepuptiqr/defaults/main.yml:
--------------------------------------------------------------------------------
1 | tiqr_docker_networks:
2 | - name: loadbalancer
3 |
--------------------------------------------------------------------------------
/roles/stepuptiqr/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: clear cache {{ appname }}
3 | shell: "php72 {{ current_release_symlink }}/bin/console cache:clear --env=prod"
4 |
5 | - name: reload php72-fpm {{ appname }}
6 | service:
7 | name: php72-php-fpm
8 | state: reloaded
9 |
10 | - name: restart tiqr
11 | community.docker.docker_container:
12 | name: tiqr
13 | state: started
14 | restart: true
15 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
16 | comparisons:
17 | '*': ignore
18 | when: tiqrcontainer is success and tiqrcontainer is not changed
19 |
--------------------------------------------------------------------------------
/roles/stepuptiqr/templates/01-tiqr-db_init.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mysql -v -u {{ database_tiqr_deploy_user }} -p{{ mysql_passwords.tiqrdeploy }} -h {{ tiqr_db_host }} {{ database_tiqr_name }} < {{ current_release_symlink }}/config/db/mysql-create-tables.sql
4 | res=$?
5 | if [ "$res" -gt 0 ]; then
6 | echo "mysql failed"
7 | fi
8 |
--------------------------------------------------------------------------------
/roles/stepuptiqr/templates/env.j2:
--------------------------------------------------------------------------------
1 | KERNEL_CLASS='App\Kernel'
2 | APP_ENV=prod
3 | APP_SECRET={{ tiqr_secret }}
4 | TRUSTED_PROXIES='{{ engine_trusted_proxy_ips|join(',') }}'
5 |
--------------------------------------------------------------------------------
/roles/stepuptiqr/vars/docker.yml:
--------------------------------------------------------------------------------
1 |
2 | current_release_appdir: /opt/openconext/tiqr
3 | current_release_config_file_dir_name: /opt/openconext/tiqr
4 | current_release_config_file_dir_name_in_config: /var/www/html/config/openconext
5 | current_release_config_dir_name: /opt/openconext/tiqr
6 |
--------------------------------------------------------------------------------
/roles/stepuptiqr/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | appname: "tiqr"
3 | vhost_name: "{{ appname }}.{{ base_domain }}"
4 | loadbalancingport: "412"
5 | appversion: "{{ tiqr_version }}"
6 | appversion_sha: "{{ tiqr_version_sha }}"
7 | stepup_gh_appname: tiqr
8 | current_release_symlink: "/opt/openconext/OpenConext-{{ appname }}"
9 | current_release_appdir: "{{ current_release_symlink }}-{{ appversion }}"
10 | current_release_config_file_dir_name: "{{ current_release_appdir }}/app/files"
11 | current_release_config_dir_name: "{{ current_release_appdir }}/config/legacy"
12 | current_release_config_file_dir_name_in_config: "{{ current_release_config_file_dir_name }}"
13 | gssp_idp_private_key: "{{ lookup('file', inventory_dir+'/files/certs/stepup/tiqr_idp.key') }}"
14 | database_tiqr_user: tiqrrw
15 | database_tiqr_deploy_user: tiqrdeploy
16 | database_tiqr_name: tiqr
17 | # API key
18 | tiqr_gcm_apikey: "{{ lookup('file', inventory_dir+'/secrets/stepup/gcm_apikey') }}"
19 | # Optional API key for Firebase Cloud Messaging (FCM)
20 | tiqr_firebase_apikey: "{{ lookup('file', inventory_dir+'/secrets/stepup/firebase_apikey') }}"
21 | fpmmemory: 128M
22 |
--------------------------------------------------------------------------------
/roles/stepupwebauthn/defaults/main.yml:
--------------------------------------------------------------------------------
1 | webauthn_docker_networks:
2 | - name: loadbalancer
3 |
--------------------------------------------------------------------------------
/roles/stepupwebauthn/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: clear cache {{ appname }}
3 | shell: "php72 {{ current_release_symlink }}/bin/console cache:clear --env=prod"
4 |
5 | - name: reload php72-fpm {{ appname }}
6 | service:
7 | name: php72-php-fpm
8 | state: reloaded
9 |
10 | - name: restart webauthn
11 | community.docker.docker_container:
12 | name: webauthn
13 | state: started
14 | restart: true
15 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
16 | comparisons:
17 | '*': ignore
18 | when: webauthncontainer is success and webauthncontainer is not changed
19 |
--------------------------------------------------------------------------------
/roles/stepupwebauthn/templates/01-webauthn-db_init.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | ### To run inside a running Docker container
4 | # sudo docker cp /root/01-webauthn-db_init.sh webauthn:/
5 | # sudo docker exec -it webauthn /bin/bash /01-webauthn-db_init.sh
6 | #
7 | cd /var/www/html
8 | echo "Create database if not exists"
9 | /var/www/html/bin/console doctrine:database:create --if-not-exists
10 | if [ $? -eq 0 ]
11 | then
12 | echo "Done!"
13 | else
14 | echo "Database creation failed"
15 | exit 1
16 | fi
17 | echo "Create or migrate schema"
18 | /var/www/html/bin/console doctrine:migrations:migrate
19 | if [ $? -eq 0 ]
20 | then
21 | echo "Done!"
22 | else
23 | echo "Database migration failed"
24 | exit 1
25 | fi
26 |
--------------------------------------------------------------------------------
/roles/stepupwebauthn/vars/docker.yml:
--------------------------------------------------------------------------------
1 | current_release_appdir: /opt/openconext/webauthn
2 | current_release_config_file_dir_name: /opt/openconext/webauthn
3 | current_release_config_file_dir_name_in_config: /var/www/html/config/openconext
4 | current_release_config_dir_name: /opt/openconext/webauthn
5 |
--------------------------------------------------------------------------------
/roles/stepupwebauthn/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | appname: "webauthn"
3 | vhost_name: "{{ appname }}.{{ base_domain }}"
4 | loadbalancingport: "410"
5 | appversion: "{{ webauthn_version }}"
6 | appversion_sha: "{{ webauthn_version_sha }}"
7 | stepup_gh_appname: Webauthn
8 | current_release_symlink: "/opt/openconext/OpenConext-{{ appname }}"
9 | current_release_appdir: "{{ current_release_symlink }}-{{ appversion }}"
10 | current_release_config_file_dir_name: "{{ current_release_appdir }}/app/files"
11 | current_release_config_file_dir_name_in_config: "{{ current_release_config_file_dir_name }}"
12 | current_release_config_dir_name: "{{ current_release_appdir }}/config/packages"
13 | gssp_idp_private_key: "{{ lookup('file', inventory_dir+'/files/certs/stepup/webauthn_idp.key') }}"
14 | database_webauthn_user: webauthnrw
15 | database_webauthn_deploy_user: webauthndeploy
16 | database_webauthn_name: webauthn
17 | fpmmemory: 128M
18 |
--------------------------------------------------------------------------------
/roles/teams/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart teamsserver
2 | community.docker.docker_container:
3 | name: teamsserver
4 | state: started
5 | restart: true
6 | # Restart in place rather than recreating the container; per the docker_container module notes, recreating can cause unexpected data loss
7 | comparisons:
8 | '*': ignore
9 | when: teamsserverontainer is success and teamsserverontainer is not changed
10 |
--------------------------------------------------------------------------------
/roles/teams/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | #jinja2:lstrip_blocks: True
2 | <?xml version="1.0" encoding="UTF-8"?>
3 | <configuration>
4 |
5 | <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
6 | <encoder>
7 | <pattern>%d{ISO8601} %5p [%t] %logger{40}:%L - %m%n</pattern>
8 | </encoder>
9 | </appender>
10 |
11 | <appender name="EMAIL" class="ch.qos.logback.classic.net.SMTPAppender">
12 | <smtpHost>{{ smtp_server }}</smtpHost>
13 | <from>{{ noreply_email }}</from>
14 | <to>{{ error_mail_to }}</to>
15 | <subject>{{ error_subject_prefix }}Unexpected error teams</subject>
16 | <layout class="ch.qos.logback.classic.html.HTMLLayout"/>
17 | <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
18 | <level>ERROR</level>
19 | </filter>
20 | </appender>
21 |
22 | <root level="INFO">
23 | <appender-ref ref="CONSOLE"/>
24 | <appender-ref ref="EMAIL"/>
25 | </root>
26 | </configuration>
--------------------------------------------------------------------------------
/roles/teams/vars/main.yml:
--------------------------------------------------------------------------------
1 | manage_provision_oidcrp_client_id: "{{ teams_authz_client_id }}"
2 | manage_provision_oidcrp_secret: "{{ teams_authz_client_secret }}"
3 | manage_provision_oidcrp_name_en: "{{ teams_manage_provision_oidcrp_name_en }}"
4 | manage_provision_oidcrp_description_en: "{{ teams_manage_provision_oidcrp_description_en }}"
5 | manage_provision_oidcrp_grants: "{{ teams_manage_provision_oidcrp_grants }}"
6 | manage_provision_oidcrp_allowed_resource_servers: "{{ teams_manage_provision_oidcrp_allowed_resource_servers }}"
7 | manage_provision_samlsp_client_id: "{{ teams_manage_provision_samlsp_client_id }}"
8 | manage_provision_samlsp_name_en: "{{ teams_manage_provision_samlsp_name_en }}"
9 | manage_provision_samlsp_description_en: "{{ teams_manage_provision_samlsp_description_en }}"
10 | manage_provision_samlsp_acs_location: "{{ teams_manage_provision_samlsp_acs_location }}"
11 | manage_provision_samlsp_metadata_url: "{{ teams_manage_provision_samlsp_metadata_url }}"
12 | manage_provision_samlsp_sp_cert: "{{ teams_manage_provision_samlsp_sp_cert }}"
13 | manage_provision_samlsp_trusted_proxy: "{{ teams_manage_provision_samlsp_trusted_proxy }}"
14 | manage_provision_samlsp_sign: "{{ teams_manage_provision_samlsp_sign }}"
15 |
--------------------------------------------------------------------------------
/roles/voot/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | voot_manage_provision_oauth_rs_client_id: "{{ voot.oidcng_checkToken_clientId }}"
3 | voot_manage_provision_oauth_rs_rp_secret: "{{ voot.oidcng_checkToken_secret }}"
4 | voot_manage_provision_oauth_rs_name_en: "{{ instance_name }} VOOT Resource Server"
5 | voot_manage_provision_oauth_rs_description_en: "The VOOT API is for group membership information"
6 | voot_manage_provision_oauth_rs_scopes: "groups"
7 |
--------------------------------------------------------------------------------
/roles/voot/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart vootserver
3 | community.docker.docker_container:
4 | name: vootserver
5 | state: started
6 | restart: true
7 |
--------------------------------------------------------------------------------
/roles/voot/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/voot/templates/externalProviders.yml.j2:
--------------------------------------------------------------------------------
1 | externalGroupProviders:
2 | {% for provider in voot.externalGroupProviders %}
3 | - {
4 | type: "{{ provider.type }}",
5 | url: "{{ provider.url }}",
6 | credentials: {
7 | username: "{{ provider.credentials.username }}",
8 | secret: "{{ provider.credentials.secret }}"
9 | },
10 | schacHomeOrganization: "{{ provider.schacHomeOrganization }}",
11 | name: "{{ provider.name }}",
12 | timeoutMillis: {{ provider.timeoutMillis }}
13 | }
14 | {% endfor %}
15 |
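The loop above renders one flow-style mapping per entry of voot.externalGroupProviders. A hypothetical sketch of the group_vars shape it expects (keys mirror the attributes referenced in the template; all values are invented):

    voot:
      externalGroupProviders:
        - type: "voot2"
          url: "https://groups.example.org/voot"
          credentials:
            username: "voot-client"
            secret: "changeme"
          schacHomeOrganization: "example.org"
          name: "Example organisation groups"
          timeoutMillis: 15000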
--------------------------------------------------------------------------------
/roles/voot/templates/logback.xml.j2:
--------------------------------------------------------------------------------
1 | <configuration>
2 | 
3 |     <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
4 |         <encoder>
5 |             <pattern>%d{ISO8601} %5p [%t] %logger{40}:%L - %m%n</pattern>
6 |         </encoder>
7 |     </appender>
8 | 
9 |     <appender name="EMAIL" class="ch.qos.logback.classic.net.SMTPAppender">
10 |         <smtpHost>{{ smtp_server }}</smtpHost>
11 |         <from>{{ noreply_email }}</from>
12 |         <to>{{ error_mail_to }}</to>
13 |         <subject>{{ error_subject_prefix }}Unexpected error voot</subject>
14 |         <layout class="ch.qos.logback.classic.html.HTMLLayout"/>
15 |         <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
16 |             <level>ERROR</level>
17 |         </filter>
18 |     </appender>
19 | 
20 |     {% for logger in voot.loggers %}
21 |     <logger name="{{ logger.name }}" level="{{ logger.level }}"/>
22 |     {% endfor %}
23 | 
24 |     <root level="INFO">
25 |         <appender-ref ref="CONSOLE"/>
26 |         <appender-ref ref="EMAIL"/>
27 |     </root>
28 | </configuration>
29 | 
--------------------------------------------------------------------------------
/roles/voot/templates/serverapplication.yml.j2:
--------------------------------------------------------------------------------
1 | logging:
2 |   config: "file:///logback.xml"
3 | 
4 | server:
5 |   port: 8080
6 |   server-header: no
7 |   error:
8 |     include-message: always
9 |     path: "/error"
10 | 
11 | externalProviders:
12 |   config:
13 |     path: "file:///externalProviders.yml"
14 | 
15 | spring:
16 |   mvc:
17 |     log-request-details: false
18 |   security:
19 |     user:
20 |       name: disabled
21 |       password: disabled
22 | 
23 | oidcng:
24 |   checkToken:
25 |     endpoint_url: "{{ voot.oidcng_checkToken_endpoint_url }}"
26 |     clientId: "{{ voot.oidcng_checkToken_clientId }}"
27 |     secret: "{{ voot.oidcng_checkToken_secret }}"
28 | 
29 | checkToken:
30 |   cache: true
31 | 
32 | # Feature toggle for searching for linked GrouperGroups and ExternalGroups
33 | support:
34 |   linkedGrouperExternalGroups: true
35 | 
36 | management:
37 |   health:
38 |     mail:
39 |       enabled: false
40 |   endpoints:
41 |     web:
42 |       exposure:
43 |         include: "health,info,mappings"
44 |       base-path: "/internal"
45 |   endpoint:
46 |     info:
47 |       enabled: true
48 |     mappings:
49 |       enabled: true
50 |   info:
51 |     git:
52 |       mode: full
53 |
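Besides the server settings, this template pulls its credentials from the voot group_vars dictionary. A hypothetical sketch of those keys (names taken from the templates above, values invented; the loggers list is only an assumption about what logback.xml.j2 iterates over):

    voot:
      oidcng_checkToken_endpoint_url: "https://connect.example.org/oidc/introspect"
      oidcng_checkToken_clientId: "voot.example.org"
      oidcng_checkToken_secret: "changeme"
      loggers:
        - name: "voot"
          level: "INFO"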
--------------------------------------------------------------------------------
/roles/voot/vars/main.yml:
--------------------------------------------------------------------------------
1 | manage_provision_oauth_rs_client_id: "{{ voot_manage_provision_oauth_rs_client_id }}"
2 | manage_provision_oauth_rs_name_en: "{{ voot_manage_provision_oauth_rs_name_en }}"
3 | manage_provision_oauth_rs_description_en: "{{ voot_manage_provision_oauth_rs_description_en }}"
4 | manage_provision_oauth_rs_secret: "{{ voot_manage_provision_oauth_rs_rp_secret }}"
5 | manage_provision_oauth_rs_scopes: "{{ voot_manage_provision_oauth_rs_scopes }}"
6 |
--------------------------------------------------------------------------------
/roles/welcome/files/site/font/fontawesome-webfont.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/font/fontawesome-webfont.eot
--------------------------------------------------------------------------------
/roles/welcome/files/site/font/fontawesome-webfont.svgz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/font/fontawesome-webfont.svgz
--------------------------------------------------------------------------------
/roles/welcome/files/site/font/fontawesome-webfont.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/font/fontawesome-webfont.ttf
--------------------------------------------------------------------------------
/roles/welcome/files/site/font/fontawesome-webfont.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/font/fontawesome-webfont.woff
--------------------------------------------------------------------------------
/roles/welcome/files/site/font/vagrundschriftd-lig-webfont.eot:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/font/vagrundschriftd-lig-webfont.eot
--------------------------------------------------------------------------------
/roles/welcome/files/site/font/vagrundschriftd-lig-webfont.ttf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/font/vagrundschriftd-lig-webfont.ttf
--------------------------------------------------------------------------------
/roles/welcome/files/site/font/vagrundschriftd-lig-webfont.woff:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/font/vagrundschriftd-lig-webfont.woff
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/bg-app-grid.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/bg-app-grid.png
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/bg-footer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/bg-footer.png
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/bg-header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/bg-header.png
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/engine-block-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/engine-block-logo.jpg
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/mujina-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/mujina-logo.jpg
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/oauth-2-sm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/oauth-2-sm.png
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/openconext-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/openconext-logo.jpg
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/openid_connect-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/openid_connect-logo.png
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/profile-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/profile-logo.png
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/sr-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/sr-logo.png
--------------------------------------------------------------------------------
/roles/welcome/files/site/images/teams-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/OpenConext/OpenConext-deploy/dbad4f006d2d1c74e20ab0c035ee261e4e0b451e/roles/welcome/files/site/images/teams-logo.png
--------------------------------------------------------------------------------
/roles/welcome/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create site dir
3 |   file:
4 |     path: "/var/www/welcome"
5 |     state: directory
6 | 
7 | - name: copy static site files
8 |   copy:
9 |     src: "site/"
10 |     dest: "/var/www/welcome"
11 |     # src ends with / so *only* the content of that folder is copied.
12 | 
13 | - name: copy template site files
14 |   template:
15 |     src: "site/index.html"
16 |     dest: "/var/www/welcome/index.html"
17 | 
18 | - name: copy virtual host config
19 |   template:
20 |     src: "welcome-vm.conf.j2"
21 |     dest: "/etc/httpd/conf.d/welcome-vm.conf"
22 |   notify:
23 |     - "reload httpd"
24 |
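The last task notifies a "reload httpd" handler defined in the role's handlers file. As a reminder of what such a handler conventionally looks like, here is a generic sketch (an illustration, not necessarily the repository's exact definition):

    ---
    - name: reload httpd
      service:
        name: httpd
        state: reloaded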
--------------------------------------------------------------------------------
/roles/welcome/templates/welcome-vm.conf.j2:
--------------------------------------------------------------------------------
1 | {% if apache_app_listen_address.welcome is defined %}
2 | Listen {{ apache_app_listen_address.welcome }}:{{ loadbalancing.welcome.port }}
3 | <VirtualHost {{ apache_app_listen_address.welcome }}:{{ loadbalancing.welcome.port }}>
4 | {% else %}
5 | <VirtualHost *:443>
6 | {% endif %}
7 | # General setup for the virtual host, inherited from global configuration
8 | ServerName {{ base_domain }}:443
9 |
10 | DocumentRoot /var/www/welcome
11 |
12 | ErrorLog "|/usr/bin/logger -S 32k -p local3.err -t 'Apache-Welcome'"
13 | CustomLog "|/usr/bin/logger -S 32k -p local3.info -t 'Apache-Welcome'" combined
14 |
15 | {% if haproxy_backend_tls %}
16 | SSLEngine on
17 | SSLCertificateFile {{ tls.cert_path }}/backend.{{ base_domain }}.pem
18 | SSLCertificateKeyFile {{ tls.cert_private_path }}/backend.{{ base_domain }}.key
19 | Include ssl_backend.conf
20 | {% endif %}
21 |
22 | {% if apache_app_listen_address.all is defined %}
23 | SSLEngine on
24 | SSLCertificateFile {{ tls.cert_path }}/{{ tls_star_cert }}
25 | SSLCertificateKeyFile {{ tls.cert_private_path }}/{{ tls_star_cert_key }}
26 | SSLCertificateChainFile {{ tls.cert_path_ca }}/{{ tls_ca }}
27 | Include ssl_backend.conf
28 | {% endif %}
29 | </VirtualHost>
30 | 
31 |
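A hypothetical sketch of the group_vars this vhost template reads (key names come from the variables referenced above; the address, port and domain are invented examples):

    apache_app_listen_address:
      welcome: "192.168.66.99"
    loadbalancing:
      welcome:
        port: 402
    haproxy_backend_tls: false
    base_domain: "example.org"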
--------------------------------------------------------------------------------
/scripts/decrypt:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | set -o pipefail
5 |
6 | CMD="ansible-vault decrypt"
7 |
8 | cat | sed 's/^\s*//' | grep -v '!vault' | $CMD
9 | echo
10 |
11 | exit 0
12 |
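A hypothetical usage example (assumes a vault password source is already configured, e.g. in ansible.cfg): paste an encrypted value, including its "key: !vault |" header line, on stdin and end with Ctrl-D; the script strips the indentation and drops the !vault line before handing the ciphertext to ansible-vault decrypt, which prints the plaintext.

    ./scripts/decrypt
        db_password: !vault |
              $ANSIBLE_VAULT;1.2;AES256;test
              <paste the remaining ciphertext lines here>
    # press Ctrl-D to end input; the decrypted value is printed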
--------------------------------------------------------------------------------
/scripts/encrypt:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | set -o pipefail
5 |
6 | ENV=$1
7 | shift
8 | if [ -z "$ENV" ]
9 | then
10 | echo "Please specify environment"
11 | exit 1
12 | fi
13 |
14 |
15 | SECRET=
16 | if [ -n "$1" ]
17 | then
18 | SECRET=$1
19 | shift
20 | fi
21 |
22 | # aliases
23 | if [ "$ENV" = "prd" ]
24 | then
25 | ENV=prod
26 | fi
27 |
28 | # check if env is correct
29 | if [ "$ENV" != "acc" ] && [ "$ENV" != "prod" ] && [ "$ENV" != "test" ] && [ "$ENV" != "test2" ]
30 | then
31 | echo "Unknown environment '$ENV'"
32 | echo "Acceptable are: acc, prod, test, test2"
33 | exit 1
34 | fi
35 |
36 | CMD="ansible-vault encrypt_string --encrypt-vault-id=$ENV"
37 | if [ -n "$SECRET" ]
38 | then
39 | CMD="$CMD $SECRET"
40 | fi
41 |
42 | $CMD | sed 's/^ \+/ /'
43 | echo
44 |
45 | exit 0
46 |
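A hypothetical usage example (assumes the acc/prod/test/test2 vault identities are configured): the first argument selects the vault id, the optional second argument is the plaintext; without it, ansible-vault encrypt_string reads the plaintext from stdin.

    # encrypt a literal value for the test environment
    ./scripts/encrypt test 's3cr3t-value'

    # 'prd' is accepted as an alias for prod; the plaintext is read from stdin here
    ./scripts/encrypt prd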
--------------------------------------------------------------------------------
/scripts/gen_tink_keyset_oidc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # This script will generate a master key that is used to encrypt all the OIDC keys which are stored in the database
4 | # It will download a java binary and use that to generate the key
5 | # The key will be added to the secrets
6 | # ----- Input handling
7 | if [ $# -lt 1 ]
8 | then
9 | echo "INFO: No arguments supplied, syntax: $BASENAME secretsfile"
10 | exit 1
11 | fi
12 |
13 |
14 | if [ $# -gt 1 ]
15 | then
16 | echo "ERROR: Only 3 arguments expected, syntax: $BASENAME secretsfile"
17 | exit 1
18 | fi
19 |
20 | # ----- End Input handing
21 | #
22 | CURL_BIN=`which curl`
23 | JAVA_BIN=`which java`
24 | OIDC_KEYSET_FILE=$1
25 |
26 | ## First we download the binary to /tmp
27 | $CURL_BIN -o /tmp/crypto-1.0.1-shaded.jar https://build.openconext.org/repository/public/releases/org/openconext/crypto/1.0.1/crypto-1.0.1-shaded.jar
28 | ## Execute it, and send the key to the secrets file
29 | $JAVA_BIN -jar /tmp/crypto-1.0.1-shaded.jar > $OIDC_KEYSET_FILE
30 | ## Clean up the binary
31 | rm /tmp/crypto-1.0.1-shaded.jar
32 |
33 |
34 |
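A hypothetical usage example (requires curl and java on the PATH; the output path is just an illustration):

    ./scripts/gen_tink_keyset_oidc.sh /tmp/oidc_keyset.json
    # the generated keyset can then be encrypted and added to the environment's secrets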
--------------------------------------------------------------------------------
/scripts/prep-dev-env.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | echo "Restarting php-fpm, httpd"
4 | sudo service php72-php-fpm restart
5 | sudo service httpd restart
6 |
7 | echo "Preparing dev env"
8 | (cd /opt/openconext/OpenConext-engineblock && php72 $(which composer) prepare-env)
9 |
--------------------------------------------------------------------------------
/scripts/syntax-check:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
5 |
6 | find . -name '*.j2' -print0 | xargs -0 ${DIR}/syntax-jinja
7 | find . -name '*.yml' -print0 | xargs -0 ${DIR}/syntax-yml
8 | find . -name '*.json' -print0 | xargs -0 ${DIR}/syntax-json
9 |
10 | exit 0
11 |
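A usage example (run from the repository root; the helper scripts below must be executable):

    ./scripts/syntax-check
    # with set -e, the first Jinja2, YAML or JSON file that fails to parse aborts the run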
--------------------------------------------------------------------------------
/scripts/syntax-jinja:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from __future__ import print_function
3 | import jinja2
4 | import sys
5 | 
6 | env = jinja2.Environment()
7 | for filename in sys.argv[1:]:
8 |     print("Checking {}... ".format(filename), end='')
9 | 
10 |     try:
11 |         with open(filename, encoding='utf-8') as template:
12 |             env.parse(template.read())
13 |     except Exception as e:
14 |         print("failed: {}".format(e))
15 |         sys.exit(1)
16 | 
17 |     print("ok")
18 | 
19 | sys.exit(0)
20 | 
--------------------------------------------------------------------------------
/scripts/syntax-json:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from __future__ import print_function
3 | import json
4 | import sys
5 | 
6 | for filename in sys.argv[1:]:
7 |     print("Checking {}... ".format(filename), end='')
8 | 
9 |     try:
10 |         with open(filename, encoding='utf-8') as y:
11 |             json.load(y)
12 |     except Exception as e:
13 |         print("failed: {}".format(e))
14 |         sys.exit(1)
15 | 
16 |     print("ok")
17 | 
18 | sys.exit(0)
19 | 
20 | 
--------------------------------------------------------------------------------
/scripts/syntax-yml:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from __future__ import print_function
3 | import yaml
4 | import sys
5 | 
6 | for filename in sys.argv[1:]:
7 |     print("Checking {}... ".format(filename), end='')
8 | 
9 |     try:
10 |         with open(filename, encoding='utf-8') as y:
11 |             yaml.safe_load(y.read())
12 |     except Exception as e:
13 |         print("failed: {}".format(e))
14 |         sys.exit(1)
15 | 
16 |     print("ok")
17 | 
18 | sys.exit(0)
19 | 
20 | 
--------------------------------------------------------------------------------