├── .ansible-lint ├── .gitignore ├── .rspec ├── .ruby-version ├── LICENSE.txt ├── README.md ├── Rakefile ├── Vagrantfile ├── ansible.cfg ├── hosts ├── playbooks ├── georchestra.yml └── resources │ └── kibana.yml ├── requirements.yaml ├── roles ├── georchestra │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── cas-standalone.yml │ │ ├── clean.yml │ │ ├── config.yml │ │ ├── datafeeder.yml │ │ ├── gateway.yml │ │ ├── geonetwork.yml │ │ ├── geoserver.yml │ │ ├── gn-cloud-searching.yml │ │ ├── gn-ogc-api-records.yml │ │ ├── main.yml │ │ ├── mviewerstudio.yml │ │ ├── nativelibs.yml │ │ ├── sviewer.yml │ │ ├── war_cadastrapp.yml │ │ └── wars.yml │ └── templates │ │ ├── analytics │ │ └── log4j.properties.j2 │ │ ├── cadastrapp │ │ ├── cadastrapp.properties.j2 │ │ ├── config.json.j2 │ │ ├── logback.xml │ │ └── logback.xml.j2 │ │ ├── cas │ │ ├── cas.properties.j2 │ │ ├── georchestra-cas.service.j2 │ │ └── log4j2.xml.j2 │ │ ├── console │ │ ├── console.properties.j2 │ │ └── log4j.properties.j2 │ │ ├── datafeeder │ │ ├── datafeeder.properties.j2 │ │ ├── datafeeder.service.j2 │ │ └── frontend-config.json.j2 │ │ ├── default.properties.j2 │ │ ├── gateway │ │ ├── datadir │ │ │ ├── application.yaml.j2 │ │ │ ├── gateway.yaml.j2 │ │ │ ├── roles-mappings.yaml.j2 │ │ │ ├── routes.yaml.j2 │ │ │ └── security.yaml.j2 │ │ └── systemd │ │ │ └── gateway.service.j2 │ │ ├── geofence │ │ ├── geofence-datasource-ovr.properties.j2 │ │ └── log4j.xml.j2 │ │ ├── geonetwork │ │ ├── geonetwork.properties.j2 │ │ ├── gn-cloud-searching-application.yml.j2 │ │ ├── gn-cloud-searching.service.j2 │ │ ├── gn-ogc-api-records-config.yml.j2 │ │ ├── gn-ogc-api-records.service.j2 │ │ └── log4j2.xml.j2 │ │ ├── geoserver │ │ └── geofence-geoserver.properties.j2 │ │ ├── geowebcache │ │ └── log4j.properties.j2 │ │ ├── mapstore │ │ ├── localConfig.json.j2 │ │ ├── log4j2.properties.j2 │ │ └── proxy.properties.j2 │ │ ├── mviewerstudio │ │ ├── config.json.j2 │ │ └── mviewerstudio.service.j2 │ │ 
├── security-proxy │ │ ├── log4j.properties.j2 │ │ ├── security-proxy.properties.j2 │ │ └── targets-mapping.properties.j2 │ │ └── sviewerConfig.js.j2 ├── nginx │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── datahub.yml │ │ ├── generate_cert.yml │ │ ├── main.yml │ │ ├── mdeditor.yml │ │ └── mviewer.yml │ └── templates │ │ ├── vhost-mviewer.j2 │ │ └── vhost.j2 ├── openldap │ ├── handlers │ │ └── main.yml │ └── tasks │ │ ├── clean.yml │ │ ├── main.yml │ │ └── migrate_groups.yml ├── postgresql │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── cadastrapp.yml │ │ ├── clean.yml │ │ ├── geofence.yml │ │ ├── geonetwork.yml │ │ ├── main.yml │ │ └── other_schemas.yml │ └── vars │ │ └── main.yml ├── superset │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── database.yml │ │ └── main.yml │ └── templates │ │ ├── Overrides.py.j2 │ │ ├── Preconfig.py.j2 │ │ ├── celery-worker.service.j2 │ │ └── superset.service.j2 └── tomcat │ ├── meta │ └── main.yml │ ├── tasks │ ├── cadastrapp.yml │ ├── clean.yml │ ├── common.yml │ ├── instance.yml │ └── main.yml │ └── templates │ ├── config-georchestra.j2 │ ├── config-geoserver.j2 │ ├── config-proxycas.j2 │ ├── server-georchestra.xml.j2 │ ├── server-geoserver.xml.j2 │ ├── server-proxycas.xml.j2 │ ├── tomcat.conf.j2 │ └── tomcat.service.j2 └── spec ├── georchestra └── georchestra_spec.rb └── spec_helper.rb /.ansible-lint: -------------------------------------------------------------------------------- 1 | exclude_paths: 2 | - roles/elastic.elasticsearch 3 | - roles/geerlingguy.kibana 4 | skip_list: 5 | - name[missing] # complains mostly on import_tasks 6 | - name[casing] 7 | - fqcn 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | **/*.retry 3 | ansible.log 4 | roles/elastic.elasticsearch 5 | roles/geerlingguy.kibana 6 | roles/ansible-role-elasticsearch 7 | 
-------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --color 2 | --format documentation 3 | -------------------------------------------------------------------------------- /.ruby-version: -------------------------------------------------------------------------------- 1 | system 2 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015-2016, geOrchestra PSC 2 | 3 | Permission to use, copy, modify, and/or distribute this software for any 4 | purpose with or without fee is hereby granted, provided that the above 5 | copyright notice and this permission notice appear in all copies. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 12 | OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 13 | CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # geOrchestra-ansible 2 | 3 | A simple [ansible](http://docs.ansible.com) playbook to deploy a fullblown [geOrchestra](http://www.georchestra.org/) SDI instance. 4 | 5 | Right now, this will deploy a working geOrchestra from the **master** branch with mostly default configs and empty databases/datadirs. 
6 | 7 | ## Prerequisite 8 | 9 | * Debian Bookworm (12.x) VM 10 | * JAVA11 (could use [AdoptOpenJDK](https://adoptopenjdk.net/) project) 11 | * Token access to allow connection from VM to GitHub and allow to get MapStore2 artifact (see playbook.yml) 12 | 13 | ## setup 14 | 15 | In order to deploy all the middleware and components of a geOrchestra instance, you just need to: 16 | 17 | * use `ansible-galaxy` to install external roles required for geonetwork 4: 18 | ``` 19 | ansible-galaxy install -r requirements.yaml 20 | ``` 21 | 22 | * Clone source : 23 | 24 | `git clone https://github.com/georchestra/ansible.git` 25 | 26 | * setup variables for your own instance in ```playbooks/georchestra.yml``` 27 | 28 | * Open `hosts` file 29 | 30 | * Replace `IP_OF_YOUR_MACHINE` by the IP of a host where you have `ssh-with-passphrase` root access (ideally, an lxc container, a vm, or whatever suits you) to get something like : 31 | 32 | `mygeorchestra ansible_ssh_host=192.xxx.xx.x` 33 | 34 | ... and run: 35 | ``` 36 | ansible-playbook playbooks/georchestra.yml 37 | ``` 38 | 39 | 👉 If you run the playbook with a remote access (ssh) you may need to run the playbook with this command : 40 | 41 | `ansible-playbook playbooks/georchestra.yml -i hosts --user= --extra-vars "ansible_sudo_pass="` 42 | 43 | ## additional config 44 | 45 | - **Set MapStore2-georchestra config extensions** 46 | 47 | To import plugins, Tomcat (or Jetty) will need to write into a plugins folder. It can't by default, and you will get errors. 48 | In fact, this folder could be the default datadir (/etc/georchestra/datadir) but it's not fully recommended. 49 | > [More details here](http://docs.georchestra.geo-solutions.it/fr/latest/configuration/application/index.html?highlight=extensions#dynamic-files) 50 | 51 | So, we suggest you set an alternative mapstore plugins directory. 
To do that, open `/etc/default/tomcat-georchestra` and add this JVM option : 52 | 53 | `-Dgeorchestra.extensions=/target/path/extensions \` 54 | 55 | Don't forget to restart the service afterwards... and be sure this path is writable by tomcat. 56 | 57 | If you really want to use datadir, you have to set the correct rights on the `/mapstore` directory. 58 | 59 | If an `extensions.json` file is missing, just add it manually with empty `{}` json content. 60 | 61 | 62 | ## cleanup 63 | 64 | If you want to remove/cleanup the webapps, databases, LDAP DIT and datadirs, sub-tasks have been added and can be run using 65 | 66 | ``` 67 | ansible-playbook -t cleanup -e "cleanup=true" playbooks/georchestra.yml 68 | 69 | ``` 70 | Those sub-tasks aren't run by default, you need to specify the `cleanup=true` variable. 71 | 72 | ## example setup with vagrant 73 | 74 | Install the dependencies with: 75 | ``` 76 | sudo apt-get install vagrant virtualbox ansible 77 | ``` 78 | 79 | Optionally, to install the "guest additions", you may run: 80 | ``` 81 | vagrant plugin install vagrant-vbguest 82 | ``` 83 | 84 | Finally, create a virtual machine and provision it (according to the playbook), with: 85 | ``` 86 | vagrant up 87 | ``` 88 | 89 | When the machine is provisioned, you can `vagrant ssh` into it, look around, and once you're done, you may halt the machine (`vagrant halt`) or destroy it (`vagrant destroy`). 90 | 91 | To browse your SDI, just drop a line in your ```/etc/hosts``` file, registering the IP of the VM with the FQDN you declared in the playbook, eg: 92 | ``` 93 | 192.168.0.19 georchestra.example.org 94 | ``` 95 | ... and open https://georchestra.example.org/geonetwork/ in your browser. 
96 | 97 | # Serverspec 98 | 99 | A serverspec testsuite is provided to test the vagrant environments. Once the box is up (see previous section), 100 | you can test the setup with the following command: 101 | 102 | ``` 103 | $ rake spec 104 | ``` 105 | 106 | This will require the `ruby-serverspec` package to be installed on the host. 107 | 108 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require 'rake' 2 | require 'rspec/core/rake_task' 3 | 4 | task :spec => 'spec:all' 5 | task :default => :spec 6 | 7 | namespace :spec do 8 | targets = [] 9 | Dir.glob('./spec/*').each do |dir| 10 | next unless File.directory?(dir) 11 | target = File.basename(dir) 12 | target = "_#{target}" if target == "default" 13 | targets << target 14 | end 15 | 16 | task :all => targets 17 | task :default => :all 18 | 19 | targets.each do |target| 20 | original_target = target == "_default" ? target[1..-1] : target 21 | desc "Run serverspec tests to #{original_target}" 22 | RSpec::Core::RakeTask.new(target.to_sym) do |t| 23 | ENV['TARGET_HOST'] = original_target 24 | t.pattern = "spec/#{original_target}/*_spec.rb" 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 5 | VAGRANTFILE_API_VERSION = "2" 6 | 7 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 8 | # All Vagrant configuration is done here. The most common configuration 9 | # options are documented and commented below. For a complete reference, 10 | # please see the online documentation at vagrantup.com. 11 | 12 | # Every Vagrant virtual environment requires a box to build off of. 
13 | config.vm.box = "debian/bookworm64" 14 | 15 | # set CPU and RAM 16 | config.vm.provider "virtualbox" do |vb| 17 | vb.customize ["modifyvm", :id, "--memory", "20480"] 18 | vb.customize ["modifyvm", :id, "--cpus", "4"] 19 | end 20 | 21 | # We do not care about security here and want to keep using the default insecure key: 22 | config.ssh.insert_key = false 23 | 24 | # Give a nice name ("georchestra") to the VM: 25 | config.vm.define "georchestra" do |georchestra| 26 | end 27 | 28 | config.vm.provision "ansible" do |ansible| 29 | # execute this playbook for vm provisioning: 30 | ansible.playbook = "playbooks/georchestra.yml" 31 | # display ansible-playbook output: 32 | ansible.verbose = "v" 33 | ansible.groups = { 34 | "mygeorchestra" => ["georchestra"] 35 | } 36 | # If needed to pass arguments to ansible, then you can use the ANSIBLE_ARGS environment variables, e.g.: 37 | # ANSIBLE_ARGS="-t datahub" vagrant provision 38 | ansible.raw_arguments = Shellwords.shellsplit(ENV['ANSIBLE_ARGS']) if ENV['ANSIBLE_ARGS'] 39 | end 40 | 41 | config.vm.post_up_message = "geOrchestra SDI installed, congrats! 
See https://www.georchestra.org/community.html for help and bug reports" 42 | 43 | end 44 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory=./hosts 3 | roles_path=./roles/ 4 | log_path=ansible.log 5 | display_skipped_hosts = no 6 | display_ok_hosts = no 7 | allow_world_readable_tmpfiles=true 8 | -------------------------------------------------------------------------------- /hosts: -------------------------------------------------------------------------------- 1 | mygeorchestra ansible_ssh_host=IP_OF_YOUR_MACHINE 2 | # To deploy to a remote node, put your IP above, leaving the "mygeorchestra" string as is 3 | -------------------------------------------------------------------------------- /playbooks/georchestra.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: georchestra deployment 3 | hosts: mygeorchestra 4 | # note: above host must match the content of the "hosts" file 5 | become: true 6 | roles: 7 | - { role: georchestra, tags: georchestra } 8 | - { role: ansible-role-elasticsearch, tags: es } 9 | - { role: superset, tags: superset, when: superset.enabled } 10 | - { role: geerlingguy.kibana, tags: kibana } 11 | vars: 12 | georchestra_versions: 13 | # master version 14 | datadir: master # or 24.0, see https://github.com/georchestra/datadir/branches 15 | debian_repository_url: deb [signed-by=/etc/apt/keyrings/packages.georchestra.org.gpg] https://packages.georchestra.org/debian master main # or 24.0.x 16 | georchestra_repository: master # see https://github.com/georchestra/georchestra/branches 17 | geonetwork_datadir: gn4.4.5 # see https://github.com/georchestra/geonetwork_minimal_datadir/branches 18 | geoserver_datadir: master # https://github.com/georchestra/geoserver_minimal_datadir/branches 19 | # 24.0.x 20 | # datadir: 24.0 21 | # 
debian_repository_url: deb [signed-by=/etc/apt/keyrings/packages.georchestra.org.gpg] https://packages.georchestra.org/debian 24.0.x main 22 | # georchestra_repository: 24.0.x 23 | # geonetwork_datadir: gn4.2.7 24 | # geoserver_datadir: 2.25.0 25 | 26 | java_version: java-17-openjdk-amd64 27 | tomcat_version: 9 28 | kibana_server_host: 127.0.0.1 29 | elasticsearch_version: '7.x' 30 | elasticsearch_log: /srv/elasticsearch/logs 31 | elasticsearch_data: /srv/elasticsearch/data 32 | elasticsearch_extra_options: | 33 | cluster.name: "{{ georchestra.fqdn }}" 34 | bootstrap.memory_lock: true 35 | elasticsearch_heap_size_min: 1g 36 | elasticsearch_heap_size_max: 2g 37 | cadastrapp: 38 | enabled: false 39 | db: 40 | name: georchestra 41 | user: georchestra 42 | schema: cadastrapp 43 | pass: georchestra 44 | qgisdb: 45 | host: localhost 46 | port: 5432 47 | name: georchestra 48 | user: georchestra 49 | pass: georchestra 50 | schema: qadastre 51 | gitrepo: https://github.com/georchestra/cadastrapp 52 | gitversion: master 53 | debsrc: 54 | path: /data/src/georchestra/cadastrapp/cadastrapp/target/ 55 | pkg: georchestra-cadastrapp_99.master.202108020909~80b14a6-1_all.deb 56 | host: build.fluela 57 | workdir: /tmp/cadastrapp/tmp 58 | # Set here your Github token, which should at least have the 'actions' scope 59 | github_action_token: secret 60 | # if deploying an ms2 artifact from gh 61 | # mapstore: { 62 | # enabled: True, 63 | # repo: georchestra/mapstore2-georchestra, 64 | # artifact_id: 119135632, 65 | # artifact_sha256: b2803ecc76a3768fdc5e358f23b5c5ce10b02ddc #git commit hash 66 | # } 67 | openldap: 68 | topdc: georchestra 69 | basedn: dc=georchestra,dc=org # has to be in the form dc={{ topdc }},dc=xx 70 | rootdn: cn=admin,dc=georchestra,dc=org 71 | rootpw: secret 72 | gitrepo: https://raw.github.com/georchestra/georchestra 73 | ldifs: 74 | - bootstrap 75 | - docker-root/georchestraSchema 76 | - docker-root/etc/ldap.dist/modules/groupofmembers 77 | - 
docker-root/etc/ldap.dist/modules/openssh 78 | - docker-root/memberof 79 | - docker-root/lastbind 80 | - root 81 | - docker-root/georchestra 82 | gitversion: "{{ georchestra_versions.georchestra_repository }}" 83 | 84 | georchestra: 85 | fqdn: georchestra.example.org 86 | max_body_size: 100M 87 | ign_api_key: luvs4p9c4yq5ewfwqcqgm83f # invalid key only used in sviewer 88 | db: 89 | name: georchestra 90 | user: georchestra 91 | pass: georchestra 92 | datadir: 93 | path: /etc/georchestra 94 | gitrepo: https://github.com/georchestra/datadir 95 | gitversion: "{{ georchestra_versions.datadir }}" 96 | debian: 97 | repo: "{{ georchestra_versions.debian_repository_url }}" 98 | key: https://packages.georchestra.org/debian/landry%40georchestra.org.gpg.pubkey 99 | header: 100 | height: 80 101 | script: https://cdn.jsdelivr.net/gh/georchestra/header@dist/header.js 102 | logourl: https://www.georchestra.org/public/georchestra-logo.svg 103 | legacy: "false" 104 | legacyurl: /header/ 105 | # stylesheet: /public/stylesheet.css 106 | # configfile: /public/config.json 107 | geonetwork: 108 | microservice_version: 4.4.6-1 109 | db: 110 | schema: geonetwork 111 | datadir: 112 | path: /srv/data/geonetwork/ 113 | gitrepo: https://github.com/georchestra/geonetwork_minimal_datadir 114 | gitversion: "{{ georchestra_versions.geonetwork_datadir }}" 115 | geoserver: 116 | privileged: 117 | user: geoserver_privileged_user 118 | pass: gerlsSnFd6SmM 119 | datadir: 120 | path: /srv/data/geoserver/ 121 | gitrepo: https://github.com/georchestra/geoserver_minimal_datadir 122 | gitversion: "{{ georchestra_versions.geoserver_datadir }}" 123 | wms_srslist: 124 | - 2154 125 | - 3857 126 | - 3942 127 | - 3943 128 | - 3944 129 | - 3945 130 | - 3946 131 | - 3947 132 | - 3948 133 | - 3949 134 | - 3950 135 | - 4171 136 | - 4258 137 | - 4326 138 | - 23030 139 | - 23031 140 | - 23032 141 | - 32630 142 | - 32631 143 | - 32632 144 | - 4171 145 | - 4271 146 | - 3758 147 | geowebcache_datadir: 
/srv/data/geowebcache/ 148 | tomcat_keystore_pass: tomcatkstp 149 | tomcat_basedir: /srv/tomcat 150 | system_locale: en_US.UTF-8 151 | logs_basedir: /srv/log 152 | force_https: true # set to false if running behind a reverse proxy that does SSL 153 | # if running behind a reverse proxy, uncomment/fill so that you get the real client ip in accesslogs 154 | #reverse_proxy_real_ip: 10.0.0.1 155 | #reverse_proxy_real_ip_header: X-Forwarded-For 156 | console_adminemail: admin@example.org 157 | console_captcha: 158 | privateKey: "" 159 | publicKey: "" 160 | tomcat_instances: 161 | proxycas: 162 | port: 8180 163 | control_port: 8105 164 | xms: 256m 165 | xmx: 512m 166 | georchestra: 167 | port: 8280 168 | control_port: 8205 169 | xms: 1G 170 | xmx: 2G 171 | geoserver: 172 | port: 8380 173 | control_port: 8305 174 | xms: 1G 175 | xmx: 1G 176 | georchestra_wars: 177 | analytics: 178 | pkg: georchestra-analytics 179 | tomcat: georchestra 180 | enabled: true 181 | cas: 182 | pkg: georchestra-cas 183 | tomcat: proxycas 184 | enabled: true 185 | geonetwork: 186 | pkg: georchestra-geonetwork 187 | tomcat: georchestra 188 | enabled: true 189 | # mapstore: # using a github action artifact 190 | # url: https://api.github.com/repos/{{ mapstore.repo }}/actions/artifacts/{{ mapstore.artifact_id }}/zip 191 | # tomcat: georchestra 192 | # artifact_sha256: "{{ mapstore.artifact_sha256 }}" 193 | # enabled: "{{ mapstore.enabled }}" 194 | mapstore: # using the package from packages.georchestra.org 195 | pkg: georchestra-mapstore 196 | tomcat: georchestra 197 | enabled: true 198 | geoserver: 199 | pkg: georchestra-geoserver 200 | tomcat: geoserver 201 | enabled: true 202 | geowebcache: 203 | pkg: georchestra-geowebcache 204 | tomcat: georchestra 205 | enabled: true 206 | import: 207 | pkg: georchestra-datafeeder-ui 208 | tomcat: georchestra 209 | enabled: true 210 | header: 211 | pkg: georchestra-header 212 | tomcat: georchestra 213 | enabled: true 214 | console: 215 | pkg: 
georchestra-console 216 | tomcat: georchestra 217 | enabled: true 218 | cadastrapp: 219 | pkg: georchestra-cadastrapp 220 | tomcat: georchestra 221 | enabled: true 222 | ROOT: 223 | pkg: georchestra-security-proxy 224 | tomcat: proxycas 225 | enabled: true 226 | datafeeder: 227 | enabled: true 228 | port: 8480 229 | # not yet, doesnt work standalone ? 230 | # cas: 231 | # pkg: georchestra-cas 232 | # enabled: true 233 | # port: 8980 234 | gn_cloud_searching: 235 | enabled: true 236 | port: 8580 237 | url: https://packages.georchestra.org/bot/wars/geonetwork-microservices/searching.jar 238 | gn_ogc_api_records: 239 | enabled: true 240 | port: 8880 241 | #timeout: 300 242 | url: https://packages.georchestra.org/bot/wars/geonetwork-microservices/gn-cloud-ogc-api-records-service-{{ geonetwork.microservice_version }}.jar 243 | datahub: 244 | enabled: true 245 | url: https://packages.georchestra.org/bot/datahub/datahub.zip 246 | default_api_url: /geonetwork/srv/api # could be set to any other GeoNetwork catalogue, even remote if CORS allows it 247 | mdeditor: 248 | enabled: false 249 | url: https://github.com/geonetwork/geonetwork-ui/releases/download/v2.4.0-alpha.2/metadata-editor-2.4.0-alpha.2.zip 250 | mviewer: 251 | enabled: true 252 | port: 8680 253 | gitrepo: https://github.com/mviewer/mviewer 254 | gitversion: master 255 | mviewerstudio: 256 | enabled: true 257 | port: 8780 258 | gitrepo: https://github.com/mviewer/mviewerstudio 259 | gitversion: master 260 | gateway: 261 | enabled: false 262 | port: 8980 263 | superset: 264 | enabled: true 265 | secretkey: CHANGEME 266 | urlprefix: superset 267 | port: 9080 # gunicorn port 268 | tasks: 269 | - name: reconfigure Kibana after geerlingguy.kibana 270 | copy: 271 | src: resources/kibana.yml 272 | dest: /etc/kibana/kibana.yml 273 | owner: root 274 | group: root 275 | mode: "0644" 276 | notify: restart kibana 277 | 278 | handlers: 279 | - name: restart kibana 280 | service: name=kibana state=restarted 281 | 
-------------------------------------------------------------------------------- /playbooks/resources/kibana.yml: -------------------------------------------------------------------------------- 1 | server.basePath: "/geonetwork/dashboards" 2 | server.rewriteBasePath: false 3 | kibana.index: ".dashboards" 4 | -------------------------------------------------------------------------------- /requirements.yaml: -------------------------------------------------------------------------------- 1 | # ansible roles from galaxy 2 | - src: https://github.com/jdev-org/ansible-role-elasticsearch 3 | version: 5.1.3 4 | - geerlingguy.kibana,4.0.2 -------------------------------------------------------------------------------- /roles/georchestra/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart all tomcats 3 | service: 4 | name: tomcat@{{ item }} 5 | state: restarted 6 | with_items: "{{ tomcat_instances | list }}" 7 | 8 | - name: restart gn-ogc-api-records 9 | service: 10 | name: gn-ogc-api-records 11 | state: restarted 12 | -------------------------------------------------------------------------------- /roles/georchestra/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: openldap, tags: openldap } 4 | - { role: postgresql, tags: postgresql } 5 | - { role: tomcat, tags: tomcat } 6 | - { role: nginx, tags: nginx } 7 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/cas-standalone.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install the cas package 3 | apt: 4 | name: "{{ cas.pkg }}" 5 | 6 | - name: setup a systemd unit file 7 | template: 8 | src: cas/georchestra-cas.service.j2 9 | dest: /etc/systemd/system/georchestra-cas.service 10 | register: georchestra_cas_unitfile 11 | 12 | - name: start/enable the 
cas service 13 | systemd: 14 | state: restarted 15 | daemon_reload: true 16 | name: georchestra-cas 17 | when: georchestra_cas_unitfile.changed 18 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/clean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: remove wars 3 | apt: 4 | pkg: "{{ item.value.pkg }}" 5 | state: absent 6 | purge: true 7 | with_dict: "{{ georchestra_wars }}" 8 | when: item.value.pkg is defined and item.value.enabled 9 | # roles/tomcat/tasks/clean.yml already removed the unzipped wars 10 | 11 | - name: remove datadirs 12 | file: 13 | path: "{{ item }}" 14 | state: absent 15 | with_items: 16 | - "{{ geonetwork.datadir.path }}" 17 | - "{{ geoserver.datadir.path }}" 18 | - "{{ geowebcache_datadir }}" 19 | - "{{ georchestra.datadir.path }}" 20 | 21 | - name: remove non-free and contrib for dependencies (bullseye) 22 | apt_repository: 23 | repo: deb http://ftp.fr.debian.org/debian/ bullseye main non-free contrib 24 | state: absent 25 | when: ansible_distribution_release == "bullseye" 26 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: checkout georchestra datadir 3 | git: 4 | dest: "{{ georchestra.datadir.path }}" 5 | repo: "{{ georchestra.datadir.gitrepo }}" 6 | version: "{{ georchestra.datadir.gitversion }}" 7 | force: true 8 | 9 | - name: create cadastrapp subdir 10 | check_mode: false 11 | file: 12 | path: "{{ georchestra.datadir.path }}/cadastrapp" 13 | state: directory 14 | 15 | - name: template config files 16 | template: 17 | src: "{{ item.war }}/{{ item.file }}.j2" 18 | dest: "{{ georchestra.datadir.path }}/{{ item.war }}/{{ item.destdir }}/{{ item.file }}" 19 | backup: true 20 | # debug: var=item 21 | with_items: 22 | - { file: default.properties, war: ., 
destdir: "" } 23 | - { file: targets-mapping.properties, war: security-proxy, destdir: "" } 24 | - { file: security-proxy.properties, war: security-proxy, destdir: "" } 25 | - { file: log4j.properties, war: security-proxy, destdir: log4j } 26 | - { file: log4j2.xml, war: cas, destdir: config } 27 | - { file: cas.properties, war: cas, destdir: config } 28 | - { file: console.properties, war: console, destdir: "" } 29 | - { file: log4j.properties, war: console, destdir: log4j } 30 | - { file: log4j.properties, war: geowebcache, destdir: log4j } 31 | - { file: log4j2.xml, war: geonetwork, destdir: log4j } 32 | - { file: geonetwork.properties, war: geonetwork, destdir: "" } 33 | - { file: datafeeder.properties, war: datafeeder, destdir: "" } 34 | - { file: frontend-config.json, war: datafeeder, destdir: "" } 35 | # - { file: geofence-geoserver.properties, war: geoserver, destdir: 'WEB-INF/classes' } 36 | - { file: localConfig.json, war: mapstore, destdir: configs } 37 | - { file: log4j2.properties, war: mapstore, destdir: "" } 38 | - { file: proxy.properties, war: mapstore, destdir: "" } 39 | - { file: log4j.properties, war: analytics, destdir: log4j } 40 | - { file: cadastrapp.properties, war: cadastrapp, destdir: "" } 41 | - { file: logback.xml, war: cadastrapp, destdir: "" } 42 | notify: restart all tomcats 43 | when: georchestra.datadir.gitrepo == 'https://github.com/georchestra/datadir' 44 | 45 | - name: Mviewer accessible only if connected 46 | lineinfile: 47 | insertbefore: '' 50 | state: present 51 | when: mviewer.enabled 52 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/datafeeder.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install datafeeder package 3 | apt: 4 | pkg: georchestra-datafeeder 5 | state: latest 6 | 7 | - name: setup a systemd unit file 8 | template: 9 | src: datafeeder/datafeeder.service.j2 10 | dest: 
/etc/systemd/system/datafeeder.service 11 | register: df_unitfile 12 | 13 | - name: start/enable the datafeeder service 14 | systemd: 15 | state: restarted 16 | daemon_reload: true 17 | enabled: true 18 | name: datafeeder 19 | when: df_unitfile.changed 20 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/gateway.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add the adoptium signing key 3 | apt_key: 4 | url: https://packages.adoptium.net/artifactory/api/gpg/key/public 5 | state: present 6 | 7 | - name: Set up the adoptium debian repository 8 | apt_repository: 9 | repo: deb https://packages.adoptium.net/artifactory/deb/ bookworm main 10 | state: present 11 | 12 | - name: Install temurin-21-jre 13 | apt: 14 | pkg: temurin-21-jre 15 | state: latest 16 | 17 | - name: alternatives - back to java 17 as default jvm 18 | community.general.alternatives: 19 | name: java 20 | path: /usr/lib/jvm/java-17-openjdk-amd64/bin/java 21 | 22 | - name: install the georchestra-gateway package 23 | apt: 24 | pkg: georchestra-gateway 25 | state: latest 26 | 27 | - name: template a configuration into the datadir 28 | template: 29 | src: gateway/datadir/{{ item }}.yaml.j2 30 | dest: /etc/georchestra/gateway/{{ item }}.yaml 31 | with_items: 32 | - application 33 | - gateway 34 | - roles-mappings 35 | - routes 36 | - security 37 | 38 | - name: setup a systemd unit file 39 | template: 40 | src: gateway/systemd/gateway.service.j2 41 | dest: /etc/systemd/system/gateway.service 42 | register: gw_unitfile 43 | 44 | - name: start/enable the georchestra-gateway service 45 | systemd: 46 | state: restarted 47 | daemon_reload: true 48 | name: gateway 49 | when: gw_unitfile.changed 50 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/geonetwork.yml: -------------------------------------------------------------------------------- 1 
| --- 2 | - name: checkout geonetwork datadir 3 | become: true 4 | become_user: tomcat 5 | git: 6 | dest: "{{ geonetwork.datadir.path }}" 7 | repo: "{{ geonetwork.datadir.gitrepo }}" 8 | version: "{{ geonetwork.datadir.gitversion }}" 9 | force: true 10 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/geoserver.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set useRelativeRedirects for tomcat geoserver 3 | replace: 4 | dest: "{{ tomcat_basedir }}/geoserver/conf/context.xml" 5 | regexp: 6 | replace: 7 | 8 | - name: checkout geoserver datadir 9 | become: true 10 | become_user: tomcat 11 | git: 12 | dest: "{{ geoserver.datadir.path }}" 13 | repo: "{{ geoserver.datadir.gitrepo }}" 14 | version: "{{ geoserver.datadir.gitversion }}" 15 | force: true 16 | 17 | - name: fix geoserver logging path 18 | become: true 19 | become_user: tomcat 20 | replace: 21 | dest: "{{ geoserver.datadir.path }}/logging.xml" 22 | regexp: /tmp/geoserver.log 23 | replace: {{ logs_basedir }}/geoserver.log 24 | 25 | - name: set list of advertised wms srs 26 | become: true 27 | become_user: tomcat 28 | lineinfile: 29 | dest: "{{ geoserver.datadir.path }}/wms.xml" 30 | insertafter: 31 | line: {{ geoserver.wms_srslist | join('') }} 32 | 33 | - name: fix my_ldap config for usergroup/roles 34 | tags: fixgsldap 35 | become: true 36 | become_user: tomcat 37 | replace: 38 | dest: "{{ geoserver.datadir.path }}/security/{{ item.1 }}/my_ldap/config.xml" 39 | regexp: "{{ item.0.pat }}" 40 | replace: "{{ item.0.val }}" 41 | backup: true 42 | with_nested: 43 | - - pat: .* 44 | val: "{{ openldap.rootdn }}" 45 | - pat: .* 46 | val: "{{ openldap.rootpw }}" 47 | - pat: ou=(\w+),.* 48 | val: ou=\1,{{ openldap.basedn }} 49 | - pat: .* 50 | val: "member=uid={0},ou=users,{{ openldap.basedn }}" 51 | - pat: .* 52 | val: "ou=users,{{ openldap.basedn }}" 53 | - [role, usergroup] 54 | 55 | # alternative could 
be to checkout the geofence branch of geoserver_minimal_datadir 56 | # - name: enforce geofence auth 57 | # become: yes 58 | # become_user: tomcat 59 | # lineinfile: 60 | # dest: '{{ geoserver.datadir.path }}/security/auth/default/config.xml' 61 | # regexp: ' org.geoserver.security.auth.UsernamePasswordAuthenticationProvider' 62 | # line: ' it.geosolutions.geoserver.authentication.auth.GeofenceAuthenticationProvider' 63 | 64 | - name: create geowebcache datadir 65 | file: 66 | dest: "{{ geowebcache_datadir }}" 67 | owner: tomcat 68 | state: directory 69 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/gn-cloud-searching.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install the gn-cloud-searching microservice 3 | get_url: 4 | url: "{{ gn_cloud_searching.url }}" 5 | dest: /usr/share/lib/searching.jar 6 | 7 | - name: templating a configuration file 8 | tags: config 9 | template: 10 | src: geonetwork/gn-cloud-searching-application.yml.j2 11 | dest: /etc/georchestra/geonetwork/microservices/searching/config.yml 12 | 13 | - name: setup a systemd unit file 14 | tags: systemd 15 | template: 16 | src: geonetwork/gn-cloud-searching.service.j2 17 | dest: /etc/systemd/system/gn-cloud-searching.service 18 | register: gn_cloud_searching_unitfile 19 | 20 | - name: start/enable the gn-cloud-searching service 21 | tags: systemd 22 | systemd: 23 | state: restarted 24 | daemon_reload: true 25 | enabled: true 26 | name: gn-cloud-searching 27 | when: gn_cloud_searching_unitfile.changed 28 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/gn-ogc-api-records.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: fetch the gn-ogc-api-records microservice 3 | get_url: 4 | url: "{{ gn_ogc_api_records.url }}" 5 | dest: 
"/usr/share/lib/gn-cloud-ogc-api-records-service-{{ geonetwork.microservice_version }}.jar" 6 | 7 | - name: symlink the microservice to the path used by the systemd service 8 | file: 9 | src: "gn-cloud-ogc-api-records-service-{{ geonetwork.microservice_version }}.jar" 10 | dest: /usr/share/lib/gn-ogc-api-records.jar 11 | state: link 12 | force: yes 13 | 14 | - name: template the configuration file 15 | tags: config 16 | template: 17 | src: geonetwork/gn-ogc-api-records-config.yml.j2 18 | dest: /etc/georchestra/geonetwork/microservices/ogc-api-records/config.yml 19 | notify: restart gn-ogc-api-records 20 | 21 | - name: setup a systemd unit file 22 | tags: systemd 23 | template: 24 | src: geonetwork/gn-ogc-api-records.service.j2 25 | dest: /etc/systemd/system/gn-ogc-api-records.service 26 | register: gn_ogc_api_records_unitfile 27 | 28 | - name: start/enable the gn-ogc-api-records service 29 | tags: systemd 30 | systemd: 31 | state: restarted 32 | daemon_reload: true 33 | enabled: true 34 | name: gn-ogc-api-records 35 | when: gn_ogc_api_records_unitfile.changed 36 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create datadirs 3 | tags: wars 4 | file: 5 | path: "{{ item }}" 6 | state: directory 7 | owner: tomcat 8 | group: tomcat 9 | with_items: 10 | - "{{ geonetwork.datadir.path }}" 11 | - "{{ geoserver.datadir.path }}" 12 | - "{{ geowebcache_datadir }}" 13 | 14 | - import_tasks: config.yml 15 | tags: config 16 | 17 | - import_tasks: sviewer.yml 18 | tags: sviewer 19 | 20 | - import_tasks: war_cadastrapp.yml 21 | tags: cadastrapp_war 22 | when: cadastrapp.enabled 23 | 24 | - import_tasks: geoserver.yml 25 | tags: geoserver 26 | 27 | - import_tasks: geonetwork.yml 28 | tags: geonetwork 29 | 30 | - import_tasks: wars.yml 31 | tags: wars 32 | 33 | - import_tasks: datafeeder.yml 34 | tags: datafeeder 
35 | when: datafeeder.enabled 36 | 37 | - import_tasks: gn-cloud-searching.yml 38 | tags: gn-cloud-searching 39 | when: gn_cloud_searching.enabled 40 | 41 | - import_tasks: gn-ogc-api-records.yml 42 | tags: gn-ogc-api-records 43 | when: gn_ogc_api_records.enabled 44 | 45 | - import_tasks: cas-standalone.yml 46 | tags: cas 47 | when: cas is defined and cas.enabled and tomcat_version == 10 48 | 49 | - import_tasks: gateway.yml 50 | tags: gateway 51 | when: gateway is defined and gateway.enabled 52 | 53 | - import_tasks: nativelibs.yml 54 | tags: nativelibs 55 | 56 | - import_tasks: mviewerstudio.yml 57 | tags: mviewerstudio 58 | when: mviewerstudio.enabled 59 | 60 | - import_tasks: clean.yml 61 | tags: [cleanup, georchestra_cleanup] 62 | when: cleanup is defined 63 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/mviewerstudio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install MviewerStudio dependencies 3 | apt: 4 | pkg: 5 | - git 6 | - libxslt1-dev 7 | - libxml2-dev 8 | - python3 9 | - python3-pip 10 | - python3-virtualenv 11 | state: present 12 | 13 | - name: Create a georchestra group 14 | become: yes 15 | group: 16 | name: georchestra 17 | state: present 18 | 19 | - name: Create user for mviewer and mviewerstudio 20 | become: yes 21 | user: 22 | name: mviewer 23 | group: georchestra 24 | state: present 25 | 26 | - name: Checkout MviewerStudio 27 | git: 28 | repo: "{{ mviewerstudio.gitrepo }}" 29 | version: "{{ mviewerstudio.gitversion }}" 30 | dest: /srv/apps/mviewerstudio/ 31 | 32 | - name: Create folder for generated Mviewer conf create by MviewerStudio 33 | file: 34 | path: "/srv/data/mviewer/apps/{{ item }}" 35 | state: directory 36 | owner: mviewer 37 | group: georchestra 38 | with_items: 39 | - store 40 | - prod 41 | 42 | - name: Create a symbolic link for Mviewer 43 | file: 44 | src: "/srv/data/mviewer/apps/{{ item }}" 45 | dest: 
"/var/www/mviewer/apps/{{ item }}" 46 | state: link 47 | with_items: 48 | - store 49 | - prod 50 | 51 | - name: Configure Mviewerstudio access only if connected 52 | lineinfile: 53 | insertbefore: '' 56 | state: present 57 | 58 | - name: Copy MviewerStudio static ressource 59 | copy: 60 | src: "/srv/apps/mviewerstudio/{{ item }}" 61 | dest: /srv/apps/mviewerstudio/srv/python/mviewerstudio_backend/static/ 62 | remote_src: true 63 | with_items: 64 | - css 65 | - img 66 | - js 67 | - lib 68 | - index.html 69 | - mviewerstudio.i18n.json 70 | 71 | - name: Create folder for mviewerstudio frontend configuration 72 | file: 73 | path: "/srv/apps/mviewerstudio/srv/python/mviewerstudio_backend/static/apps/" 74 | state: directory 75 | 76 | - name: Template MviewerStudio config 77 | template: 78 | src: mviewerstudio/config.json.j2 79 | dest: /srv/apps/mviewerstudio/srv/python/mviewerstudio_backend/static/apps/config.json 80 | 81 | - name: Create logdir for mviewerstudio 82 | file: 83 | path: "{{ logs_basedir }}/mviewer" 84 | state: directory 85 | owner: mviewer 86 | group: georchestra 87 | 88 | - name: Install MviewerStudio service requirements 89 | pip: 90 | requirements: /srv/apps/mviewerstudio/srv/python/requirements.txt 91 | virtualenv: /srv/apps/mviewerstudio/srv/python/.venv 92 | 93 | - name: Template MviewerStudio systemd unit 94 | tags: systemd_unit 95 | template: 96 | src: mviewerstudio/mviewerstudio.service.j2 97 | dest: /etc/systemd/system/mviewerstudio.service 98 | 99 | - name: Reload systemd 100 | tags: systemd_unit 101 | systemd: 102 | enabled: true 103 | daemon-reload: true 104 | name: mviewerstudio.service 105 | 106 | - name: Start mviewerstudio 107 | service: 108 | name: mviewerstudio 109 | state: started -------------------------------------------------------------------------------- /roles/georchestra/tasks/nativelibs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: enable non-free and contrib for 
dependencies 3 | apt_repository: 4 | repo: deb http://deb.debian.org/debian bullseye main contrib non-free  # fixed: the task name promises non-free, but it was missing from the component list (contrib alone only covers ttf-mscorefonts-installer) 5 | 6 | # libgdal-java does not exist anymore in bullseye 7 | # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=947960 8 | - name: install runtime dependencies 9 | apt: 10 | pkg: [ttf-mscorefonts-installer, gdal-bin] 11 | state: present 12 | 13 | # - name: fetch libjpeg-turbo deb from sourceforge 14 | # get_url: dest=/tmp/ url=http://sourceforge.net/projects/libjpeg-turbo/files/1.4.0/libjpeg-turbo-official_1.4.0_amd64.deb 15 | 16 | # - name: install libjpeg-turbo deb 17 | # apt: deb=/tmp/libjpeg-turbo-official_1.4.0_amd64.deb 18 | 19 | - name: point georchestra's shared.loader to catalina.base/shared/*.jar 20 | lineinfile: 21 | dest: "{{ tomcat_basedir }}/georchestra/conf/catalina.properties" 22 | regexp: shared.loader=${catalina.home}/shared/classes,${catalina.home}/shared/*.jar,/var/lib/tomcat{{ tomcat_version }}/shared/classes,/var/lib/tomcat{{ tomcat_version 23 | }}/shared/*.jar 24 | line: shared.loader=${catalina.home}/shared/classes,${catalina.base}/shared/*.jar,/var/lib/tomcat{{ tomcat_version }}/shared/classes,/var/lib/tomcat{{ tomcat_version 25 | }}/shared/*.jar 26 | 27 | - name: create tomcat shared dir 28 | file: 29 | dest: "{{ tomcat_basedir }}/georchestra/shared/" 30 | state: directory 31 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/sviewer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: checkout sviewer 3 | git: 4 | repo: https://github.com/georchestra/sviewer.git 5 | dest: /var/www/georchestra/htdocs/sviewer/ 6 | version: master 7 | 8 | - name: template config 9 | template: 10 | src: sviewerConfig.js.j2 11 | dest: /var/www/georchestra/htdocs/sviewer/etc/customConfig.js 12 | backup: true 13 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/war_cadastrapp.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | - name: copy cadastrapp deb 3 | check_mode: false 4 | synchronize: 5 | src: "{{ cadastrapp.debsrc.path }}/{{ cadastrapp.debsrc.pkg }}" 6 | dest: /tmp/{{ cadastrapp.debsrc.pkg }} 7 | delegate_to: "{{ cadastrapp.debsrc.host }}" 8 | 9 | - name: install cadastrapp deb 10 | apt: 11 | deb: /tmp/{{ cadastrapp.debsrc.pkg }} 12 | state: present 13 | 14 | - name: remove deb 15 | file: 16 | path: /tmp/{{ cadastrapp.debsrc.pkg }} 17 | state: absent 18 | -------------------------------------------------------------------------------- /roles/georchestra/tasks/wars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install apt-transport-https 3 | tags: apt_repo 4 | apt: 5 | pkg: apt-transport-https 6 | state: present 7 | 8 | - name: add georchestra repository key 9 | tags: apt_repo 10 | apt_key: 11 | keyring: /etc/apt/keyrings/packages.georchestra.org.gpg 12 | url: "{{ georchestra.debian.key }}" 13 | 14 | - name: add georchestra debian repo 15 | tags: apt_repo 16 | apt_repository: 17 | repo: "{{ georchestra.debian.repo }}" 18 | 19 | - name: install debian packages 20 | apt: 21 | pkg: "{{ item.value.pkg }}" 22 | update_cache: true 23 | state: latest # noqa: package-latest 24 | with_dict: "{{ georchestra_wars }}" 25 | when: item.value.enabled and item.key != 'cadastrapp' and item.value.pkg is defined 26 | 27 | - name: install the wars defined by an url 28 | get_url: 29 | url: "{{ item.value.url }}" 30 | dest: /usr/share/lib/georchestra-{{ item.key }}.zip 31 | mode: "0644" 32 | headers: 33 | Authorization: Bearer {{ github_action_token }} 34 | with_dict: "{{ georchestra_wars }}" 35 | when: item.value.enabled and item.value.url is defined 36 | 37 | - name: unzips the war 38 | unarchive: 39 | src: /usr/share/lib/georchestra-{{ item.key }}.zip 40 | dest: /usr/share/lib 41 | remote_src: true 42 | with_dict: "{{ georchestra_wars }}" 43 | 
when: item.value.enabled and item.value.url is defined 44 | 45 | - name: creating symlinks for downloaded webapps 46 | file: 47 | owner: tomcat 48 | src: /usr/share/lib/{{ item.key }}{% if item.value.artifact_sha256 is defined %}-{{ item.value.artifact_sha256 }}{% endif %}.war 49 | dest: "{{ tomcat_basedir }}/{{ item.value.tomcat }}/webapps/{{ item.key }}.war" 50 | state: link 51 | force: true 52 | with_dict: "{{ georchestra_wars }}" 53 | when: item.value.enabled and item.value.url is defined 54 | 55 | - name: removes the downloaded archive 56 | file: 57 | path: /usr/share/lib/georchestra-{{ item.key }}.zip 58 | state: absent 59 | with_dict: "{{ georchestra_wars }}" 60 | when: item.value.enabled and item.value.url is defined 61 | 62 | - name: symlink webapps in each tomcat instance 63 | file: 64 | owner: tomcat 65 | src: /usr/share/lib/{{ item.value.pkg }}/{{ item.key }}{% if item.key != 'import' and item.key != 'geowebcache' %}-generic{% endif %}.war 66 | dest: "{{ tomcat_basedir }}/{{ item.value.tomcat }}/webapps/{{ item.key }}.war" 67 | state: link 68 | force: true 69 | with_dict: "{{ georchestra_wars }}" 70 | when: item.value.enabled and item.value.pkg is defined and (item.key != 'cadastrapp' or (item.key == 'cadastrapp' and cadastrapp.enabled)) 71 | 72 | # instances need to be started so that webapps are deployed 73 | - name: start instances 74 | service: 75 | name: tomcat@{{ item }} 76 | state: started 77 | with_items: "{{ tomcat_instances | list }}" 78 | -------------------------------------------------------------------------------- /roles/georchestra/templates/analytics/log4j.properties.j2: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=WARN, R 2 | 3 | log4j.logger.org.georchestra.analytics=WARN, R 4 | 5 | log4j.appender.R = org.apache.log4j.rolling.RollingFileAppender 6 | log4j.appender.R.RollingPolicy = org.apache.log4j.rolling.TimeBasedRollingPolicy 7 | log4j.appender.R.RollingPolicy.FileNamePattern 
= {{ logs_basedir }}/analytics.%d.log.gz 8 | log4j.appender.R.RollingPolicy.ActiveFileName = {{ logs_basedir }}/analytics.log 9 | log4j.appender.R.Append = true 10 | log4j.appender.R.layout = org.apache.log4j.PatternLayout 11 | log4j.appender.R.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n 12 | 13 | -------------------------------------------------------------------------------- /roles/georchestra/templates/cadastrapp/cadastrapp.properties.j2: -------------------------------------------------------------------------------- 1 | ## Configuration file for cadastrapp 2 | 3 | ## Two modes are available on this addons 4 | ## 0 - All user, even connected user can search in any commune in database 5 | ## (result with CNIL authorization > 0 will be filtered) 6 | ## 1 - Connected user can only search and view result on their geographical limitation, 7 | ## not connected user can search without geographical limitation (result with CNIL authorization > 0 will be filtered) 8 | ## (see https://github.com/georchestra/cadastrapp/blob/master/script/commun/user/README.md) 9 | user.search.are.filtered=1 10 | 11 | # Database schema name 12 | schema.name={{ cadastrapp.db.schema }} 13 | 14 | ## CNIL ROLE NAME for ldap role. 
Only Cnil 1 and Cnil 2 make some limitation 15 | cnil1RoleName=ROLE_EL_CAD_CNIL1 16 | cnil2RoleName=ROLE_EL_CAD_CNIL2 17 | 18 | ## Separator in sec-roles http header, 19 | # before version 15.06 of georchestra sec-roles are separated by a , after it's a ; 20 | roleSeparator=; 21 | 22 | ## Min char number that is required to make a search in database 23 | ## in link with the min char in combox need before calling webapp 24 | minNbCharForSearch=3 25 | 26 | # PDF generation 27 | pdf.imageHeight=550 28 | pdf.imageWidth=550 29 | pdf.dateValiditeDonneesMajic=01/01/2016 30 | pdf.dateValiditeDonneesEDIGEO=01/01/2016 31 | pdf.organisme=Un service du CRAIG 32 | 33 | ## Use to create image for pdf on server side (could be use for client configuration as well) 34 | ## if baseMap URL is empty no image will be added in pdf 35 | #baseMap.wms.url=https://osm.geobretagne.fr/service/wms?VERSION=1.1.1&Request=GetCapabilities&Service=WMS 36 | baseMap.wms.url= 37 | baseMap.layer.name=osm:roads 38 | baseMap.format=image/png 39 | baseMap.SRS=EPSG:3857 40 | # only used when wms service need authentification 41 | # if empty no authentification is used 42 | baseMap.wms.username= 43 | baseMap.wms.password= 44 | 45 | ## information about WMS and WFS service 46 | # Here you can configure the layer name and with field contains the parcelle Id depending if you are on Qgis or Arcopole model 47 | cadastre.wms.url=https://{{ georchestra.fqdn }}/geoserver/wms 48 | cadastre.wms.layer.name=qgis:parcelle_bati 49 | # only used when wms service need authentification 50 | # if empty no authentification is used 51 | cadastre.wms.username= 52 | cadastre.wms.password= 53 | 54 | cadastre.wfs.url=https://{{ georchestra.fqdn }}/geoserver/wfs 55 | cadastre.wfs.layer.name=qgis:geo_parcelle 56 | # only used when wfs service need authentification 57 | # if empty no authentification is used 58 | # if authentification is used make sure to use https service with certificate declared in tomcat 59 | 
cadastre.wfs.username= 60 | cadastre.wfs.password= 61 | 62 | 63 | ## use in the addon to check WFS fieldname to be search 64 | cadastre.layer.idParcelle=geo_parcelle 65 | cadastre.format=image/png 66 | cadastre.SRS=EPSG:3857 67 | 68 | ## used in inner call service to create image for pdf from fo file 69 | # This service does need to go throw the security proxy 70 | webapp.url.services=http://localhost:{{ tomcat_instances[georchestra_wars['ROOT'].tomcat]['port'] }}/cadastrapp/services/ 71 | 72 | ## Temp folder with write acces to create image and temporary files needed for pdf generation 73 | ## This folder should be writtable by tomcat user 74 | tempFolder={{ cadastrapp.workdir }} 75 | 76 | # Minimum string length to be tested before trying to launch service 77 | parcelleId.length=14 78 | cgoCommune.length=6 79 | 80 | # Maximun number of object request 81 | maxRequest=8 82 | -------------------------------------------------------------------------------- /roles/georchestra/templates/cadastrapp/config.json.j2: -------------------------------------------------------------------------------- 1 | [{ 2 | "id": "cadastrapp_0", 3 | "name": "Cadastrapp", 4 | "enabled" :true, 5 | "preloaded": true, 6 | "title": { 7 | "fr": "Cadastrapp", 8 | "en": "Cadastrapp", 9 | "es": "Cadastrapp", 10 | "de": "Cadastrapp" 11 | }, 12 | "description": { 13 | "fr": "Une serie d'outils pour exploiter pleinement les donnees cadastrales de la DGFiP", 14 | "en": "A series of tools to fully exploit the cadastral data DGFiP", 15 | "es": "Una serie de herramientas para aprovechar al maximo los datos catastrales DGFiP", 16 | "de": "TODO" }, 17 | "options": { 18 | "target": "tbar_12", 19 | "webapp":{ 20 | "url" : "/cadastrapp/" 21 | }, 22 | "WMSLayer":{ 23 | "layerNameInPanel":"Cadastre", 24 | "transparent":true, 25 | "format": "image/png" 26 | }, 27 | "WFSLayerSetting": { 28 | "request" : "getfeature", 29 | "version" : "1.0.0", 30 | "service" : "wfs", 31 | "outputFormat" : "application/json", 32 | 
"geometryField":"geom" 33 | }, 34 | "defautStyleParcelle" :{ 35 | "strokeColor": "#000000", 36 | "strokeWidth":"0.5", 37 | "pointRadius": 6, 38 | "pointerEvents": "visiblePainted", 39 | "fontSize": "10px" 40 | }, 41 | "selectedStyle" : { 42 | "defautColor":"#AAAAAA", 43 | "opacity":"0.4", 44 | "colorState1":"#FFFF00", 45 | "colorState2":"#81BEF7", 46 | "colorState3":"#57D53B", 47 | "strokeWidth":"3" 48 | }, 49 | "popup" : { 50 | "timeToShow": 2000, 51 | "minZoom":14 52 | } 53 | } 54 | }] 55 | -------------------------------------------------------------------------------- /roles/georchestra/templates/cadastrapp/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | %d %-5level %logger{36} - %msg%n 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | /tmp/cadastrapp.log 35 | 36 | %d [%thread] %-5level /%X{uri} - %X{sec-username:-nouser} - %X{sec-roles:-norole} - %X{sec-org:-noorg} -%logger{36} - %msg%n 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /roles/georchestra/templates/cadastrapp/logback.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | %d %-5level %logger{36} - %msg%n 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | {{ logs_basedir }}/cadastrapp.log 35 | 36 | {{ logs_basedir }}/cadastrapp.%d.log.gz 37 | 38 | 39 | %d [%thread] %-5level /%X{uri} - %X{sec-username:-nouser} - %X{sec-roles:-norole} - %X{sec-org:-noorg} -%logger{36} - %msg%n 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /roles/georchestra/templates/cas/cas.properties.j2: -------------------------------------------------------------------------------- 1 
| cas.server.name=https://{{ georchestra.fqdn }} 2 | cas.server.prefix=${cas.server.name}/cas 3 | 4 | logging.config=file:/etc/georchestra/cas/config/log4j2.xml 5 | 6 | cas.theme.param-name=georchestra 7 | cas.theme.default-theme-name=georchestra 8 | 9 | cas.service-registry.core.init-from-json=false 10 | cas.service-registry.json.location=file:/etc/georchestra/cas/services 11 | #uncomment if getting 302 redirects on cas.{css,js} behind nginx/apache 12 | #server.forward-headers-strategy=FRAMEWORK 13 | 14 | cas.authn.accept.enabled=false 15 | 16 | cas.authn.ldap[0].ldap-url=ldap://localhost:389/ 17 | cas.authn.ldap[0].bind-dn= 18 | cas.authn.ldap[0].bind-credential= 19 | 20 | cas.authn.ldap[0].base-dn={{ openldap.basedn }} 21 | cas.authn.ldap[0].subtree-search=true 22 | cas.authn.ldap[0].search-filter=uid={user} 23 | cas.authn.ldap[0].page-size=0 24 | 25 | cas.authn.ldap[0].pool-passivator=NONE 26 | cas.authn.ldap[0].connection-strategy= 27 | cas.authn.ldap[0].connect-timeout=PT5S 28 | cas.authn.ldap[0].disable-pooling=false 29 | cas.authn.ldap[0].min-pool-size=3 30 | cas.authn.ldap[0].max-pool-size=10 31 | cas.authn.ldap[0].validate-on-checkout=true 32 | cas.authn.ldap[0].validate-periodically=true 33 | cas.authn.ldap[0].validate-period=PT5M 34 | cas.authn.ldap[0].validate-timeout=PT5S 35 | cas.authn.ldap[0].fail-fast=true 36 | cas.authn.ldap[0].idle-time=PT10M 37 | cas.authn.ldap[0].prune-period=PT2H 38 | cas.authn.ldap[0].block-wait-time=PT3S 39 | 40 | cas.authn.ldap[0].use-start-tls=false 41 | cas.authn.ldap[0].response-timeout=PT5S 42 | cas.authn.ldap[0].allow-multiple-dns=false 43 | cas.authn.ldap[0].allow-multiple-entries=false 44 | cas.authn.ldap[0].follow-referrals=false 45 | cas.authn.ldap[0].binary-attributes=jpegPhoto 46 | cas.authn.ldap[0].name= 47 | 48 | cas.authn.ldap[0].type=DIRECT 49 | cas.authn.ldap[0].dn-format=uid=%s,ou=users,{{ openldap.basedn }} 50 | cas.authn.oidc.jwks.file-system.jwks-file=file:///tmp/keystore.jwksdown 51 | 52 | 
cas.authn.saml-idp.core.entity-id=https://${FQDN}/idp 53 | cas.authn.saml-idp.metadata.file-system.location=file:///tmp/ 54 | -------------------------------------------------------------------------------- /roles/georchestra/templates/cas/georchestra-cas.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=geOrchestra georchestra-cas webservice 3 | After=syslog.target 4 | 5 | [Service] 6 | User=tomcat 7 | ExecStart=/usr/lib/jvm/{{ java_version }}/bin/java -Dserver.ssl.keyStore=file:/etc/ssl/certs/java/cacerts -Dserver.ssl.keyStorePassword=changeit -Dserver.ssl.keyPassword=changeit -Dserver.port={{ cas.port }} -Dgeorchestra.datadir={{ georchestra.datadir.path }} -Dlog4j2.configurationFile={{ georchestra.datadir.path }}/cas/config/log4j2.xml -Dcas.standalone.configurationDirectory={{ georchestra.datadir.path }}/cas/config -jar /usr/share/lib/georchestra-cas/cas.war 8 | Environment=JAVA_OPTS=-Dfile.encoding=UTF-8 9 | Environment=LANG={{ system_locale }} 10 | SuccessExitStatus=143 11 | StandardOutput=append:{{ logs_basedir }}/georchestra-cas.log 12 | StandardError=append:{{ logs_basedir }}/georchestra-cas.log 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /roles/georchestra/templates/cas/log4j2.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | {{ logs_basedir }} 6 | info 7 | warn 8 | info 9 | warn 10 | debug 11 | warn 12 | warn 13 | warn 14 | warn 15 | warn 16 | warn 17 | 18 | 19 | 20 | 21 | 22 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 
99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 116 | 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /roles/georchestra/templates/console/log4j.properties.j2: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------ 2 | # 3 | # The following properties set the logging levels and log appender. The 4 | # log4j.rootLogger variable defines the default log level and one or more 5 | # appenders. For the console, use 'S'. For the daily rolling file, use 'R'. 6 | # For an HTML formatted log, use 'H'. 7 | # 8 | # To override the default (rootLogger) log level, define a property of the 9 | # form (see below for available values): 10 | # 11 | # log4j.logger. = 12 | # 13 | # Possible Log Levels: 14 | # FATAL, ERROR, WARN, INFO, DEBUG 15 | # 16 | #------------------------------------------------------------------------------ 17 | log4j.rootLogger=WARN, R 18 | 19 | log4j.logger.org.georchestra.console=WARN, R 20 | log4j.logger.org.georchestra.console.ws.utils=INFO, R 21 | 22 | log4j.appender.R = org.apache.log4j.rolling.RollingFileAppender 23 | log4j.appender.R.RollingPolicy = org.apache.log4j.rolling.TimeBasedRollingPolicy 24 | log4j.appender.R.RollingPolicy.FileNamePattern = {{ logs_basedir }}/console.%d.log.gz 25 | log4j.appender.R.RollingPolicy.ActiveFileName = {{ logs_basedir }}/console.log 26 | log4j.appender.R.Append = true 27 | log4j.appender.R.layout = org.apache.log4j.PatternLayout 28 | log4j.appender.R.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n 29 | -------------------------------------------------------------------------------- /roles/georchestra/templates/datafeeder/datafeeder.properties.j2: -------------------------------------------------------------------------------- 1 | ### The following properties are inherited from the geOrchestra 
default.properties, 2 | ### if you want to override them for datafeeder, uncomment them. 3 | 4 | # PostgreSQL server domain name 5 | # pgsqlHost=database 6 | 7 | # PostgreSQL server port 8 | # pgsqlPort=5432 9 | 10 | # PostgreSQL database name 11 | # pgsqlDatabase=georchestra 12 | 13 | # User to connect to PostgreSQL server 14 | # pgsqlUser=georchestra 15 | 16 | # Password to connect to PostgreSQL server 17 | # pgsqlPassword=georchestra 18 | 19 | #################################### 20 | # Datafeeder specific properties # 21 | #################################### 22 | 23 | publicUrl=https://${domainName} 24 | 25 | # pgsqlSchema=datafeeder 26 | 27 | # maximum size allowed for uploaded files. (e.g. 128MB, GB can't be used, only KB or MB) 28 | file-upload.max-file-size=512MB 29 | # maximum size allowed for multipart/form-data requests (e.g. 128MB, GB can't be used, only KB or MB) 30 | file-upload.max-request-size=512MB 31 | # size threshold after which files will be written to disk. 32 | file-upload.file-size-threshold=1MB 33 | # directory location where files will be stored by the servlet container once the request exceeds the {@link #fileSizeThreshold} 34 | file-upload.temporary-location=${java.io.tmpdir}/datafeeder/tmp 35 | # directory location where files will be stored. 
36 | file-upload.persistent-location=${java.io.tmpdir}/datafeeder/uploads 37 | # select the file to serve as the front-end application configuration 38 | front-end.config.uri=file:${georchestra.datadir}/datafeeder/frontend-config.json 39 | 40 | datafeeder.publishing.geoserver.public-url=https://${domainName}/geoserver 41 | # Use this for HTTP basic authentication to geoserver api url: 42 | #datafeeder.publishing.geoserver.api-url=https://${domainName}/geoserver/rest 43 | #datafeeder.publishing.geoserver.auth.type=basic 44 | #datafeeder.publishing.geoserver.auth.basic.username=geoserver_privileged_user 45 | #datafeeder.publishing.geoserver.auth.basic.password=gerlsSnFd6SmM 46 | # Use this for HTTP-headers based authentication to GeoServer's api url: 47 | datafeeder.publishing.geoserver.api-url=http://localhost:{{ tomcat_instances['geoserver']['port'] }}/geoserver/rest 48 | datafeeder.publishing.geoserver.auth.type=headers 49 | datafeeder.publishing.geoserver.auth.headers.[sec-proxy]=true 50 | datafeeder.publishing.geoserver.auth.headers.[sec-username]=idatafeeder 51 | datafeeder.publishing.geoserver.auth.headers.[sec-roles]=ROLE_ADMINISTRATOR;ROLE_GN_ADMIN 52 | 53 | 54 | datafeeder.publishing.geonetwork.api-url=http://localhost:{{ tomcat_instances['georchestra']['port'] }}/geonetwork 55 | datafeeder.publishing.geonetwork.public-url=https://${domainName}/geonetwork 56 | # Use this for HTTP basic authentication to Geonetwork's api url: 57 | #datafeeder.publishing.geonetwork.auth.type=basic 58 | #datafeeder.publishing.geonetwork.auth.basic.username= 59 | #datafeeder.publishing.geonetwork.auth.basic.password= 60 | # Use this for HTTP-headers based authentication to Geonetwork's api url: 61 | datafeeder.publishing.geonetwork.auth.type=headers 62 | datafeeder.publishing.geonetwork.auth.headers.[sec-proxy]=true 63 | datafeeder.publishing.geonetwork.auth.headers.[sec-username]=idatafeeder 64 | datafeeder.publishing.geonetwork.auth.headers.[sec-org]=geOrchestra 65 | 
datafeeder.publishing.geonetwork.auth.headers.[sec-roles]=ROLE_ADMINISTRATOR;ROLE_GN_ADMIN 66 | # This is odd, apparently any UUID works as XSRF token, and these two need to be set 67 | datafeeder.publishing.geonetwork.auth.headers.[X-XSRF-TOKEN]=c9f33266-e242-4198-a18c-b01290dce5f1 68 | datafeeder.publishing.geonetwork.auth.headers.[Cookie]=XSRF-TOKEN=c9f33266-e242-4198-a18c-b01290dce5f1 69 | 70 | #template-record-id, an existing geonetwork record id to use as template. If provided, takes precedence over template-record 71 | #datafeeder.publishing.geonetwork.template-record-id: 72 | datafeeder.publishing.geonetwork.template-record: file:${georchestra.datadir}/datafeeder/metadata_template.xml 73 | datafeeder.publishing.geonetwork.template-transform: file:${georchestra.datadir}/datafeeder/metadata_transform.xsl 74 | # you can override the resource type used in the metadata by uncommenting this line: 75 | # possible values are here: 76 | # https://github.com/geonetwork/core-geonetwork/blob/main/schemas/iso19139/src/main/plugin/iso19139/schema/resources/Codelist/gmxCodelists.xml#L1538-L1646 77 | # defaults to dataset 78 | #datafeeder.publishing.geonetwork.defaultResourceType=dataset 79 | 80 | 81 | # GeoTools DataStore connection params used inside datafeeder when importing 82 | # uploaded datasets to the target store 83 | datafeeder.publishing.backend.local.dbtype=postgis 84 | datafeeder.publishing.backend.local.host=${pgsqlHost} 85 | datafeeder.publishing.backend.local.port=${pgsqlPort} 86 | datafeeder.publishing.backend.local.database=${pgsqlDatabase} 87 | # is a placeholder to be replaced by the actual schema computed from the "sec-org" request header 88 | datafeeder.publishing.backend.local.schema= 89 | datafeeder.publishing.backend.local.user=${pgsqlUser} 90 | datafeeder.publishing.backend.local.passwd=${pgsqlPassword} 91 | datafeeder.publishing.backend.local.preparedStatements=true 92 | 93 | # GeoTools DataStore connection params to be used when configuring a 
GeoServer 94 | # datastore to access the imported datasets 95 | datafeeder.publishing.backend.geoserver.dbtype=postgis 96 | datafeeder.publishing.backend.geoserver.preparedStatements=true 97 | # is a placeholder to be replaced by the actual schema computed from the "sec-org" request header 98 | datafeeder.publishing.backend.geoserver.schema= 99 | # is a placeholder to be replaced by the lowercase "sec-org" request header 100 | datafeeder.publishing.backend.geoserver.workspacename= 101 | # is a placeholder to be replaced by the lowercase "sec-org" request header prefixed by 'datafeeder_' 102 | datafeeder.publishing.backend.geoserver.storename= 103 | #datafeeder.publishing.backend.geoserver.jndiReferenceName=jdbc/datafeeder 104 | #if a JNDI data source is configured in geoserver, uncomment the above line and comment out the following ones 105 | datafeeder.publishing.backend.geoserver.host=${pgsqlHost} 106 | datafeeder.publishing.backend.geoserver.port=${pgsqlPort} 107 | datafeeder.publishing.backend.geoserver.database=${pgsqlDatabase} 108 | datafeeder.publishing.backend.geoserver.user=${pgsqlUser} 109 | datafeeder.publishing.backend.geoserver.passwd=${pgsqlPassword} 110 | 111 | # note how to set a property with spaces: property.prefix.[name\ with\ spaces]=value 112 | datafeeder.publishing.backend.geoserver.[Loose\ bbox]=false 113 | datafeeder.publishing.backend.geoserver.[Estimated\ extends]=true 114 | 115 | 116 | #datafeeder.email.send=true #not used yet 117 | datafeeder.email.ackTemplate=file:${georchestra.datadir}/datafeeder/templates/analysis-started-email-template.txt 118 | datafeeder.email.analysisFailedTemplate=file:${georchestra.datadir}/datafeeder/templates/analysis-failed-email-template.txt 119 | datafeeder.email.publishFailedTemplate=file:${georchestra.datadir}/datafeeder/templates/data-publishing-failed-email-template.txt 120 | 
datafeeder.email.publishSuccessTemplate=file:${georchestra.datadir}/datafeeder/templates/data-publishing-succeeded-email-template.txt 121 | 122 | #administratorEmail=georchestra@georchestra.mydomain.org 123 | 124 | # Configuration for SMTP email sending of application events 125 | # Datafeeder will send emails to the user when a job is started, finished, or failed, 126 | # if the spring.mail.* configuration properties are set. 127 | 128 | # already defined in ../default.properties: 129 | #smtpHost=localhost 130 | #smtpPort=25 131 | # not defined in ../default.properties: 132 | smtpUser= 133 | smtpPassword= 134 | smtpAuth=false 135 | smtpTLS=false 136 | 137 | # Example smtp config for gmail: 138 | #smtpHost=smtp.gmail.com 139 | #smtpPort=587 140 | #smtpUser=${administratorEmail} 141 | #smtpPassword=changeme 142 | #smtpAuth=true 143 | #smtpTLS=true 144 | 145 | spring.mail.host=${smtpHost} 146 | spring.mail.port=${smtpPort} 147 | spring.mail.username=${smtpUser:} 148 | spring.mail.password=${smtpPassword:} 149 | spring.mail.protocol=smtp 150 | spring.mail.test-connection=${smtpTest:true} 151 | spring.mail.properties.mail.smtp.auth=${smtpAuth:false} 152 | spring.mail.properties.mail.smtp.starttls.enable=${smtpTLS:false} 153 | 154 | # Uncomment these properties if you encounter weird redirections to the internal 155 | # service name when hitting https://.../datafeeder/" ; it will force spring 156 | # to build the "Location" http header using the 'X-Forwarded-*' ones. 
157 | #server.forward-headers-strategy=FRAMEWORK 158 | #server.servlet.context-path=/datafeeder 159 | 160 | # Example for gmail: 161 | # make sure 2-step verification is turned off: https://support.google.com/accounts/answer/1064203?hl=en 162 | # and Allow Less Secure App turned ON: https://myaccount.google.com/lesssecureapps 163 | #spring.mail.host=smtp.gmail.com 164 | #spring.mail.port=587 165 | #spring.mail.username: noreply.georchestra.dev@gmail.com 166 | #spring.mail.password: ***** 167 | #spring.mail.protocol: smtp 168 | #spring.mail.test-connection: true 169 | #spring.mail.properties.mail.smtp.auth: true 170 | #spring.mail.properties.mail.smtp.starttls.enable: true 171 | -------------------------------------------------------------------------------- /roles/georchestra/templates/datafeeder/datafeeder.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=geOrchestra datafeeder backend 3 | After=syslog.target 4 | 5 | [Service] 6 | User=www-data 7 | ExecStart=/usr/bin/java -Dserver.port={{ datafeeder.port }} -jar /usr/share/lib/georchestra-datafeeder/datafeeder-bin.jar 8 | SuccessExitStatus=143 9 | StandardOutput=append:{{ logs_basedir }}/datafeeder.log 10 | StandardError=append:{{ logs_basedir }}/datafeeder.log 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /roles/georchestra/templates/datafeeder/frontend-config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "encodings": [ 3 | { 4 | "label": "UTF-8", 5 | "value": "UTF-8" 6 | }, 7 | { 8 | "label": "ISO-8859-1", 9 | "value": "ISO-8859-1" 10 | } 11 | ], 12 | "projections": [ 13 | { 14 | "label": "WGS84", 15 | "value": "EPSG:4326" 16 | }, 17 | { 18 | "label": "Lambert 93", 19 | "value": "EPSG:2154" 20 | }, 21 | { 22 | "label": "Web Mercator", 23 | "value": "EPSG:3857" 24 | } 25 | ], 26 | "thesaurusUrl": 
"https://{{ georchestra.fqdn }}/geonetwork/srv/api/registries/vocabularies/search?type=CONTAINS&thesaurus=external.theme.httpinspireeceuropaeutheme-theme&rows=200&q=${q}&uri=**&lang=${lang}" 27 | } 28 | -------------------------------------------------------------------------------- /roles/georchestra/templates/default.properties.j2: -------------------------------------------------------------------------------- 1 | # This file holds some property shared across all geOrchestra webapps 2 | # All properties in this file MUST be present. Do not comment any of them, they 3 | # do not have a default value. Adapt them according to the needs of your 4 | # instance. 5 | 6 | # Domain name of the geOrchestra instance 7 | # URL must not include the trailing slash 8 | # Once modified, adapt the following files accordingly: 9 | # - cas/cas.properties 10 | # - ... 11 | # or replace all the strings with `sed` (see README.md) 12 | domainName={{ georchestra.fqdn }} 13 | 14 | # Name of this geOrchestra instance 15 | instanceName=geOrchestra 16 | 17 | # Default language 18 | language=en 19 | 20 | # Header script for web component header 21 | # https://github.com/georchestra/header 22 | headerScript={{ georchestra.header.script | default('https://cdn.jsdelivr.net/gh/georchestra/header@dist/header.js') }} 23 | 24 | # Header height (size in px) 25 | # If different from default value "90", adapt analytics/js/GEOR_custom.js 26 | # accordingly 27 | headerHeight={{ georchestra.header.height | default('80') }} 28 | 29 | # Variable use to keep the old iframe header. 30 | # Set headerUrl accordingly 31 | # Default false 32 | useLegacyHeader={{ georchestra.header.legacy | default ('false') }} 33 | 34 | # Header URL (can be absolute or relative). It is used only if useLegacyHeader is set to true. 
35 | # If different from default value "/header/", adapt 36 | # security-proxy/targets-mapping.properties accordingly 37 | # default /header/ 38 | headerUrl={{ georchestra.header.legacyurl | default ('/header/') }} 39 | 40 | # Stylesheet used to override default colors of header 41 | # More design can be set by overriding default classes & styles 42 | # Default empty string 43 | # georchestraStylesheet=/public/stylesheet.css 44 | {% if georchestra.header.stylesheet is defined %} 45 | georchestraStylesheet={{ georchestra.header.stylesheet }} 46 | {% endif %} 47 | 48 | # Config file for the header webcomponent 49 | # You can use this file to change the menu design, add new items, etc. 50 | # Full configuration can be found here: https://github.com/georchestra/header/ 51 | # Default empty string 52 | # headerConfigFile=/public/config.json 53 | {% if georchestra.header.configfile is defined %} 54 | headerConfigFile={{ georchestra.header.configfile }} 55 | {% endif %} 56 | 57 | # Logo URL 58 | # Used to set header's logo. 59 | logoUrl={{ georchestra.header.logourl | default('https://www.georchestra.org/public/georchestra-logo.svg') }} 60 | 61 | 62 | # Administrator email 63 | # Default email address used to send and receive mails in console 64 | # See the corresponding properties files to override this email 65 | # address for specific needs. 
66 | administratorEmail={{ console_adminemail }} 67 | 68 | ### PostgreSQL properties 69 | 70 | # PostgreSQL server domain name 71 | # Domain name, or IP address, of the PostgreSQL server 72 | pgsqlHost=localhost 73 | 74 | # PostgreSQL server port 75 | # Listening port of the PostgreSQL server 76 | pgsqlPort=5432 77 | 78 | # PostgreSQL database name 79 | # Default common PostgreSQL database for all geOrchestra modules 80 | pgsqlDatabase={{ georchestra.db.name }} 81 | 82 | # User to connect to PostgreSQL server 83 | # Default common PostgreSQL user for all geOrchestra modules 84 | pgsqlUser={{ georchestra.db.user }} 85 | 86 | # Password to connect to PostgreSQL server 87 | # Default common password of PostgreSQL user for all geOrchestra modules 88 | pgsqlPassword={{ georchestra.db.pass }} 89 | 90 | ### rabbitmq properties 91 | 92 | # Activate/deactivate Rabbitmq 93 | enableRabbitmqEvents=false 94 | 95 | # rabbitmq server domain name 96 | rabbitmqHost=localhost 97 | 98 | # rabbitmq user 99 | rabbitmqUser=georchestra 100 | 101 | # rabbitmq password 102 | rabbitmqPassword=georchestra 103 | 104 | # rabbitmq port 105 | rabbitmqPort=5672 106 | 107 | ### LDAP properties 108 | 109 | # LDAP server domain name 110 | # Domain name, or IP address, of the LDAP server 111 | ldapHost=localhost 112 | 113 | # LDAP server port 114 | # Listening port of the LDAP server 115 | ldapPort=389 116 | 117 | # LDAP Scheme 118 | # ldap or ldaps 119 | ldapScheme=ldap 120 | 121 | # Base DN of the LDAP directory 122 | # Base Distinguished Name of the LDAP directory. 
Also named root or suffix, see 123 | # http://www.zytrax.com/books/ldap/apd/index.html#base 124 | ldapBaseDn={{ openldap.basedn }} 125 | 126 | # Administrator DN 127 | # Distinguished name of the administrator user that connects to the LDAP server 128 | ldapAdminDn={{ openldap.rootdn }} 129 | 130 | # Administrator password 131 | # Password of the administrator user that connects to the LDAP server 132 | ldapAdminPassword={{ openldap.rootpw }} 133 | 134 | # Users RDN 135 | # Relative distinguished name of the "users" LDAP organization unit. E.g. if the 136 | # complete name (or DN) is ou=users,dc=georchestra,dc=org, the RDN is ou=users. 137 | ldapUsersRdn=ou=users 138 | 139 | # Roles RDN 140 | # Relative distinguished name of the "roles" LDAP organization unit. E.g. if the 141 | # complete name (or DN) is ou=roles,dc=georchestra,dc=org, the RDN is ou=roles. 142 | ldapRolesRdn=ou=roles 143 | 144 | # Organizations RDN 145 | # Relative distinguished name of the "orgs" LDAP organization unit. E.g. if the 146 | # complete name (or DN) is ou=orgs,dc=georchestra,dc=org, the RDN is ou=orgs. 
147 | ldapOrgsRdn=ou=orgs 148 | 149 | ### SMTP properties 150 | 151 | # SMTP server domain name 152 | # Domain name, or IP address, of the SMTP server 153 | smtpHost=localhost 154 | 155 | # SMTP server port 156 | # Listening port of the SMTP server 157 | smtpPort=25 158 | 159 | # Activates analytics 160 | # WARNING : When using the geOrchestra gateway, analytics should be disabled 161 | # if set to true, the analytics app will be enabled (https://github.com/georchestra/georchestra/tree/master/analytics) 162 | # console will add all links to analytics and also execute XHR requests 163 | # header will display analytics app 164 | # default: true 165 | analyticsEnabled=false 166 | -------------------------------------------------------------------------------- /roles/georchestra/templates/gateway/datadir/application.yaml.j2: -------------------------------------------------------------------------------- 1 | # Spring-boot/cloud application configuration. 2 | # 3 | # Here you can override the spring configuration provided by the Gateway 4 | # by default. 5 | # 6 | # See here for the default provided configuration: 7 | # https://github.com/georchestra/georchestra-gateway/blob/main/gateway/src/main/resources/application.yml 8 | 9 | server: 10 | reactive: 11 | session: 12 | timeout: 1440m # 24h 13 | -------------------------------------------------------------------------------- /roles/georchestra/templates/gateway/datadir/gateway.yaml.j2: -------------------------------------------------------------------------------- 1 | # Default georchestra gateway specific configuration, always imported from application.yml 2 | # configure target base URL's, headers and role based access, per service name. 
3 | # Replaces security-proxy's targets-mapping.properties, headers-mapping.properties, and security-mappings.xml 4 | # 5 | spring.config.import: application.yaml, security.yaml, routes.yaml, roles-mappings.yaml 6 | 7 | georchestra: 8 | gateway: 9 | # Redirection URL after a successful logout : Defaults to /?logout 10 | # logoutUrl : "/?logout" 11 | default-headers: 12 | # Default security headers to append to proxied requests 13 | proxy: true 14 | username: true 15 | roles: true 16 | org: true 17 | orgname: true 18 | global-access-rules: 19 | - intercept-url: /testPage 20 | anonymous: false 21 | allowed-roles: USER 22 | - intercept-url: 23 | - "/**" 24 | - "/proxy/?url=*" 25 | anonymous: true 26 | services: 27 | header: 28 | target: ${georchestra.gateway.services.header.target} 29 | access-rules: 30 | - intercept-url: /header/** 31 | anonymous: true 32 | analytics: 33 | target: ${georchestra.gateway.services.analytics.target} 34 | access-rules: 35 | - intercept-url: /analytics/** 36 | allowed-roles: SUPERUSER,ORGADMIN 37 | datafeeder: 38 | target: ${georchestra.gateway.services.datafeeder.target} 39 | headers: 40 | json-user: true 41 | json-organization: true 42 | access-rules: 43 | - intercept-url: /datafeeder/** 44 | anonymous: false 45 | allowed-roles: SUPERUSER,IMPORT 46 | import: 47 | target: ${georchestra.gateway.services.import.target} 48 | access-rules: 49 | - intercept-url: /import/** 50 | anonymous: false 51 | allowed-roles: SUPERUSER,IMPORT 52 | console: 53 | target: ${georchestra.gateway.services.console.target} 54 | access-rules: 55 | - intercept-url: 56 | - /console/public/** 57 | - /console/manager/public/** 58 | #/console/account resources are private except account/new and account/passwordRecovery 59 | - /console/account/new 60 | - /console/account/newPassword 61 | - /console/account/passwordRecovery 62 | - /console/account/js/** 63 | - /console/account/css/** 64 | - /console/account/fonts/** 65 | - /console/testPage 66 | anonymous: true 67 | - 
intercept-url: 68 | - /console/private/** 69 | - /console/manager/** 70 | - /console/*/emails 71 | - /console/*/sendEmail # /console/sendEmail features are reserved to SUPERUSER & delegated admins 72 | - /console/*/emailTemplates 73 | - /console/attachments 74 | allowed-roles: SUPERUSER,ORGADMIN 75 | - intercept-url: /console/emailProxy #activated for members having the EMAILPROXY role 76 | allowed-roles: EMAILPROXY 77 | - intercept-url: /console/internal/** 78 | allowed-roles: SUPERUSER 79 | - intercept-url: /console/account/** 80 | anonymous: false 81 | geonetwork: 82 | target: ${georchestra.gateway.services.geonetwork.target} 83 | headers: 84 | proxy: true 85 | username: false 86 | roles: false 87 | org: false 88 | orgname: true 89 | json-user: true 90 | geoserver: 91 | target: ${georchestra.gateway.services.geoserver.target} 92 | -------------------------------------------------------------------------------- /roles/georchestra/templates/gateway/datadir/roles-mappings.yaml.j2: -------------------------------------------------------------------------------- 1 | # Configuration file allowing to extend the list of security role names 2 | # assigned to a user, from the role names extracted by the authentication 3 | # provider (e.g. LDAP, Oauth2, OpenID Connect). 4 | # 5 | # Limited regular expression support: only the * character is allowed 6 | # as a wildcard on a source role name. For example, the following mapping 7 | # will add the `ROLE_USER` role to all authenticated users that already 8 | # have any role name starting with `ROLE_GP.GDI.` 9 | # 10 | # Note that for the key names (source roles) to include special characters, 11 | # you must use the format '[role.name.*]' for the literal string role.name.* 12 | # to be interpreted correctly. 
13 | # 14 | #georchestra: 15 | # gateway: 16 | # role-mappings: 17 | # '[ROLE_GP.GDI.*]' 18 | # - ROLE_USER 19 | # 20 | # If an authentication provider role name matches multiple mappings, 21 | # all the matching additional roles will be appended. For example, the 22 | # following mappings will add both `ROLE_USER` and `ROLE_ADMINISTRATOR` 23 | # to a user with role `ROLE_GP.GDI.ADMINISTRATOR`, but only `ROLE_USER` 24 | # to any other with a role starting with `ROLE_GP.GDI.`: 25 | #georchestra: 26 | # gateway: 27 | # role-mappings: 28 | # '[ROLE_GP.GDI.*]': 29 | # - ROLE_USER 30 | # '[ROLE_GP.GDI.ADMINISTRATOR]': 31 | # - ROLE_ADMINISTRATOR 32 | -------------------------------------------------------------------------------- /roles/georchestra/templates/gateway/datadir/routes.yaml.j2: -------------------------------------------------------------------------------- 1 | # File included by gateway.yaml to set up the spring-gateway's routes 2 | # target service URL's are defined in gateway.yaml (e.g. 
georchestra.gateway.services.<service>.target) 3 | 4 | spring: 5 | cloud: 6 | gateway: 7 | routes: 8 | - id: root 9 | uri: ${georchestra.gateway.services.header.target} 10 | predicates: 11 | - Path=/ 12 | filters: 13 | - RedirectTo=308, /header 14 | - id: header 15 | uri: ${georchestra.gateway.services.header.target} 16 | predicates: 17 | - Path=/header,/header/** 18 | - id: analytics 19 | uri: ${georchestra.gateway.services.analytics.target} 20 | predicates: 21 | - Path=/analytics/** 22 | - id: console 23 | uri: ${georchestra.gateway.services.console.target} 24 | predicates: 25 | - Path=/console/** 26 | - id: geonetwork 27 | uri: ${georchestra.gateway.services.geonetwork.target} 28 | predicates: 29 | - Path=/geonetwork/** 30 | filters: 31 | - name: CookieAffinity 32 | args: 33 | name: XSRF-TOKEN 34 | from: /geonetwork 35 | to: /datahub 36 | - id: geoserver 37 | uri: ${georchestra.gateway.services.geoserver.target} 38 | predicates: 39 | - Path=/geoserver/** 40 | - id: geowebcache 41 | uri: ${georchestra.gateway.services.geowebcache.target} 42 | predicates: 43 | - Path=/geowebcache/** 44 | - id: mapstore 45 | uri: ${georchestra.gateway.services.mapstore.target} 46 | predicates: 47 | - Path=/mapstore/** 48 | - id: datafeeder 49 | uri: ${georchestra.gateway.services.datafeeder.target} 50 | predicates: 51 | - Path=/datafeeder/** 52 | - id: import 53 | uri: ${georchestra.gateway.services.import.target} 54 | predicates: 55 | - Path=/import/** 56 | filters: 57 | - RewritePath=/import/(?<segment>.*),/$\{segment} 58 | - id: datahub 59 | uri: ${georchestra.gateway.services.datahub.target} 60 | predicates: 61 | - Path=/datahub/** 62 | - id: ogc-api-records 63 | uri: ${georchestra.gateway.services.ogc-api-records.target} 64 | predicates: 65 | - Path=/ogc-api-records/** 66 | 67 | georchestra.gateway.services: 68 | analytics.target: http://localhost:8280/analytics/ 69 | console.target: http://localhost:8280/console/ 70 | datafeeder.target: http://localhost:8480/datafeeder/ 71 | datahub.target: 
http://localhost:8280/datahub/ 72 | geonetwork.target: http://localhost:8280/geonetwork/ 73 | geoserver.target: http://localhost:8380/geoserver/ 74 | geowebcache.target: http://localhost:8280/geowebcache/ 75 | header.target: http://localhost:8280/header/ 76 | import.target: http://localhost:8280/import/ 77 | mapstore.target: http://localhost:8280/mapstore/ 78 | ogc-api-records.target: http://localhost:8480/ogc-api-records/ 79 | -------------------------------------------------------------------------------- /roles/georchestra/templates/gateway/datadir/security.yaml.j2: -------------------------------------------------------------------------------- 1 | georchestra: 2 | gateway: 3 | security: 4 | createNonExistingUsersInLDAP: true 5 | enableRabbitmqEvents: true 6 | oauth2: 7 | enabled: false 8 | ldap: 9 | default: 10 | enabled: true 11 | extended: true 12 | url: ldap://${ldapHost:localhost}:${ldapPort:389}/ 13 | baseDn: ${ldapBaseDn:dc=georchestra,dc=org} 14 | adminDn: ${ldapAdminDn:cn=admin,dc=georchestra,dc=org} 15 | adminPassword: ${ldapAdminPassword:secret} 16 | users: 17 | rdn: ${ldapUsersRdn:ou=users} 18 | searchFilter: ${ldapUserSearchFilter:(uid={0})} 19 | pendingUsersSearchBaseDN: ou=pendingusers 20 | protectedUsers: geoserver_privileged_user 21 | roles: 22 | rdn: ${ldapRolesRdn:ou=roles} 23 | searchFilter: ${ldapRolesSearchFilter:(member={0})} 24 | orgs: 25 | rdn: ${ldapOrgsRdn:ou=orgs} 26 | protectedRoles: ADMINISTRATOR, GN_.*, ORGADMIN, REFERENT, USER, SUPERUSER 27 | # uncomment for oauth 2.0 28 | #spring: 29 | # security: 30 | # oauth2: 31 | # client: 32 | # registration: 33 | # cas-oauth2: 34 | # client-id: external-oauth2 35 | # client-secret: aaaa 36 | # authorization-grant-type: authorization_code 37 | # redirect-uri: "{baseUrl}/login/oauth2/code/cas-oauth2" 38 | # provider: 39 | # cas-oauth2: 40 | # authorization-uri: https://georchestra.mydomain.org/cas/oauth2.0/authorize 41 | # token-uri: https://georchestra.mydomain.org/cas/oauth2.0/accessToken 
42 | # user-info-uri: https://georchestra.mydomain.org/cas/oauth2.0/profile 43 | # userNameAttribute: id 44 | -------------------------------------------------------------------------------- /roles/georchestra/templates/gateway/systemd/gateway.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=geOrchestra gateway service 3 | After=syslog.target 4 | 5 | [Service] 6 | User=www-data 7 | ExecStart=/usr/lib/jvm/temurin-21-jre-amd64/bin/java -Dserver.port={{ gateway.port }} -Dgeorchestra.datadir=/etc/georchestra -jar /usr/share/lib/georchestra-gateway/georchestra-gateway.jar 8 | SuccessExitStatus=143 9 | StandardOutput=append:{{ logs_basedir }}/gateway.log 10 | StandardError=append:{{ logs_basedir }}/gateway.log 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geofence/geofence-datasource-ovr.properties.j2: -------------------------------------------------------------------------------- 1 | #updated by config's GenerateConfig class 2 | #Tue Jan 13 11:17:13 CET 2015 3 | geofenceGlobalConfiguration.baseLayerStyle= 4 | geofenceGlobalConfiguration.baseLayerFormat=image/png 5 | geofenceEntityManagerFactory.jpaPropertyMap[hibernate.validator.autoregister_listeners]=false 6 | geofenceGlobalConfiguration.mapProjection=EPSG\:3857 7 | geofenceDataSource.url=jdbc\:postgresql\://localhost\:5432/{{ georchestra.db.name }} 8 | geofenceEntityManagerFactory.jpaPropertyMap[hibernate.hbm2ddl.auto]=update 9 | geofenceGlobalConfiguration.baseLayerName=osm:google 10 | geofenceDataSource.driverClassName=org.postgresql.Driver 11 | geofenceGlobalConfiguration.baseLayerTitle=OpenStreetMap 12 | geofenceGlobalConfiguration.mapCenterLon=273950.30933606 13 | geofenceEntityManagerFactory.jpaPropertyMap[hibernate.default_schema]=geofence 14 | geofenceDataSource.password={{ georchestra.db.pass }} 15 | 
geofenceGlobalConfiguration.mapCenterLat=5901246.3506556 16 | geofenceVendorAdapter.databasePlatform=org.hibernatespatial.postgis.PostgisDialect 17 | geofenceGlobalConfiguration.mapMaxResolution=156543.03390625 18 | geofenceGlobalConfiguration.mapZoom=4 19 | geofenceEntityManagerFactory.jpaPropertyMap[javax.persistence.validation.mode]=none 20 | geofenceEntityManagerFactory.jpaPropertyMap[hibernate.validator.apply_to_ddl]=false 21 | geofenceGlobalConfiguration.baseLayerURL=http://osm.geobretagne.fr/service/wms 22 | geofenceGlobalConfiguration.mapMaxExtent=-20037508.34,-20037508.34,20037508.34,20037508.34 23 | geofenceDataSource.username={{ georchestra.db.user }} 24 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geofence/log4j.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | {{ logs_basedir }} 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 16 | 17 | %d{ABSOLUTE} %5p %c{1}:%L - %m%n 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geonetwork/geonetwork.properties.j2: -------------------------------------------------------------------------------- 1 | # GeoNetwork datadir location: 2 | geonetwork.dir={{ geonetwork.datadir.path }} 3 | # Note that it can/should be overriden at runtime with -Dgeonetwork.dir=/path/to/... 
4 | 5 | # GeoNetwork datadir subdirectories are embedded by default, 6 | # but they can be eventually relocated elsewhere, eg to use geonetwork-provided 7 | # schemas: 8 | # geonetwork.schema.dir=/var/lib/jetty/webapps/geonetwork/WEB-INF/data/config/schema_plugins/ 9 | geonetwork.config.dir=${geonetwork.dir}/config/ 10 | geonetwork.schema.dir=${geonetwork.config.dir}/schema_plugins/ 11 | geonetwork.lucene.dir=${geonetwork.dir}/index/ 12 | geonetwork.thesauri.dir=${geonetwork.config.dir}/codelist/ 13 | geonetwork.data.dir=${geonetwork.dir}/data/metadata_data/ 14 | geonetwork.svn.dir=${geonetwork.dir}/data/metadata_subversion/ 15 | geonetwork.resources.dir=${geonetwork.dir}/data/resources/ 16 | geonetwork.upload.dir=${geonetwork.dir}/data/upload/ 17 | geonetwork.formatter.dir=${geonetwork.dir}/data/formatter/ 18 | geonetwork.htmlcache.dir=${geonetwork.resources.dir}/htmlcache/ 19 | geonetwork.indexConfig.dir=${geonetwork.config.dir}/index/ 20 | 21 | # AuthNZ integration uses console's REST API to fetch the canonical 22 | # users and groups, regardless of where geOrchestra gets them from (LDAP or otherwise) 23 | georchestra.console.url=http://localhost:{{ tomcat_instances.georchestra.port }} 24 | 25 | # Group synchronization mode. 26 | # Defines whether to synchronize GeoNetwork Groups with Georchestra Organizations or Roles. 27 | # Allowed values are 'orgs' and 'roles'. Defaults to 'orgs', meaning each synchronized 28 | # User will be matched to one GeoNetwork Group, which in turn matches the user's organization. 29 | # A value of 'roles' means GeoNetwork Groups will be synchronized with Georchestra roles instead 30 | # of organizations, and Users will be synchronized so that they belong to all the Groups that match 31 | # its roles 32 | geonetwork.syncMode=orgs 33 | 34 | # If using 'roles' sync mode, a Java regular expression can be used to filter 35 | # which Georchestra roles are to be mapped to GeoNetwork groups. 
Only those role names 36 | # that match the regular expression will be mapped. 37 | geonetwork.syncRolesFilter=EL_(.*) 38 | 39 | # Map geOrchestra user role names to GeoNetwork user profiles. 40 | # Available GN profile names are: 41 | # Administrator, UserAdmin, Reviewer, Editor, RegisteredUser, Guest, Monitor 42 | geonetwork.profiles.default:RegisteredUser 43 | geonetwork.profiles.rolemappings.[GN_ADMIN]=Administrator 44 | geonetwork.profiles.rolemappings.[GN_USERADMIN]=UserAdmin 45 | geonetwork.profiles.rolemappings.[GN_REVIEWER]=Reviewer 46 | geonetwork.profiles.rolemappings.[GN_EDITOR]=Editor 47 | geonetwork.profiles.rolemappings.[USER]=RegisteredUser 48 | 49 | # Scheduled users/groups synchronization configuration 50 | # timeUnit allows: MILLISECONDS/SECONDS/MINUTES/HOURS 51 | # The following are default values, uncomment and modify as needed 52 | #geonetwork.scheduled.enabled=true 53 | #geonetwork.scheduled.timeUnit=SECONDS 54 | #geonetwork.scheduled.retryOnFailure=true 55 | #geonetwork.scheduled.initialDelay=10 56 | #geonetwork.scheduled.retryDelay=10 57 | #geonetwork.scheduled.delayBetweenRuns=60 58 | 59 | # database configuration 60 | jdbc.host=localhost 61 | jdbc.port=5432 62 | jdbc.database={{ georchestra.db.name }} 63 | jdbc.schema={{ geonetwork.db.schema }} 64 | jdbc.username={{ georchestra.db.user }} 65 | jdbc.password={{ georchestra.db.pass }} 66 | jdbc.connectionProperties=currentSchema={{ geonetwork.db.schema }} 67 | 68 | # The maximum number of active connections that can be allocated from this pool 69 | # at the same time, or negative for no limit 70 | jdbc.basic.maxActive=50 71 | # The minimum number of active connections that can remain idle in the pool, 72 | # without extra ones being created, or 0 to create none 73 | jdbc.basic.minIdle=1 74 | # The maximum number of connections that can remain idle in the pool, without 75 | # extra ones being released, or negative for no limit 76 | jdbc.basic.maxIdle=10 77 | # The maximum number of open 
statements that can be allocated from the statement 78 | # pool at the same time, or non-positive for no limit 79 | jdbc.basic.maxOpenPreparedStatements=100 80 | # The maximum number of milliseconds that the pool will wait (when there are no 81 | # available connections) for a connection to be returned before throwing an 82 | # exception, or <= 0 to wait indefinitely 83 | jdbc.basic.maxWait=200 84 | # The indication of whether objects will be validated before being borrowed from 85 | # the pool: 86 | jdbc.basic.testOnBorrow=true 87 | # The SQL query that will be used to validate connections from this pool before 88 | # returning them to the caller 89 | jdbc.basic.validationQuery=SELECT 1 90 | 91 | # Schematron validation job, which scans non-harvested metadatas regularly, 92 | # then updates the validation table in the database, runs every 2 hours: 93 | schematron.job.activated=true 94 | schematron.job.cronExpression=0 0 0/2 * * ? 95 | 96 | # GN 4 97 | es.featureproxy.targeturi=http://localhost:9200/gn-features/{_} 98 | es.protocol=http 99 | es.port=9200 100 | es.host=localhost 101 | es.url=${es.protocol}://${es.host}:${es.port} 102 | kb.url=http://localhost:5601 103 | jms.url=tcp://localhost:61616 104 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geonetwork/gn-cloud-searching-application.yml.j2: -------------------------------------------------------------------------------- 1 | server: 2 | instance-id: ${spring.application.name}:${spring.application.instance_id:${spring.cloud.client.ip-address}}:${server.port} 3 | spring: 4 | rabbitmq.host: localhost 5 | cloud: 6 | config: 7 | discovery: 8 | enabled: false 9 | enabled: false 10 | management: 11 | health.ldap.enabled: false 12 | server.port: {{ gn_cloud_searching.port +1 }} 13 | eureka: 14 | client: 15 | enabled: false 16 | registerWithEureka: false 17 | fetch-registry: false 18 | 19 | gn: 20 | datadir: ${java.io.tmpdir}/gn-datadir 21 | baseurl: 
https://{{ georchestra.fqdn }}/geonetwork 22 | legacy.url: https://{{ georchestra.fqdn }}/geonetwork 23 | linkToLegacyGN4: true 24 | language.default: fr 25 | site: 26 | name: GeoNetwork 27 | organization: opensource 28 | index: 29 | url: http://localhost:9200 30 | records: gn-records 31 | username: 32 | password: 33 | api: 34 | metadata: 35 | license: 36 | name: GNU General Public License v2.0 37 | url: https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html 38 | search: 39 | # Full text on all fields 40 | # 'queryBase': '${any}', 41 | # Full text but more boost on title match 42 | queryBase: 'any:(${any}) resourceTitleObject.default:(${any})^2' 43 | scoreConfig: > 44 | { 45 | "boost": "5", 46 | "functions": [ 47 | { // Boost down member of a series 48 | "filter": { "exists": { "field": "parentUuid" } }, 49 | "weight": 0.3 50 | }, 51 | { // Boost down obsolete records 52 | "filter": { "match": { "cl_status.key": "obsolete" } }, 53 | "weight": 0.3 54 | }, 55 | { 56 | "gauss": { 57 | "dateStamp": { 58 | "scale": "365d", 59 | "offset": "90d", 60 | "decay": 0.5 61 | } 62 | } 63 | } 64 | ], 65 | "score_mode": "multiply" 66 | } 67 | sortables: 68 | - "relevance" 69 | - "createDate:desc" 70 | - "resourceTitleObject.default.keyword" 71 | - "rating:desc" 72 | - "popularity:desc" 73 | formats: 74 | - name : rss 75 | mimeType : application/rss+xml 76 | responseProcessor: RssResponseProcessorImpl 77 | operations: 78 | - items 79 | defaultMimeType: text/html 80 | sources: 81 | - "resourceTitleObject" 82 | - "resourceAbstractObject" 83 | - "resourceType" 84 | - "overview" 85 | - "uuid" 86 | - "schema" 87 | - "link" 88 | - "allKeywords" 89 | - "contactForResource" 90 | - "cl_status" 91 | - "edit" 92 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geonetwork/gn-cloud-searching.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=geOrchestra 
gn-cloud-searching webservice 3 | After=syslog.target 4 | 5 | [Service] 6 | User=www-data 7 | # gn-cloud-* webservices require java >= 11 8 | ExecStart=/usr/lib/jvm/{{ java_version }}/bin/java {% if "17" in java_version %}--add-exports=java.naming/com.sun.jndi.ldap=ALL-UNNAMED {% endif %}-Dsearch_without_sql -Dspring.config.location=/etc/georchestra/geonetwork/microservices/searching/config.yml -Dspring.cloud.bootstrap.enabled=false -Dserver.port={{ gn_cloud_searching.port }} -jar /usr/share/lib/searching.jar 9 | SuccessExitStatus=143 10 | StandardOutput=append:{{ logs_basedir }}/gn-cloud-searching.log 11 | StandardError=append:{{ logs_basedir }}/gn-cloud-searching.log 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geonetwork/gn-ogc-api-records-config.yml.j2: -------------------------------------------------------------------------------- 1 | server: 2 | port: {{ gn_ogc_api_records.port }} 3 | forward-headers-strategy: FRAMEWORK 4 | instance-id: ${spring.application.name}:${spring.application.instance_id:${spring.cloud.client.ip-address}}:${server.port} 5 | servlet: 6 | context-path: /ogc-api-records 7 | encoding: 8 | charset: UTF-8 9 | force-response: true 10 | management: 11 | health.ldap.enabled: false 12 | server.port: {{ gn_ogc_api_records.port +1 }} 13 | eureka: 14 | client: 15 | enabled: false 16 | registerWithEureka: false 17 | fetch-registry: false 18 | 19 | spring: 20 | profiles: standalone 21 | rabbitmq.host: localhost 22 | datasource: 23 | driver-class-name: org.postgresql.Driver 24 | url: jdbc:postgresql://localhost:5432/georchestra?currentSchema=geonetwork 25 | username: georchestra 26 | password: georchestra 27 | hikari: 28 | pool-name: ogc-api-records 29 | minimum-idle: 1 30 | maximum-pool-size: 10 31 | jpa: 32 | database-platform: org.hibernate.dialect.PostgreSQLDialect 33 | jpa.generate-ddl: false 34 | jpa.hibernate.ddl-auto: 
none 35 | cloud: 36 | config: 37 | discovery: 38 | enabled: false 39 | enabled: false 40 | 41 | gn: 42 | baseurl: https://{{ georchestra.fqdn }}/geonetwork 43 | legacy.url: https://{{ georchestra.fqdn }}/geonetwork 44 | language: 45 | default: fre 46 | linkToLegacyGN4: true 47 | site: 48 | name: GeoNetwork 49 | organization: opensource 50 | index: 51 | url: http://localhost:9200 52 | records: gn-records 53 | username: 54 | password: 55 | search: 56 | queryFilter: '+isTemplate:n AND -indexingError:true' # only show relevant records 57 | trackTotalHits: true 58 | formats: 59 | - name: html 60 | mimeType: text/html 61 | responseProcessor: JsonUserAndSelectionAwareResponseProcessorImpl 62 | operations: 63 | - root 64 | - conformance 65 | - collections 66 | - collection 67 | - items 68 | - item 69 | - name: xml 70 | mimeType: application/xml 71 | responseProcessor: XmlResponseProcessorImpl 72 | operations: 73 | - root 74 | - collections 75 | - collection 76 | - items 77 | - item 78 | - name: json 79 | mimeType: application/json 80 | responseProcessor: GeoJsonResponseProcessorImpl 81 | operations: 82 | - root 83 | - conformance 84 | - collections 85 | - collection 86 | - items 87 | - item 88 | - name : gn 89 | mimeType : application/gn+xml 90 | responseProcessor: XsltResponseProcessorImpl 91 | - name: opensearch 92 | mimeType: application/opensearchdescription+xml 93 | operations: 94 | - collection 95 | - name : schema.org 96 | mimeType : application/ld+json 97 | responseProcessor: JsonLdResponseProcessorImpl 98 | operations: 99 | - items 100 | - item 101 | - name: dcat 102 | mimeType: application/rdf+xml 103 | responseProcessor: DcatCatalogResponseProcessorImpl 104 | operations: 105 | - items 106 | - item 107 | - name : dcat_turtle 108 | mimeType : text/turtle 109 | responseProcessor: JsonUserAndSelectionAwareResponseProcessorImpl 110 | operations : 111 | - item 112 | - name : rss 113 | mimeType : application/rss+xml 114 | responseProcessor: RssResponseProcessorImpl 115 
| operations: 116 | - items 117 | - name : gnindex 118 | mimeType : application/gnindex+json 119 | responseProcessor : JsonUserAndSelectionAwareResponseProcessorImpl 120 | operations : 121 | - items 122 | - item 123 | - name : geojson 124 | mimeType : application/geo+json 125 | responseProcessor: GeoJsonResponseProcessorImpl 126 | operations: 127 | - items 128 | - item 129 | defaultMimeType: text/html 130 | 131 | logging: 132 | level: 133 | # geonetwork roots 134 | org.fao.geonet: INFO 135 | org.fao.geonet.ogcapi: INFO 136 | org.fao.geonet.searching: INFO 137 | springfox: 138 | documentation: 139 | swaggerUi: 140 | baseUrl: /openapi 141 | openApi: 142 | v3: 143 | path: /openapi/v3/api-docs 144 | swagger: 145 | v2 : 146 | path: /openapi/v2/api-docs 147 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geonetwork/gn-ogc-api-records.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=geOrchestra gn-ogc-api-records webservice 3 | After=syslog.target 4 | 5 | [Service] 6 | User=www-data 7 | ExecStart=/usr/lib/jvm/{{ java_version }}/bin/java {% if "17" in java_version %}--add-exports=java.naming/com.sun.jndi.ldap=ALL-UNNAMED {% endif %}-Dspring.config.location=/etc/georchestra/geonetwork/microservices/ogc-api-records/config.yml -jar /usr/share/lib/gn-ogc-api-records.jar 8 | Environment=SPRING_PROFILES_ACTIVE=standalone 9 | Environment=JAVA_OPTS=-Dfile.encoding=UTF-8 10 | Environment=LANG={{ system_locale }} 11 | SuccessExitStatus=143 12 | StandardOutput=append:{{ logs_basedir }}/gn-ogc-api-records.log 13 | StandardError=append:{{ logs_basedir }}/gn-ogc-api-records.log 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geonetwork/log4j2.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 
3 | 4 | /srv/log 5 | 6 | 7 | 8 | 9 | 10 | 11 | ${sys:log_dir:-log_dir}/geonetwork.log 12 | ${sys:log_dir:-log_dir}/geonetwork.log-%i.log 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | %date{ISO8601}{${ctx:timeZone}} %-5level [%logger] - %message%n 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geoserver/geofence-geoserver.properties.j2: -------------------------------------------------------------------------------- 1 | #updated by config's GenerateConfig class 2 | #Tue Jan 13 11:17:13 CET 2015 3 | servicesUrl=http\://localhost\:{{ tomcat_instances['georchestra']['port'] }}/geofence/remoting/RuleReader 4 | instanceName=default-gs 5 | allowDynamicStyles=true 6 | allowRemoteAndInlineLayers=true 7 | -------------------------------------------------------------------------------- /roles/georchestra/templates/geowebcache/log4j.properties.j2: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=WARN, R 2 | 3 | log4j.category.org.geowebcache.seed=WARN, R 4 | log4j.category.org.geowebcache.diskquota=WARN, R 5 | 6 | log4j.appender.R = org.apache.log4j.rolling.RollingFileAppender 7 | log4j.appender.R.RollingPolicy = org.apache.log4j.rolling.TimeBasedRollingPolicy 8 | 
log4j.appender.R.RollingPolicy.FileNamePattern = {{ logs_basedir }}/geowebcache.%d.log.gz 9 | log4j.appender.R.RollingPolicy.ActiveFileName = {{ logs_basedir }}/geowebcache.log 10 | log4j.appender.R.Append = true 11 | log4j.appender.R.layout = org.apache.log4j.PatternLayout 12 | log4j.appender.R.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n 13 | 14 | -------------------------------------------------------------------------------- /roles/georchestra/templates/mapstore/log4j2.properties.j2: -------------------------------------------------------------------------------- 1 | rootLogger.level = INFO 2 | appenders= console, file 3 | 4 | 5 | appender.console.type = Console 6 | appender.console.name = LogToConsole 7 | appender.console.layout.type = PatternLayout 8 | appender.console.layout.pattern = %p %d{yyyy-MM-dd HH:mm:ss.SSS} %c::%M:%L - %m%n 9 | rootLogger.appenderRef.stdout.ref = LogToConsole 10 | rootLogger.appenderRef.console.ref = LogToConsole 11 | 12 | appender.file.type = File 13 | appender.file.name = LogToFile2 14 | appender.file.fileName={{ logs_basedir }}/mapstore.log 15 | appender.file.layout.type=PatternLayout 16 | appender.file.layout.pattern=%p %d{yyyy-MM-dd HH:mm:ss.SSS} %C{1}.%M() - %m %n 17 | rootLogger.appenderRef.file.ref = LogToFile2 18 | 19 | logger.restsrv.name=it.geosolutions.geostore.services.rest 20 | logger.restsrv.level= INFO 21 | logger.hibernate1.name=org.hibernate 22 | logger.hibernate1.level=INFO 23 | logger.trg1.name=com.trg 24 | logger.trg1.level=INFO 25 | -------------------------------------------------------------------------------- /roles/georchestra/templates/mapstore/proxy.properties.j2: -------------------------------------------------------------------------------- 1 | # ################## 2 | # CONNECTION MANAGER 3 | # ################## 4 | timeout = 30000 5 | connection_timeout = 30000 6 | max_total_connections = 60 7 | default_max_connections_per_host = 6 8 | 9 | # ################# 10 | # Stream Byte 
Size 11 | # ################# 12 | defaultStreamByteSize=1024 13 | 14 | # ########### 15 | # WHITE LISTS 16 | # ########### 17 | # White lists, if defined, limit the requests that are accepted and handled by the proxy. Any request that is not 18 | # matching a defined whitelist will be rejected. 19 | 20 | # hostnameWhitelist defines the accepted target hosts for the internal proxy. 21 | # The list should be set to the hosts whose CORS headers do not allow direct access 22 | hostnameWhitelist = {{ georchestra.fqdn }} 23 | 24 | # mimetype whitelist limit the accepted values for the contentype header in response from the proxied host 25 | mimetypeWhitelist = application/force-download,text/html,text/plain,application/xml,text/xml,application/vnd.ogc.sld+xml,application/vnd.ogc.gml,application/json,application/vnd.ogc.wms_xml,application/x-www-form-urlencoded,image/png,application/pdf,text/csv,application/zip,text/csv;charset=UTF-8 26 | 27 | # methods whitelist limit the accepted values for the HTTP method in requests 28 | methodsWhitelist = GET,POST,PUT 29 | 30 | # hosts whitelist is like hostname whitelist, but IP addresses are used instead of host names 31 | #hostsWhitelist = 127.0.0.1 32 | 33 | # reqtype whitelist allow a regular expression based filter on the request urls 34 | # it is possible to defined one or more whitelist, each with a specific filter 35 | # only urls matching one of the whitelist will be forwarded 36 | #reqtypeWhitelist.capabilities = (([&]?([Rr][Ee][Qq][Uu][Ee][Ss][Tt]=[Gg]et[Cc]apabilities))|([&]?(version=1\\.1\\.1)))+ 37 | reqtypeWhitelist.capabilities = .*[Gg]et[Cc]apabilities.* 38 | reqtypeWhitelist.featureinfo = .*[Gg]et[Ff]eature[Ii]nfo.* 39 | reqtypeWhitelist.csw = .*csw.* 40 | reqtypeWhitelist.geostore = .*geostore.* 41 | reqtypeWhitelist.generic = (.*exist.*)|(.*pdf.*)|(.*map.*)|(.*wms.*)|(.*wmts.*)|(.*wfs.*)|(.*ows.*) 42 | -------------------------------------------------------------------------------- 
/roles/georchestra/templates/mviewerstudio/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "app_conf": { 3 | "studio_title": "Mviewer Studio", 4 | "mviewer_version": "3.9", 5 | "mviewerstudio_version": "4.0.1", 6 | "api": "api/app", 7 | "store_style_service": "api/style", 8 | "mviewer_instance": "/mviewer/", 9 | {% raw %}"publish_url": "/mviewer/?config=apps/prod/{{config}}.xml",{% endraw %} 10 | "conf_path_from_mviewer": "apps/store/", 11 | "mviewer_short_url": { 12 | "used": true, 13 | "apps_folder": "store", 14 | "public_folder": "prod" 15 | }, 16 | "external_themes": { 17 | "used": true, 18 | "url": "https://geobretagne.fr/minicatalog/csv" 19 | }, 20 | "user_info": "api/user", 21 | "proxy": "proxy/?url=", 22 | "user_info_visible": true, 23 | "app_form_placeholders": { 24 | "app_title": "Kartenn", 25 | "logo_url": "https://geobretagne.fr/pub/logo/region-bretagne.jpg", 26 | "help_file": "mviewer_help.html" 27 | }, 28 | "map": { 29 | "center": [-307903.74898791354, 6141345.088741366], 30 | "zoom": 7 31 | }, 32 | "baselayers": { 33 | "positron": { 34 | "id": "positron", 35 | "thumbgallery": "img/basemap/positron.png", 36 | "title": "CartoDb", 37 | "label": "Positron", 38 | "type": "OSM", 39 | "url": "https://{a-c}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png", 40 | "attribution": "Map tiles by CartoDb, under CC BY 3.0 " 41 | }, 42 | "ortho_ign": { 43 | "id": "ortho_ign", 44 | "thumbgallery": "img/basemap/ortho.jpg", 45 | "title": "IGN", 46 | "label": "Photographies aériennes IGN", 47 | "type": "WMTS", 48 | "url": "https://data.geopf.fr/wmts?", 49 | "layers": "ORTHOIMAGERY.ORTHOPHOTOS", 50 | "format": "image/jpeg", 51 | "fromcapacity": "false", 52 | "attribution": "", 53 | "style": "normal", 54 | "matrixset": "PM", 55 | "maxzoom": "22" 56 | }, 57 | "darkmatter": { 58 | "id": "darkmatter", 59 | "thumbgallery": "img/basemap/darkmatter.png", 60 | "title": "CartoDb", 61 | "label": "Dark Matter", 62 | "type": 
"OSM", 63 | "url": "https://{a-c}.basemaps.cartocdn.com/dark_all/{z}/{x}/{y}.png", 64 | "maxzoom": "20", 65 | "attribution": "Map tiles by CartoDb, under CC BY 3.0 " 66 | }, 67 | "esriworldimagery": { 68 | "id": "esriworldimagery", 69 | "thumbgallery": "img/basemap/esriworldwide.jpg", 70 | "title": "Esri", 71 | "label": "Esri world imagery", 72 | "type": "OSM", 73 | "url": "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}", 74 | "attribution": "Esri world imagery" 75 | }, 76 | "ortho1": { 77 | "id": "ortho1", 78 | "thumbgallery": "img/basemap/ortho.jpg", 79 | "title": "GéoBretagne", 80 | "label": "Photo aérienne actuelle GéoBretagne", 81 | "type": "WMTS", 82 | "url": "https://tile.geobretagne.fr/gwc02/service/wmts", 83 | "layers": "satellite", 84 | "format": "image/png", 85 | "style": "_null", 86 | "matrixset": "EPSG:3857", 87 | "fromcapacity": "false", 88 | "attribution": "partenaires GéoBretagne - Megalis Bretagne - IGN - PlanetObserver" 89 | }, 90 | "ortho_ir": { 91 | "id": "ortho_ir", 92 | "thumbgallery": "img/basemap/ir.jpg", 93 | "title": "GéoBretagne", 94 | "label": "Photo aérienne infra rouge GéoBretagne", 95 | "type": "WMTS", 96 | "url": "https://geobretagne.fr/geoserver/gwc/service/wmts", 97 | "layers": "photo:ir-composite", 98 | "format": "image/jpeg", 99 | "style": "_null", 100 | "matrixset": "EPSG:3857", 101 | "fromcapacity": "false", 102 | "attribution": "partenaires GéoBretagne - Megalis Bretagne - IGN" 103 | }, 104 | "osm_google": { 105 | "id": "osm_google", 106 | "thumbgallery": "img/basemap/osm_google.png", 107 | "title": "GéoBretagne", 108 | "label": "OpenStreetMap GéoBretagne", 109 | "type": "WMS", 110 | "url": "https://osm.geobretagne.fr/gwc01/service/wms", 111 | "layers": "osm:google", 112 | "format": "image/png", 113 | "attribution": "GéoBretagne. 
Données : les contributeurs d'OpenStreetMap , ODbL " 114 | }, 115 | "osm": { 116 | "id": "osm", 117 | "thumbgallery": "img/basemap/osm.png", 118 | "title": "OSM", 119 | "label": "OpenStreetMap", 120 | "type": "OSM", 121 | "url": "https://{a-c}.tile.openstreetmap.org/{z}/{x}/{y}.png", 122 | "attribution": "Données : les contributeurs d'OpenStreetMap ODbL " 123 | }, 124 | "osm_bzh": { 125 | "id": "osm_bzh", 126 | "thumbgallery": "img/basemap/osm.png", 127 | "title": "OSM BZH", 128 | "label": "OpenStreetMap en breton", 129 | "type": "OSM", 130 | "maxzoom": "20", 131 | "url": "https://tile.openstreetmap.bzh/br/{z}/{x}/{y}.png", 132 | "attribution": "Kendaolerien OpenStreetMap" 133 | }, 134 | "plan_ign": { 135 | "id": "plan_ign", 136 | "thumbgallery": "img/basemap/scan-express.jpg", 137 | "title": "IGN", 138 | "label": "Plan IGN v2", 139 | "type": "WMTS", 140 | "url": "https://data.geopf.fr/wmts?", 141 | "layers": "GEOGRAPHICALGRIDSYSTEMS.PLANIGNV2", 142 | "format": "image/png", 143 | "fromcapacity": "false", 144 | "attribution": "", 145 | "style": "normal", 146 | "matrixset": "PM", 147 | "maxzoom": "22" 148 | } 149 | }, 150 | "data_providers": { 151 | "csw": [{ 152 | "title": "Catalogue local", 153 | "url": "https://{{ georchestra.fqdn }}/geonetwork/srv/fre/csw", 154 | "baseref": "https://{{ georchestra.fqdn }}/geonetwork/srv/eng/catalog.search?node=srv#/metadata/" 155 | }, 156 | { 157 | "title": "Catalogue GéoBretagne", 158 | "url": "https://geobretagne.fr/geonetwork/srv/fre/csw", 159 | "baseref": "https://geobretagne.fr/geonetwork/srv/eng/catalog.search?node=srv#/metadata/" 160 | }, 161 | { 162 | "title": "Catalogue Région Bretagne", 163 | "url": "https://kartenn.region-bretagne.fr/geonetwork/srv/fre/csw", 164 | "baseref": "https://kartenn.region-bretagne.fr/geonetwork/srv/fre/catalog.search#/metadata/" 165 | }, 166 | { 167 | "title": "Catalogue de la Région Grand Est", 168 | "url": "https://datagrandest.fr/geonetwork/srv/fre/csw", 169 | "baseref": 
"https://datagrandest.fr/geonetwork/srv/eng/catalog.search?node=srv#/metadata/" 170 | } 171 | ], 172 | "wms": [{ 173 | "title": "Serveur WMS local", 174 | "url": "https://{{ georchestra.fqdn }}/geoserver/rb/wms" 175 | }, 176 | { 177 | "title": "Serveur WMS de la Région", 178 | "url": "https://ows.region-bretagne.fr/geoserver/rb/wms" 179 | }] 180 | }, 181 | "default_params": { 182 | "layer": { 183 | "info_format": "text/html" 184 | } 185 | } 186 | } 187 | } -------------------------------------------------------------------------------- /roles/georchestra/templates/mviewerstudio/mviewerstudio.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=mviewerstudio 3 | After=network.target 4 | 5 | [Service] 6 | User=mviewer 7 | Environment="EXPORT_CONF_FOLDER=/srv/data/mviewer/apps/store/" 8 | Environment="CONF_PUBLISH_PATH_FROM_MVIEWER=apps/prod" 9 | Environment="CONF_PATH_FROM_MVIEWER=apps/store" 10 | Environment="MVIEWERSTUDIO_PUBLISH_PATH=/srv/data/mviewer/apps/prod" 11 | Environment="DEFAULT_ORG=public" 12 | Environment="LOG_LEVEL=INFO" 13 | WorkingDirectory=/srv/apps/mviewerstudio/srv/python 14 | ExecStart=/srv/apps/mviewerstudio/srv/python/.venv/bin/gunicorn \ 15 | -b 127.0.0.1:{{ mviewerstudio.port}} \ 16 | --access-logfile {{ logs_basedir }}/mviewer/mviewerstudio-access.log \ 17 | --log-level info \ 18 | --error-logfile {{ logs_basedir }}/mviewer/mviewerstudio-error.log \ 19 | mviewerstudio_backend.app:app 20 | 21 | [Install] 22 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/georchestra/templates/security-proxy/log4j.properties.j2: -------------------------------------------------------------------------------- 1 | log4j.rootLogger=INFO, R 2 | 3 | log4j.logger.org.georchestra.security=INFO 4 | log4j.logger.org.georchestra.security.statistics=INFO, OGCSTATISTICS 5 | #log4j.logger.org.georchestra.security.statistics-common=INFO, 
NETWORKSOCKET 6 | 7 | log4j.logger.OGCServiceMessageFormatter=INFO 8 | log4j.logger.org.springframework=INFO 9 | log4j.logger.org.springframework.security=INFO 10 | log4j.logger.org.jasig=INFO 11 | 12 | log4j.appender.R = org.apache.log4j.rolling.RollingFileAppender 13 | log4j.appender.R.RollingPolicy = org.apache.log4j.rolling.TimeBasedRollingPolicy 14 | log4j.appender.R.RollingPolicy.FileNamePattern = {{ logs_basedir }}/security-proxy.%d.log.gz 15 | log4j.appender.R.RollingPolicy.ActiveFileName = {{ logs_basedir }}/security-proxy.log 16 | log4j.appender.R.Append = true 17 | log4j.appender.R.layout = org.apache.log4j.PatternLayout 18 | log4j.appender.R.layout.ConversionPattern = %d{yyyy-MM-dd HH:mm:ss} %c{1} [%p] %m%n 19 | 20 | # network socket (Logstash / Elasticsearch) 21 | #log4j.appender.NETWORKSOCKET=org.apache.log4j.net.SocketAppender 22 | #log4j.appender.NETWORKSOCKET.Port=4712 23 | #log4j.appender.NETWORKSOCKET.RemoteHost=localhost 24 | 25 | # OGC services statistics 26 | log4j.appender.OGCSTATISTICS=org.georchestra.ogcservstatistics.log4j.OGCServicesAppender 27 | log4j.appender.OGCSTATISTICS.activated=true 28 | -------------------------------------------------------------------------------- /roles/georchestra/templates/security-proxy/security-proxy.properties.j2: -------------------------------------------------------------------------------- 1 | # General purposes properties 2 | 3 | # Note: the PostgreSQL-related properties configure the JDBC access to the 4 | # database where the statistics gathered by the OGC-server-statistics module 5 | # are stored 6 | 7 | # PostgreSQL server domain name 8 | # default: see default.properties - uncomment to override 9 | #pgsqlHost= 10 | 11 | # PostgreSQL server port 12 | # default: see default.properties - uncomment to override 13 | #pgsqlPort= 14 | 15 | # PostgreSQL database name 16 | # default: see default.properties - uncomment to override 17 | #pgsqlDatabase= 18 | 19 | # User to connect to PostGreSQL server 20 | # 
default: see default.properties - uncomment to override 21 | #pgsqlUser= 22 | 23 | # Password to connect to PostGreSQL server 24 | # default: see default.properties - uncomment to override 25 | #pgsqlPassword= 26 | 27 | # LDAP server domain name 28 | # default: see default.properties - uncomment to override 29 | #ldapHost= 30 | 31 | # LDAP server port 32 | # default: see default.properties - uncomment to override 33 | #ldapPort= 34 | 35 | # Base DN of the LDAP directory 36 | # default: see default.properties - uncomment to override 37 | #ldapBaseDn= 38 | 39 | # Administrator DN 40 | # default: see default.properties - uncomment to override 41 | #ldapAdminDn= 42 | 43 | # Administrator password 44 | # default: see default.properties - uncomment to override 45 | #ldapAdminPassword= 46 | 47 | # Users RDN 48 | # Note: it's the base DN from where to search for the logged-in user. This 49 | # mostly to verify the user exists. 50 | # default: see default.properties - uncomment to override 51 | #ldapUsersRdn= 52 | 53 | # Roles RDN 54 | # Note: it's the base DN to use for looking up the roles/groups/authorities 55 | # of the logged-in user. Normally the ldap is configured like: 56 | # ou=roles 57 | # cn=somerole 58 | # member=uid=username,ou=users,dc=georchestra,dc=org 59 | # 60 | # ou can be cn, ou, or some other option. member is often uniquemember as well 61 | # default: see default.properties - uncomment to override 62 | #ldapRolesRdn= 63 | 64 | # Organizations RDN 65 | # Note: it's the base DN from where to search for organization. Used to fill 66 | # sec-org http header.. 
67 | # default: see default.properties - uncomment to override 68 | #ldapOrgsRdn= 69 | 70 | 71 | # ------ proxy-servlet.xml --------- 72 | 73 | # Default timeout : 20min should be enough to handle big extraction (~ 4x10^9 pixels) 74 | # default: 1200000 75 | #http_client_timeout=1200000 76 | 77 | 78 | # ------- applicationContext-security.xml ------- 79 | 80 | # Anonymous role 81 | # default: ROLE_ANONYMOUS 82 | #anonymousRole=ROLE_ANONYMOUS 83 | 84 | # Proxy context path 85 | # default: /sec 86 | #proxy.contextPath=/sec 87 | 88 | # URL called when user has logged out 89 | # default: https://${domainName}/cas/logout?fromgeorchestra 90 | #logout-success-url=https://${domainName}/cas/logout?fromgeorchestra 91 | 92 | # URL where the user can login 93 | # default: https://${domainName}/cas/login 94 | #casLoginUrl=https://${domainName}/cas/login 95 | 96 | # URL that the security system uses to validate the cas tickets 97 | # default: https://${domainName}/cas 98 | #casTicketValidation=https://${domainName}/cas 99 | casTicketValidation=http://localhost:{% if tomcat_version == 10 %}{{ cas.port }}{% else %}{{ tomcat_instances.proxycas.port }}{% endif %}/cas 100 | 101 | # After going to the cas login cas forwards to this URL where the authorities and permissions are checked 102 | # default: https://${domainName}/login/cas 103 | #proxyCallback=https://${domainName}/login/cas 104 | 105 | # List of trusted proxies, all requests from listed server will be trusted and 106 | # will bypass security 107 | # default: 108 | #trustedProxy= 109 | 110 | 111 | # LDAP 112 | 113 | 114 | # The second part of looking up the user 115 | # default: (uid={0}) 116 | #userSearchFilter=(uid={0}) 117 | 118 | # The attribute of the role which is the rolename 119 | # default: cn 120 | #roleRoleAttribute=cn 121 | 122 | # The search filter that selects the roles that the user belongs to. 
123 | # If a match is found, the containing object is one of the roles the user belongs to 124 | # default: (member=uid={1},${ldapUsersRdn},${ldapBaseDn}) 125 | #roleSearchFilter=(member=uid={1},${ldapUsersRdn},${ldapBaseDn}) 126 | 127 | 128 | # Name of the realm 129 | # default: georchestra 130 | #realmName=georchestra 131 | 132 | # The security-proxy will 302 redirect / to the defaultTarget value (/header by default). 133 | # Change it if your homepage (eg a CMS) is located on /portal/ for instance 134 | # default: ${headerUrl} 135 | #defaultTarget=${headerUrl} 136 | 137 | 138 | # Connection pool settings for the logger appender that inserts OGC request stats on the database 139 | 140 | # Minimum connections pool size 141 | # default: 2 142 | #ogcStats.minPoolSize=2 143 | 144 | # Maximum connections pool size 145 | # default: 5 146 | #ogcStats.maxPoolSize=5 147 | 148 | # Timeout of connections pool 149 | # default: 2000 150 | #ogcStats.timeout=2000 151 | 152 | # Max time unused connections are kept idle in the pool. Unit is seconds for c3p0. 153 | # default: 60 154 | #ogcStats.maxIdleTime=60 155 | 156 | # A comma-separated list of privileged users that are allowed to impersonate other users 157 | # default: geoserver_privileged_user 158 | #trustedUsers={{ geoserver.privileged.user }} 159 | 160 | # A comma-separated list of patterns of urls that (when matched) will have 161 | # the x-forwarded-* headers removed 162 | # default: .*geo.admin.ch.*,.*rolnp.fr.* 163 | #removedXForwardedHeaders=.*geo.admin.ch.*,.*rolnp.fr.* 164 | 165 | # A comma-separated list of user agents that will receive a basic authentication 166 | # challenge instead of being redirected to CAS login 167 | # eg. 
userAgents=.*ArcGIS.*,.*uDig.*,.*QGIS.* 168 | # default: empty 169 | #userAgents= 170 | 171 | # Whether to allow semicolons in URLs to avoid errors like: 172 | # the request was rejected because the URL contained a potentially malicious String ";" 173 | # allowSemicolon=false 174 | -------------------------------------------------------------------------------- /roles/georchestra/templates/security-proxy/targets-mapping.properties.j2: -------------------------------------------------------------------------------- 1 | {% for f in georchestra_wars %} 2 | {% if georchestra_wars[f].tomcat != 'proxycas' %} 3 | {{ f }}=http://localhost:{{ tomcat_instances[georchestra_wars[f].tomcat]['port']}}/{{ f }}/ 4 | {% endif %} 5 | {% endfor %} 6 | {% if datafeeder.enabled %} 7 | datafeeder=http://localhost:{{ datafeeder.port }}/datafeeder/ 8 | {% endif %} 9 | {% if mviewer.enabled %} 10 | mviewer=http://localhost:{{ mviewer.port }}/ 11 | {% endif %} 12 | {% if mviewerstudio.enabled %} 13 | mviewerstudio=http://localhost:{{ mviewerstudio.port }}/ 14 | {% endif %} 15 | {% if superset.enabled %} 16 | {{ superset.urlprefix }}=http://localhost:{{ superset.port }}/{{ superset.urlprefix }}/ 17 | {% endif %} 18 | -------------------------------------------------------------------------------- /roles/georchestra/templates/sviewerConfig.js.j2: -------------------------------------------------------------------------------- 1 | customConfig = { 2 | title: 'geOrchestra mobile', 3 | 4 | /** 5 | * force default language, see etc/i18n.js 6 | */ 7 | // lang: 'fr', 8 | 9 | /** 10 | * base url of the geOrchestra SDI. Layers coming from this SDI 11 | * will have enhanced features. 
12 | */ 13 | geOrchestraBaseUrl: 'https://{{ georchestra.fqdn }}/', 14 | 15 | /** 16 | * projection 17 | */ 18 | projcode: 'EPSG:3857', 19 | 20 | /** 21 | * map bounds 22 | */ 23 | initialExtent: [-40000,5400000,780000,6000000], 24 | maxExtent: [-20037508.34, -20037508.34, 20037508.34, 20037508.34], 25 | restrictedExtent: [-20037508.34, -20037508.34, 20037508.34, 20037508.34], 26 | 27 | /** 28 | * getFeatureInfo control 29 | */ 30 | maxFeatures: 10, 31 | nodata: '\n', 32 | 33 | /** 34 | * openLS control 35 | */ 36 | openLSGeocodeUrl: "http://gpp3-wxs.ign.fr/{{ georchestra.ign_api_key }}/geoportail/ols?", 37 | 38 | /** 39 | * background layers (EPSG:3857) 40 | */ 41 | layersBackground: [ 42 | new ol.layer.Tile({ 43 | source: new ol.source.OSM() 44 | }) 45 | ], 46 | 47 | /** 48 | * social media links (prefixes) 49 | */ 50 | socialMedia: { 51 | 'Twitter' : 'https://twitter.com/intent/tweet?text=', 52 | 'Google+' : 'https://plus.google.com/share?url=', 53 | 'Facebook': 'http://www.facebook.com/sharer/sharer.php?u=' 54 | } 55 | }; 56 | -------------------------------------------------------------------------------- /roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Test nginx and reload 3 | ansible.builtin.command: nginx -t 4 | notify: Reload nginx 5 | 6 | - name: Reload nginx 7 | ansible.builtin.service: 8 | name: nginx 9 | state: reloaded 10 | -------------------------------------------------------------------------------- /roles/nginx/tasks/datahub.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fetch the datahub archive 3 | get_url: 4 | url: "{{ datahub.url }}" 5 | dest: /var/www/georchestra/htdocs/datahub.zip 6 | 7 | - name: Unzip the archive 8 | unarchive: 9 | src: /var/www/georchestra/htdocs/datahub.zip 10 | dest: /var/www/georchestra/htdocs/ 11 | remote_src: true 12 | when: not ansible_check_mode 13 | 14 | - name: Remove the 
downloaded archive 15 | file: 16 | path: /var/www/georchestra/htdocs/datahub.zip 17 | state: absent 18 | 19 | - name: set base url in index.html 20 | replace: 21 | path: /var/www/georchestra/htdocs/datahub/index.html 22 | regexp: 23 | replace: 24 | 25 | - name: add geor-header webcomponent in index.html 26 | blockinfile: 27 | path: /var/www/georchestra/htdocs/datahub/index.html 28 | marker: "" 29 | insertafter: 30 | block: | 31 | 32 | 41 | 42 | -------------------------------------------------------------------------------- /roles/nginx/tasks/generate_cert.yml: -------------------------------------------------------------------------------- 1 | - name: Generate self-signed cert ## XXX will warn that it's a CA cert ? 2 | command: > 3 | openssl req -new -nodes -x509 -subj "/C=FR/L=Somewhere/O=IT/CN={{ georchestra.fqdn }}" 4 | -days 3650 -keyout /var/www/georchestra/ssl/georchestra.key -out /var/www/georchestra/ssl/georchestra.crt -extensions v3_ca 5 | args: 6 | creates: /var/www/georchestra/ssl/georchestra.crt 7 | 8 | - name: fix perms on certificate 9 | file: 10 | path: /var/www/georchestra/ssl/georchestra.key 11 | mode: "0400" 12 | -------------------------------------------------------------------------------- /roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install nginx 3 | apt: 4 | pkg: nginx 5 | state: present 6 | 7 | - name: Create web directories 8 | file: 9 | name: "{{ item }}" 10 | state: directory 11 | with_items: 12 | - /var/www/georchestra/ 13 | - /var/www/georchestra/ssl 14 | 15 | - name: Clone htdocs dir 16 | git: 17 | repo: https://github.com/georchestra/htdocs.git 18 | dest: /var/www/georchestra/htdocs/ 19 | version: master 20 | accept_hostkey: true 21 | 22 | - import_tasks: datahub.yml 23 | when: datahub.enabled 24 | tags: datahub 25 | 26 | - import_tasks: mdeditor.yml 27 | when: mdeditor.enabled 28 | tags: mdeditor 29 | 30 | - import_tasks: generate_cert.yml 31 | 
when: force_https 32 | 33 | - name: Check if vhost exists 34 | stat: 35 | path: /etc/nginx/sites-available/georchestra 36 | register: vhost_exists 37 | 38 | - name: Template georchestra vhost 39 | tags: vhost 40 | template: 41 | src: vhost.j2 42 | dest: /etc/nginx/sites-available/georchestra 43 | notify: Test nginx and reload 44 | 45 | - name: Enable georchestra vhost 46 | file: 47 | dest: /etc/nginx/sites-enabled/georchestra 48 | src: /etc/nginx/sites-available/georchestra 49 | state: link 50 | owner: root 51 | group: root 52 | notify: Test nginx and reload 53 | 54 | - name: Restart nginx if vhost was deployed and we now listen on port 443 55 | service: 56 | name: nginx 57 | state: restarted 58 | when: force_https and not vhost_exists.stat.exists 59 | 60 | - import_tasks: mviewer.yml 61 | when: mviewer.enabled 62 | tags: mviewer 63 | -------------------------------------------------------------------------------- /roles/nginx/tasks/mdeditor.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fetch/unzip the mdeditor archive 3 | unarchive: 4 | src: "{{ mdeditor.url }}" 5 | dest: /var/www/georchestra/htdocs/ 6 | remote_src: true 7 | 8 | - name: Symlink to subdir 9 | file: 10 | src: dist/apps/metadata-editor 11 | dest: /var/www/georchestra/htdocs/metadata-editor 12 | state: link 13 | 14 | - name: set base url in index.html 15 | replace: 16 | path: /var/www/georchestra/htdocs/metadata-editor/index.html 17 | regexp: 18 | replace: 19 | 20 | - name: add geor-header webcomponent in index.html 21 | blockinfile: 22 | path: /var/www/georchestra/htdocs/metadata-editor/index.html 23 | marker: "" 24 | insertafter: 25 | block: | 26 | 27 | 28 | 37 | 38 | -------------------------------------------------------------------------------- /roles/nginx/tasks/mviewer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Checkout mviewer 3 | git: 4 | repo: "{{ mviewer.gitrepo }}" 5 | 
version: "{{ mviewer.gitversion }}" 6 | dest: /var/www/mviewer 7 | when: mviewer.enabled 8 | 9 | - name: Template mviewer vhost 10 | template: 11 | src: vhost-mviewer.j2 12 | dest: /etc/nginx/sites-available/mviewer 13 | notify: Test nginx and reload 14 | when: mviewer.enabled 15 | 16 | - name: Enable mviewer vhost 17 | file: 18 | dest: /etc/nginx/sites-enabled/mviewer 19 | src: /etc/nginx/sites-available/mviewer 20 | state: link 21 | owner: root 22 | group: root 23 | notify: Test nginx and reload 24 | when: mviewer.enabled 25 | -------------------------------------------------------------------------------- /roles/nginx/templates/vhost-mviewer.j2: -------------------------------------------------------------------------------- 1 | # vhost for Mviewer and Mviewer studio 2 | server { 3 | listen {{ mviewer.port }}; 4 | listen [::]:{{ mviewer.port }}; 5 | 6 | server_name localhost_mviewer; 7 | access_log /var/log/nginx/mviewer_access.log; 8 | error_log /var/log/nginx/mviewer_error.log; 9 | 10 | root /var/www/mviewer/; 11 | 12 | location / { 13 | # First attempt to serve request as file, then 14 | # as directory, then fall back to displaying a 404. 
15 | try_files $uri $uri/ =404; 16 | } 17 | 18 | } 19 | -------------------------------------------------------------------------------- /roles/nginx/templates/vhost.j2: -------------------------------------------------------------------------------- 1 | upstream proxycas_backend { 2 | {% if gateway.enabled %} 3 | server localhost:{{ gateway.port }}; 4 | {% else %} 5 | server localhost:{{ tomcat_instances.proxycas.port }}; 6 | {% endif %} 7 | } 8 | 9 | {% if force_https %} 10 | # redirect http to https 11 | server { 12 | listen 80; 13 | listen [::]:80; 14 | server_name {{ georchestra.fqdn }}; 15 | return 301 https://$server_name$request_uri; 16 | } 17 | {% else %} 18 | # to get real client IP in accesslogs 19 | set_real_ip_from {{ reverse_proxy_real_ip }}; 20 | real_ip_header {{ reverse_proxy_real_ip_header }}; 21 | {% endif %} 22 | 23 | # main entrypoint 24 | server { 25 | {% if force_https %} 26 | listen 443 ssl; 27 | listen [::]:443 ssl; 28 | add_header Strict-Transport-Security "max-age=31536000"; 29 | ssl_certificate /var/www/georchestra/ssl/georchestra.crt; 30 | ssl_certificate_key /var/www/georchestra/ssl/georchestra.key; 31 | ssl_prefer_server_ciphers on; 32 | ssl_protocols TLSv1.2 TLSv1.3; 33 | #ssl_dhparam /etc/ssl/dh_2048.pem; 34 | 35 | {% endif %} 36 | server_name {{ georchestra.fqdn }}; 37 | access_log /var/log/nginx/georchestra_access.log; 38 | error_log /var/log/nginx/georchestra_error.log; 39 | 40 | root /var/www/georchestra/htdocs/; 41 | 42 | client_max_body_size {{ georchestra.max_body_size }}; 43 | 44 | location / { 45 | try_files $uri $uri/ @proxycas; 46 | } 47 | location @proxycas { 48 | proxy_pass http://proxycas_backend; 49 | proxy_redirect off; 50 | proxy_set_header Host $host; 51 | proxy_set_header X-Real-IP $remote_addr; 52 | } 53 | 54 | # make sure nobody external tries to use the internal proxy 55 | location /proxy/ { 56 | valid_referers server_names; 57 | if ($invalid_referer) { 58 | return 403; 59 | } 60 | proxy_set_header Host $host; 
61 | proxy_set_header X-Real-IP $remote_addr; 62 | proxy_pass http://proxycas_backend; 63 | } 64 | 65 | # CORS geonetwork 66 | location ~ ^/geonetwork/srv/.*/csw { 67 | if ( $request_method = OPTIONS ) { 68 | add_header "Access-Control-Allow-Origin" "*"; 69 | add_header "Access-Control-Allow-Method" "POST, GET, OPTIONS"; 70 | add_header "Access-Control-Allow-Headers" "Origin, X-Requested-With, Content-Type, Accept"; 71 | return 204; 72 | } 73 | proxy_set_header Host $host; 74 | proxy_set_header X-Real-IP $remote_addr; 75 | proxy_pass http://proxycas_backend; 76 | } 77 | 78 | # CORS geoserver 79 | location ~ ^/geoserver/(|.*/)(wms|wfs|ows|wcs)$ { 80 | add_header "Access-Control-Allow-Origin" "*"; 81 | add_header "Access-Control-Allow-Headers" "Origin, X-Requested-With, Content-Type, Accept"; 82 | proxy_set_header Host $host; 83 | proxy_set_header X-Real-IP $remote_addr; 84 | proxy_pass http://proxycas_backend; 85 | } 86 | 87 | {% if datahub.enabled %} 88 | # redirect default to datahub 89 | rewrite ^/$ /datahub/home/search redirect; 90 | {% endif %} 91 | 92 | {%if gn_cloud_searching.enabled %} 93 | # rss 94 | location ~ ^/geonetwork/srv/.../rss.search { 95 | add_header Content-Type "application/rss+xml; charset=utf-8"; 96 | proxy_set_header Accept-Encoding ""; 97 | rewrite ^.*$ /portal/api/search/records/rss.search?f=rss? 
break; 98 | proxy_pass http://localhost:{{ gn_cloud_searching.port }}; 99 | } 100 | {% endif %} 101 | 102 | {% if gn_ogc_api_records.enabled %} 103 | location /ogc-api-records { 104 | proxy_set_header Host $host; 105 | {% if gn_ogc_api_records.timeout is defined %} 106 | proxy_read_timeout {{ gn_ogc_api_records.timeout }}; 107 | proxy_send_timeout {{ gn_ogc_api_records.timeout }}; 108 | {% endif %} 109 | proxy_pass http://localhost:{{ gn_ogc_api_records.port }}/ogc-api-records; 110 | } 111 | {% endif %} 112 | 113 | {% if datahub.enabled %} 114 | location /datahub { 115 | try_files $uri $uri/ /datahub/index.html; 116 | add_header Cache-Control 'max-age=86400'; # 24h 117 | # catch login requests that don't reach the sec-proxy, and redirect to CAS 118 | if ( $args ~ '\blogin[=&]?' ) { 119 | return 301 /cas/login?service=https%3A%2F%2F{{ georchestra.fqdn }}%2Flogin%2Fcas; 120 | } 121 | location ~ /index.html|.*\.toml|.*\.json$ { 122 | expires -1; 123 | add_header Cache-Control 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; 124 | } 125 | location ~ .*\.css$|.*\.js$ { 126 | add_header Cache-Control 'max-age=86400'; # 24h 127 | } 128 | location ~ /datahub/assets/img/(.*).(webp|png|svg) { 129 | alias /etc/georchestra/datahub/assets/img/$1.$2; 130 | } 131 | location = /datahub/assets/favicon.ico { 132 | alias /etc/georchestra/datahub/assets/favicon.ico; 133 | } 134 | location = /datahub/assets/configuration/default.toml { 135 | alias /etc/georchestra/datahub/conf/default.toml; 136 | } 137 | } 138 | {% endif %} 139 | {% if mdeditor.enabled %} 140 | 141 | location /metadata-editor { 142 | try_files $uri $uri/ /metadata-editor/index.html; 143 | add_header Cache-Control 'max-age=86400'; # 24h 144 | location ~ /index.html|.*\.toml|.*\.json$ { 145 | expires -1; 146 | add_header Cache-Control 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'; 147 | } 148 | } 149 | {% endif %} 150 | } 151 | 
-------------------------------------------------------------------------------- /roles/openldap/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart slapd 3 | service: 4 | name: slapd 5 | state: restarted 6 | -------------------------------------------------------------------------------- /roles/openldap/tasks/clean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create temporary directory 3 | check_mode: false 4 | shell: mktemp -d 5 | register: tempdir 6 | 7 | - name: temporarly store cleartext admin rootpw 8 | copy: 9 | dest: "{{ tempdir.stdout }}/admin-georchestra.pass" 10 | content: "{{ openldap.rootpw }}" 11 | mode: "0400" 12 | 13 | - name: purge DIT 14 | ignore_errors: true 15 | command: ldapdelete -D {{ openldap.rootdn }} -x -y {{ tempdir.stdout }}/admin-georchestra.pass -r "{{ openldap.basedn }}" 16 | 17 | - name: recursively purge temp dir 18 | file: 19 | dest: "{{ tempdir.stdout }}" 20 | state: absent 21 | 22 | - name: remove cn=config bits 23 | file: 24 | dest: /etc/ldap/slapd.d/cn=config/{{ item }} 25 | state: absent 26 | with_items: 27 | - olcDatabase={2}mdb.ldif 28 | - cn=schema/cn={4}groupofmembers.ldif 29 | - cn=schema/cn={5}georchestra.ldif 30 | - cn=schema/cn={6}openssh-lpk-openldap.ldif 31 | notify: restart slapd 32 | 33 | - name: remove slapd datadir 34 | file: 35 | dest: /var/lib/ldap/{{ item }} 36 | state: absent 37 | with_items: 38 | - data.mdb 39 | - lock.mdb 40 | when: cleanup_slapd is defined 41 | -------------------------------------------------------------------------------- /roles/openldap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: installing dependencies 3 | apt: 4 | pkg: [git, slapd, ldap-utils] 5 | state: present 6 | update_cache: true 7 | 8 | - name: create temporary directory 9 | check_mode: false 10 | shell: mktemp -d 11 | 
register: tempdir 12 | 13 | - name: fetch LDAP files 14 | check_mode: false 15 | get_url: 16 | dest: "{{ tempdir.stdout }}/{{ item | basename }}.ldif" 17 | url: "{{ openldap.gitrepo }}/{{ openldap.gitversion }}/ldap/{{ item }}.ldif" 18 | validate_certs: false 19 | with_items: "{{ openldap.ldifs }}" 20 | 21 | - name: fix ldif to match our layout/rootdn/basedn 22 | replace: 23 | dest: "{{ tempdir.stdout }}/{{ item.1 | basename }}.ldif" 24 | regexp: "{{ item.0.pat }}" 25 | replace: "{{ item.0.val }}" 26 | backup: true 27 | with_nested: 28 | - - { pat: "olcDatabase=\\{1\\}mdb", val: "olcDatabase={2}mdb" } 29 | - { pat: "cn=admin,dc=georchestra,dc=org", val: "{{ openldap.rootdn }}" } 30 | - { pat: "dc=georchestra,dc=org", val: "{{ openldap.basedn }}" } 31 | - { pat: "dc: georchestra", val: "dc: {{ openldap.topdc }}" } 32 | - { pat: "olcRootPW: secret", val: "olcRootPW: {{ openldap.rootpw }}" } 33 | - { pat: geoserver_privileged_user, val: "{{ geoserver.privileged.user }}" } 34 | - { pat: "userPassword:: e1NIQX1XMlY4d2UrOFdNanpma28rMUtZVDFZcWZFVDQ9", val: "userPassword: {{ geoserver.privileged.pass }}" } 35 | - "{{ openldap.ldifs }}" 36 | 37 | - name: bootstrap the db 38 | command: > 39 | ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ tempdir.stdout }}/bootstrap.ldif" 40 | args: 41 | creates: /etc/ldap/slapd.d/cn=config/olcDatabase={2}mdb.ldif 42 | 43 | - name: import groupofMembers 44 | command: > 45 | ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ tempdir.stdout }}/groupofmembers.ldif" 46 | args: 47 | creates: /etc/ldap/slapd.d/cn=config/cn=schema/cn={4}groupofmembers.ldif 48 | 49 | - name: import georchestraSchema 50 | command: > 51 | ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ tempdir.stdout }}/georchestraSchema.ldif" 52 | args: 53 | creates: /etc/ldap/slapd.d/cn=config/cn=schema/cn={5}georchestra.ldif 54 | 55 | - name: import openssh 56 | command: > 57 | ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ tempdir.stdout }}/openssh.ldif" 58 | args: 59 | creates: 
/etc/ldap/slapd.d/cn=config/cn=schema/cn={6}openssh-lpk-openldap.ldif 60 | 61 | # loaded by default in debian's slapd ? 62 | - name: import memberof 63 | command: > 64 | ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ tempdir.stdout }}/memberof.ldif" 65 | args: 66 | creates: /etc/ldap/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={0}memberof.ldif 67 | 68 | - name: import lastbind 69 | command: > 70 | ldapadd -Y EXTERNAL -H ldapi:/// -f "{{ tempdir.stdout }}/lastbind.ldif" 71 | args: 72 | creates: /etc/ldap/slapd.d/cn=config/olcDatabase={2}mdb/olcOverlay={1}lastbind.ldif 73 | 74 | - name: temporarly store cleartext admin rootpw 75 | copy: 76 | dest: "{{ tempdir.stdout }}/admin-georchestra.pass" 77 | content: "{{ openldap.rootpw }}" 78 | mode: "0400" 79 | 80 | - name: check if the root already exists 81 | command: ldapsearch -x -b {{ openldap.basedn }} dc={{ openldap.topdc }} 82 | ignore_errors: true 83 | register: topdc 84 | 85 | - name: create root dn 86 | command: > 87 | ldapadd -D {{ openldap.rootdn }} -x -y {{ tempdir.stdout }}/admin-georchestra.pass -f "{{ tempdir.stdout }}/root.ldif" 88 | when: "topdc.stdout is defined and '# numEntries: 1' not in topdc.stdout" 89 | 90 | - name: check if the testadmin user already exists 91 | command: ldapsearch -x -b {{ openldap.basedn }} uid=testadmin 92 | ignore_errors: true 93 | register: testadmindn 94 | 95 | - name: create tree and users 96 | command: > 97 | ldapadd -D {{ openldap.rootdn }} -x -y {{ tempdir.stdout }}/admin-georchestra.pass -f "{{ tempdir.stdout }}/georchestra.ldif" 98 | when: "testadmindn.stdout is defined and '# numEntries: 1' not in testadmindn.stdout" 99 | 100 | - name: check if ou=groups exists 101 | command: ldapsearch -x -b {{ openldap.basedn }} ou=groups 102 | ignore_errors: true 103 | check_mode: false 104 | register: groupsou 105 | 106 | - import_tasks: migrate_groups.yml 107 | when: "'# numEntries: 1' in groupsou.stdout" 108 | 109 | - name: recursively purge temp dir 110 | check_mode: false 111 | 
file: 112 | dest: "{{ tempdir.stdout }}" 113 | state: absent 114 | 115 | - import_tasks: clean.yml 116 | tags: [cleanup, openldap_cleanup] 117 | when: cleanup is defined 118 | -------------------------------------------------------------------------------- /roles/openldap/tasks/migrate_groups.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: rename ou=groups to ou=roles 3 | command: > 4 | ldapmodrdn -r -D {{ openldap.rootdn }} -x -y {{ tempdir.stdout }}/admin-georchestra.pass ou=groups,{{ openldap.basedn }} ou=roles 5 | 6 | - name: create ldif for orgs 7 | copy: 8 | dest: "{{ tempdir.stdout }}/orgs.ldif" 9 | content: "dn: ou=orgs,{{ openldap.basedn }}\nobjectClass: organizationalUnit\nou: orgs" 10 | 11 | - name: create ou=orgs 12 | command: > 13 | ldapadd -vv -D {{ openldap.rootdn }} -x -y {{ tempdir.stdout }}/admin-georchestra.pass -f {{ tempdir.stdout }}/orgs.ldif 14 | 15 | - name: rename geonetwork roles 16 | command: > 17 | ldapmodrdn -r -D {{ openldap.rootdn }} -x -y {{ tempdir.stdout }}/admin-georchestra.pass cn={{ item.from }},ou=roles,{{ openldap.basedn }} cn={{ item.to }} 18 | with_items: 19 | - { from: SV_USER, to: USER } 20 | - { from: SV_EDITOR, to: GN_EDITOR } 21 | - { from: SV_REVIEWER, to: GN_REVIEWER } 22 | - { from: SV_ADMIN, to: GN_ADMIN } 23 | -------------------------------------------------------------------------------- /roles/postgresql/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart postgresql 3 | service: 4 | name: postgresql 5 | state: restarted 6 | -------------------------------------------------------------------------------- /roles/postgresql/tasks/cadastrapp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create cadastrapp user 3 | become: true 4 | become_user: postgres 5 | postgresql_user: 6 | name: "{{ cadastrapp.db.user }}" 7 | password: "{{ 
cadastrapp.db.pass }}" 8 | 9 | - name: create cadastrapp main database 10 | become: true 11 | become_user: postgres 12 | postgresql_db: 13 | name: "{{ cadastrapp.db.name }}" 14 | owner: "{{ cadastrapp.db.user }}" 15 | template: template0 16 | encoding: UTF8 17 | 18 | - name: check if cadastrapp schema already exists 19 | become: true 20 | become_user: postgres 21 | command: psql -d {{ cadastrapp.db.name }} -t -c "\dn" 22 | register: loaded_schemas 23 | 24 | - name: create cadastrapp schema in cadastrapp db 25 | become: true 26 | become_user: postgres 27 | command: psql -d {{ cadastrapp.db.name }} -c 'CREATE SCHEMA {{ cadastrapp.db.schema }}' 28 | when: loaded_schemas.stdout is defined and cadastrapp.db.schema not in loaded_schemas.stdout 29 | 30 | - name: grant all privileges to cadastrapp user on its schema 31 | become: true 32 | become_user: postgres 33 | postgresql_privs: 34 | database: "{{ cadastrapp.db.name }}" 35 | privs: ALL 36 | type: schema 37 | roles: "{{ cadastrapp.db.user }}" 38 | objs: "{{ cadastrapp.db.schema }}" 39 | 40 | - name: list tables 41 | tags: runsql 42 | check_mode: false 43 | become: true 44 | become_user: postgres 45 | command: psql -d {{ cadastrapp.db.name }} -t -c "\dt {{ cadastrapp.db.schema }}.* " 46 | register: tables 47 | 48 | - name: list views 49 | tags: runsql 50 | check_mode: false 51 | become: true 52 | become_user: postgres 53 | command: psql -d {{ cadastrapp.db.name }} -t -c "\dm {{ cadastrapp.db.schema }}.*" 54 | register: views 55 | 56 | - name: list indexes 57 | tags: runsql 58 | check_mode: false 59 | become: true 60 | become_user: postgres 61 | command: psql -d {{ cadastrapp.db.name }} -t -c "\di {{ cadastrapp.db.schema }}.*" 62 | register: indexes 63 | 64 | - name: remove old sql scripts 65 | tags: fetchsql 66 | file: 67 | path: "{{ cadastrappsql.tmpdir }}" 68 | state: absent 69 | 70 | - name: create temp dir 71 | tags: fetchsql 72 | file: 73 | path: "{{ cadastrappsql.tmpdir }}" 74 | state: directory 75 | 76 | - name: 
get the sql scripts for views 77 | tags: fetchsql 78 | get_url: 79 | dest: "{{ cadastrappsql.tmpdir }}/{{ item.script }}.sql" 80 | url: https://raw.github.com/georchestra/cadastrapp/master/database/sql/vues/{{ item.script }}.sql 81 | # validate_certs: no 82 | with_items: "{{ cadastrappsql.views }}" 83 | 84 | - name: get the sql scripts for tables 85 | tags: fetchsql 86 | get_url: 87 | dest: "{{ cadastrappsql.tmpdir }}/{{ item.script }}.sql" 88 | url: https://raw.github.com/georchestra/cadastrapp/master/database/sql/tables/{{ item.script }}.sql 89 | # validate_certs: no 90 | with_items: "{{ cadastrappsql.tables }}" 91 | 92 | - name: get the sql scripts for indexes 93 | tags: fetchsql 94 | get_url: 95 | dest: "{{ cadastrappsql.tmpdir }}/{{ item.script }}.sql" 96 | url: https://raw.github.com/georchestra/cadastrapp/master/database/sql/indexes/{{ item.script }}.sql 97 | with_items: "{{ cadastrappsql.indexes }}" 98 | 99 | - name: fix the sql scripts 100 | tags: fixsql 101 | replace: 102 | dest: "{{ cadastrappsql.tmpdir }}/{{ item.1.script }}.sql" 103 | regexp: "{{ item.0.pat }}" 104 | replace: "{{ item.0.val }}" 105 | backup: true 106 | with_nested: 107 | - - { pat: "'CREATE OR REPLACE VIEW'", val: "'CREATE MATERIALIZED VIEW'" } 108 | - { pat: "#DBHost_qgis", val: "{{ cadastrapp.qgisdb.host }}" } 109 | - { pat: "#DBPort_qgis", val: "{{ cadastrapp.qgisdb.port }}" } 110 | - { pat: "#DBName_qgis", val: "{{ cadastrapp.qgisdb.name }}" } 111 | - { pat: "#DBUser_qgis", val: "{{ cadastrapp.qgisdb.user }}" } 112 | - { pat: "#DBpasswd_qgis", val: "{{ cadastrapp.qgisdb.pass }}" } 113 | - { pat: "#DBSchema_qgis", val: "{{ cadastrapp.qgisdb.schema }}" } 114 | - { pat: "#schema_cadastrapp", val: "{{ cadastrapp.db.schema }}" } 115 | - { pat: "#user_cadastrapp", val: "{{ cadastrapp.db.user }}" } 116 | - "{{ cadastrappsql.views }} + {{ cadastrappsql.tables }} + {{ cadastrappsql.indexes }}" 117 | 118 | - name: create tables 119 | tags: runsql 120 | become: true 121 | become_user: 
postgres 122 | command: psql -d {{ cadastrapp.db.name }} -f {{ cadastrappsql.tmpdir }}/{{ item.script }}.sql 123 | with_items: "{{ cadastrappsql.tables }}" 124 | when: tables.stdout is defined and item.script not in tables.stdout 125 | 126 | - name: create views 127 | tags: runsql 128 | become: true 129 | become_user: postgres 130 | command: psql -d {{ cadastrapp.db.name }} -v ON_ERROR_STOP=yes -f {{ cadastrappsql.tmpdir }}/{{ item.script }}.sql 131 | with_items: "{{ cadastrappsql.views }}" 132 | when: views.stdout is defined and item.view not in views.stdout 133 | 134 | - name: create indexes 135 | tags: runsql 136 | become: true 137 | become_user: postgres 138 | command: psql -d {{ cadastrapp.db.name }} -v ON_ERROR_STOP=yes -f {{ cadastrappsql.tmpdir }}/{{ item.script }}.sql 139 | with_items: "{{ cadastrappsql.indexes }}" 140 | when: indexes.stdout is defined and item.index not in indexes.stdout 141 | -------------------------------------------------------------------------------- /roles/postgresql/tasks/clean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # instances need to be stopped so that the db can be removed 3 | - name: stop instances 4 | service: 5 | name: tomcat@{{ item }} 6 | enabled: false 7 | state: stopped 8 | with_items: "{{ tomcat_instances | list }}" 9 | 10 | - name: delete georchestra main database 11 | become: true 12 | become_user: postgres 13 | postgresql_db: 14 | name: "{{ item }}" 15 | state: absent 16 | with_items: 17 | - "{{ georchestra.db.name }}" 18 | 19 | - name: delete db users 20 | become: true 21 | become_user: postgres 22 | postgresql_user: 23 | name: "{{ item }}" 24 | state: absent 25 | with_items: 26 | - "{{ georchestra.db.user }}" 27 | -------------------------------------------------------------------------------- /roles/postgresql/tasks/geofence.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if plpgsql is already 
loaded 3 | command: psql -d {{ georchestra.db.name }} -t -c "\dx" 4 | register: loaded_extensions 5 | 6 | - name: createlang plpgsql 7 | become: true 8 | become_user: postgres 9 | command: psql -d {{ georchestra.db.name }} -t -c "CREATE EXTENSION plpgsql;" 10 | when: loaded_extensions.stdout is defined and 'plpgsql' not in loaded_extensions.stdout 11 | 12 | - name: check if postgis is already loaded 13 | become: true 14 | become_user: postgres 15 | command: psql -d {{ georchestra.db.name }} -t -c "\dT" 16 | register: loaded_types 17 | 18 | - name: create postgis extension 19 | become: true 20 | become_user: postgres 21 | command: psql -d {{ georchestra.db.name }} -t -c "CREATE EXTENSION postgis;" 22 | when: loaded_types.stdout is defined and 'geometry' not in loaded_types.stdout 23 | 24 | - name: grant SELECT privilege to georchestra user on spatial_ref_sys table 25 | become: true 26 | become_user: postgres 27 | postgresql_privs: 28 | database: "{{ georchestra.db.name }}" 29 | privs: SELECT 30 | type: table 31 | roles: "{{ georchestra.db.user }}" 32 | objs: spatial_ref_sys 33 | 34 | - name: grant SELECT|INSERT|DELETE privileges to georchestra user on geometry_columns 35 | become: true 36 | become_user: postgres 37 | postgresql_privs: 38 | database: "{{ georchestra.db.name }}" 39 | privs: SELECT,INSERT,DELETE 40 | type: table 41 | roles: "{{ georchestra.db.user }}" 42 | objs: geometry_columns 43 | -------------------------------------------------------------------------------- /roles/postgresql/tasks/geonetwork.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check if geonetwork schema already exists 3 | command: psql -d {{ georchestra.db.name }} -t -c "\dn" 4 | register: loaded_schemas 5 | 6 | - name: create geonetwork schema in georchestra db 7 | command: psql -d {{ georchestra.db.name }} -c 'CREATE SCHEMA {{ geonetwork.db.schema }} AUTHORIZATION {{ georchestra.db.user }}' 8 | when: loaded_schemas.stdout is 
defined and geonetwork.db.schema not in loaded_schemas.stdout 9 | -------------------------------------------------------------------------------- /roles/postgresql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: setting locale 3 | locale_gen: 4 | name: "{{ system_locale }}" 5 | state: present 6 | 7 | - name: install sudo 8 | apt: 9 | name: sudo 10 | state: present 11 | 12 | - name: installing dependencies 13 | apt: 14 | pkg: postgis 15 | state: present 16 | update_cache: true 17 | 18 | - name: install python-psycopg2 for ansible psql modules 19 | apt: 20 | name: python3-psycopg2 21 | state: present 22 | 23 | - name: create georchestra user 24 | become: true 25 | become_user: postgres 26 | postgresql_user: 27 | name: "{{ georchestra.db.user }}" 28 | password: "{{ georchestra.db.pass }}" 29 | encrypted: true 30 | 31 | - name: create georchestra main database 32 | become: true 33 | become_user: postgres 34 | postgresql_db: 35 | name: "{{ georchestra.db.name }}" 36 | owner: "{{ georchestra.db.user }}" 37 | template: template0 38 | encoding: UTF8 39 | 40 | - import_tasks: geonetwork.yml 41 | become: true 42 | become_user: postgres 43 | tags: postgresql_geonetwork 44 | 45 | - import_tasks: geofence.yml 46 | become: true 47 | become_user: postgres 48 | tags: postgresql_geofence 49 | 50 | - import_tasks: other_schemas.yml 51 | become: true 52 | become_user: postgres 53 | tags: postgresql_other_schemas 54 | 55 | - import_tasks: cadastrapp.yml 56 | tags: postgresql_cadastrapp 57 | when: cadastrapp.enabled 58 | 59 | - import_tasks: clean.yml 60 | tags: [cleanup, postgresql_cleanup] 61 | when: cleanup is defined 62 | -------------------------------------------------------------------------------- /roles/postgresql/tasks/other_schemas.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create temporary directory 3 | shell: mktemp -d 4 | check_mode: false 5 
| register: tempdir 6 | 7 | - name: fetch db structure 8 | get_url: 9 | dest: "{{ tempdir.stdout }}/{{ item.0 }}.sql" 10 | url: "{{ item.1 }}" 11 | validate_certs: false 12 | with_together: 13 | - "{{ other_schemas }}" 14 | - "{{ other_schemas_urls }}" 15 | 16 | - name: check if schema already exists 17 | command: psql -d {{ georchestra.db.name }} -t -c "select * from pg_tables where schemaname='{{ item }}';" 18 | register: schema_exists 19 | with_items: "{{ other_schemas }}" 20 | 21 | - name: create missing schemas 22 | command: psql -d {{ georchestra.db.name }} -f {{ tempdir.stdout }}/{{ item.item }}.sql 23 | when: item.stdout is defined and item.item not in item.stdout 24 | with_items: "{{ schema_exists.results }}" 25 | 26 | - name: check if console.delegations exists 27 | check_mode: false 28 | command: psql -d {{ georchestra.db.name }} -t -c "\dt console.delegations" 29 | register: delegations_exist 30 | 31 | - name: fetch console data 32 | get_url: 33 | url: https://raw.github.com/georchestra/georchestra/master/postgresql/docker-entrypoint-initdb.d/041-console-data.sql 34 | dest: "{{ tempdir.stdout }}/041-console-data.sql" 35 | when: delegations_exist.stdout is defined and 'delegations' not in delegations_exist.stdout 36 | 37 | - name: remove docker-only lines 38 | lineinfile: 39 | dest: "{{ tempdir.stdout }}/041-console-data.sql" 40 | regexp: .*docker.* 41 | state: absent 42 | when: delegations_exist.stdout is defined and 'delegations' not in delegations_exist.stdout 43 | 44 | - name: create console example data 45 | command: psql -d {{ georchestra.db.name }} -f {{ tempdir.stdout }}/041-console-data.sql 46 | when: delegations_exist.stdout is defined and 'delegations' not in delegations_exist.stdout 47 | 48 | - name: recursively purge temp dir 49 | check_mode: false 50 | file: 51 | dest: "{{ tempdir.stdout }}" 52 | state: absent 53 | 54 | - name: grant all privileges to georchestra user on schema 55 | postgresql_privs: 56 | database: "{{ georchestra.db.name 
}}" 57 | privs: ALL 58 | type: schema 59 | roles: "{{ georchestra.db.user }}" 60 | objs: "{{ other_schemas | join(',') }}" 61 | 62 | - name: grant all privileges to georchestra user on sequences in schema 63 | postgresql_privs: 64 | database: "{{ georchestra.db.name }}" 65 | privs: ALL 66 | type: sequence 67 | schema: "{{ item }}" 68 | roles: "{{ georchestra.db.user }}" 69 | objs: ALL_IN_SCHEMA 70 | with_items: "{{ other_schemas }}" 71 | 72 | - name: grant all privileges to georchestra user on tables in schema 73 | postgresql_privs: 74 | database: "{{ georchestra.db.name }}" 75 | privs: ALL 76 | type: table 77 | schema: "{{ item }}" 78 | roles: "{{ georchestra.db.user }}" 79 | objs: ALL_IN_SCHEMA 80 | with_items: "{{ other_schemas }}" 81 | -------------------------------------------------------------------------------- /roles/postgresql/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | other_schemas: [console, geofence, ogcstatistics, mapstore, datafeeder] 3 | other_schemas_urls: 4 | - https://raw.github.com/georchestra/georchestra/master/postgresql/docker-entrypoint-initdb.d/040-console.sql 5 | - https://raw.github.com/georchestra/georchestra/master/postgresql/docker-entrypoint-initdb.d/050-ogc-server-statistics.sql 6 | - https://raw.github.com/georchestra/georchestra/master/postgresql/docker-entrypoint-initdb.d/080-geofence.sql 7 | - https://raw.github.com/georchestra/georchestra/master/postgresql/docker-entrypoint-initdb.d/110-mapstore.sql 8 | - https://raw.github.com/georchestra/georchestra/master/postgresql/docker-entrypoint-initdb.d/120-datafeeder.sql 9 | cadastrappsql: 10 | tmpdir: /tmp/cadastrapp/sql 11 | views: 12 | - { script: uf_parcelle, view: uf_parcelle } 13 | - { script: Commune, view: commune } 14 | - { script: HabitationDetails, view: deschabitation } 15 | - { script: Parcelle, view: parcelle } 16 | - { script: Proprietaire, view: proprietaire } 17 | - { script: ProprietaireParcelle, view: 
proprietaire_parcelle } 18 | - { script: CoProprieteParcelle, view: co_propriete_parcelle } 19 | - { script: ProprieteBatie, view: proprietebatie } 20 | - { script: ProprieteNonBatie, view: proprietenonbatie } 21 | - { script: ProprieteNonBatieSufExo, view: proprietenonbatiesufexo } 22 | - { script: Lot, view: lot } 23 | - { script: Section, view: section } 24 | tables: 25 | - { script: groupe_autorisation } 26 | - { script: request_information } 27 | - { script: prop_bati_detent } 28 | - { script: prop_ccodem } 29 | - { script: prop_ccodro } 30 | - { script: prop_ccoqua } 31 | - { script: prop_ccogrm } 32 | - { script: prop_dmatgm } 33 | - { script: prop_dmatto } 34 | - { script: prop_dnatpr } 35 | - { script: prop_type_filiation } 36 | - { script: prop_ccocac } 37 | - { script: prop_ccocac_simple } 38 | indexes: [{ script: indexes_vues_materialisees, index: idxcommunelibcom }] 39 | -------------------------------------------------------------------------------- /roles/superset/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart superset 3 | ansible.builtin.service: 4 | name: superset 5 | state: restarted 6 | 7 | - name: Restart superset celery worker 8 | ansible.builtin.service: 9 | name: superset-celery-worker 10 | state: restarted 11 | -------------------------------------------------------------------------------- /roles/superset/tasks/database.yml: -------------------------------------------------------------------------------- 1 | - name: Create database schema 2 | community.postgresql.postgresql_schema: 3 | name: superset 4 | owner: "{{ georchestra.db.user }}" 5 | database: "{{ georchestra.db.name }}" 6 | 7 | - name: List existing database tables 8 | command: psql -d {{ georchestra.db.name }} -t -c "select * from pg_tables where schemaname='superset';" 9 | register: sset_table_exists 10 | 11 | - name: Initialize superset database schema 12 | command: 
/srv/apps/superset/venv/bin/superset db upgrade 13 | environment: 14 | PYTHONPATH: /srv/apps/superset 15 | FLASK_APP: superset 16 | SUPERSET_CONFIG_PATH: /srv/apps/superset/superset_georchestra_config.py 17 | when: sset_table_exists.stdout is defined and 'ab_user' not in sset_table_exists.stdout 18 | 19 | - name: List existing superset roles 20 | command: psql -d {{ georchestra.db.name }} -t -c "select * from superset.ab_role;" 21 | register: sset_role_exists 22 | 23 | - name: Initialize superset database content 24 | command: /srv/apps/superset/venv/bin/superset init 25 | environment: 26 | PYTHONPATH: /srv/apps/superset 27 | FLASK_APP: superset 28 | SUPERSET_CONFIG_PATH: /srv/apps/superset/superset_georchestra_config.py 29 | when: sset_role_exists.stdout is defined and 'Alpha' not in sset_role_exists.stdout 30 | -------------------------------------------------------------------------------- /roles/superset/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install debian dependencies 3 | tags: superset/apt 4 | ansible.builtin.apt: 5 | name: 6 | - build-essential 7 | - libssl-dev 8 | - libffi-dev 9 | - libsasl2-dev 10 | - libldap2-dev 11 | - libecpg-dev 12 | - python3-dev 13 | - python3-pip 14 | - python3-pil 15 | - python3-gevent 16 | - python3-psycopg2 17 | - python3-virtualenv 18 | 19 | - name: Install redis 20 | tags: superset/apt 21 | ansible.builtin.apt: 22 | name: redis-server 23 | 24 | - name: Install celery 25 | tags: superset/apt 26 | ansible.builtin.apt: 27 | name: celery 28 | 29 | - name: Install superset via pip 30 | tags: superset/pip 31 | ansible.builtin.pip: 32 | name: git+https://github.com/georchestra/superset-core@72e0aadc50#egg=apache-superset 33 | virtualenv: /srv/apps/superset/venv 34 | virtualenv_site_packages: true 35 | 36 | - name: Fetch/Unarchive precompiled assets 37 | tags: superset/assets 38 | ansible.builtin.unarchive: 39 | src: 
https://packages.georchestra.org/bot/wars/superset/superset-{{ item }}-72e0aadc50.tgz 40 | dest: /srv/apps/superset/venv/lib/python3.11/site-packages/superset/ 41 | remote_src: true 42 | with_items: [translations, assets] 43 | notify: 44 | - Restart superset 45 | - Restart superset celery worker 46 | 47 | - name: Create a georchestra group 48 | tags: superset/user 49 | ansible.builtin.group: 50 | name: georchestra 51 | 52 | - name: Create user for superset 53 | tags: superset/user 54 | ansible.builtin.user: 55 | name: superset 56 | group: georchestra 57 | 58 | - name: Create superset-owned subdirs 59 | tags: superset/dirs 60 | ansible.builtin.file: 61 | path: "{{ item }}" 62 | state: directory 63 | owner: superset 64 | group: georchestra 65 | with_items: 66 | - "{{ logs_basedir }}/superset" 67 | - /srv/data/superset 68 | 69 | - name: Deploy config files from georchestra/superset 70 | tags: superset/config 71 | ansible.builtin.get_url: 72 | url: https://raw.githubusercontent.com/georchestra/superset/5e5dad53a/config/superset/{{ item }}.py 73 | dest: /srv/apps/superset/{{ item }}.py 74 | with_items: 75 | - GeorchestraCustomizations 76 | - LocalizationFr 77 | - superset_georchestra_config 78 | notify: 79 | - Restart superset 80 | - Restart superset celery worker 81 | 82 | - name: Deploy our configfiles 83 | tags: superset/config 84 | ansible.builtin.template: 85 | owner: superset 86 | group: postgres 87 | mode: "0440" 88 | src: "{{ item }}.py.j2" 89 | dest: /srv/apps/superset/{{ item }}.py 90 | with_items: 91 | - Preconfig 92 | - Overrides 93 | notify: 94 | - Restart superset 95 | - Restart superset celery worker 96 | 97 | - name: Initialize database 98 | tags: superset/db 99 | become: true 100 | become_user: postgres 101 | ansible.builtin.import_tasks: database.yml 102 | 103 | - name: Template celery systemd unit 104 | tags: superset/systemd 105 | ansible.builtin.template: 106 | src: celery-worker.service.j2 107 | dest: 
/etc/systemd/system/superset-celery-worker.service 108 | notify: 109 | - Restart superset celery worker 110 | 111 | - name: Template superset systemd unit 112 | tags: superset/systemd 113 | ansible.builtin.template: 114 | src: superset.service.j2 115 | dest: /etc/systemd/system/superset.service 116 | notify: 117 | - Restart superset 118 | 119 | - name: Reload systemd 120 | tags: superset/systemd 121 | ansible.builtin.systemd: 122 | enabled: true 123 | daemon-reload: true 124 | name: superset.service 125 | 126 | - name: Start celery worker 127 | tags: superset/systemd 128 | ansible.builtin.service: 129 | name: superset-celery-worker 130 | enabled: yes 131 | state: started 132 | 133 | - name: Start superset 134 | tags: superset/systemd 135 | ansible.builtin.service: 136 | name: superset 137 | enabled: yes 138 | state: started 139 | -------------------------------------------------------------------------------- /roles/superset/templates/Overrides.py.j2: -------------------------------------------------------------------------------- 1 | # create superset-specific tables in the superset schema 2 | SQLALCHEMY_DATABASE_URI='postgresql://{{ georchestra.db.user }}:{{ georchestra.db.pass }}@localhost/{{ georchestra.db.name }}?options=-c%20search_path=superset' 3 | SECRET_KEY="{{ superset.secretkey }}" 4 | # upload folders, owned by superset user 5 | UPLOAD_FOLDER = "/srv/data/superset/uploads/" 6 | IMG_UPLOAD_FOLDER = "/srv/data/superset/uploads/" 7 | 8 | FAVICONS=[{"href": "https://{{ georchestra.fqdn }}/favicon.ico"}] 9 | 10 | # take the header config from the datadir defaults 11 | GEORCHESTRA_PROPERTIES_FILE_PATH="/etc/georchestra/default.properties" 12 | 13 | #overrides the one in superset_georchestra_config.py 14 | SUPERSET_WEBSERVER_PORT = {{ superset.port }} 15 | 16 | # for cas 17 | LOGIN_REDIRECT_URL="/cas/login?service=" 18 | 19 | # celery worker config 20 | class CeleryConfig(object): 21 | broker_url = "redis://localhost:6379/0" 22 | imports = ( 23 | 
"superset.sql_lab", 24 | "superset.tasks.scheduler", 25 | "superset.tasks.thumbnails", 26 | "superset.tasks.cache", 27 | ) 28 | result_backend = "redis://localhost:6379/0" 29 | worker_prefetch_multiplier = 10 30 | task_acks_late = True 31 | task_annotations = { 32 | "sql_lab.get_sql_results": { 33 | "rate_limit": "100/s", 34 | }, 35 | } 36 | 37 | CELERY_CONFIG = CeleryConfig 38 | # results stored in Redis 39 | from flask_caching.backends.rediscache import RedisCache 40 | RESULTS_BACKEND = RedisCache( 41 | host='localhost', port=6379, key_prefix='superset_results') 42 | -------------------------------------------------------------------------------- /roles/superset/templates/Preconfig.py.j2: -------------------------------------------------------------------------------- 1 | REDIS_BASE_URL="redis://localhost:6379" 2 | -------------------------------------------------------------------------------- /roles/superset/templates/celery-worker.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=celery worker for superset 3 | After=network.target 4 | 5 | [Service] 6 | User=superset 7 | Environment="LOG_LEVEL=INFO" 8 | Environment="SUPERSET_CONFIG_PATH=superset_georchestra_config.py" 9 | Environment="PYTHONPATH=/srv/apps/superset/venv/lib/python3.11/site-packages/" 10 | WorkingDirectory=/srv/apps/superset/ 11 | ExecStart=celery --app=superset.tasks.celery_app:app worker -O fair -c 1 -B -s /srv/data/superset/celery-beat-scheduler 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /roles/superset/templates/superset.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=superset 3 | After=network.target 4 | 5 | [Service] 6 | User=superset 7 | Environment="LOG_LEVEL=INFO" 8 | Environment="SUPERSET_CONFIG_PATH=superset_georchestra_config.py" 9 | 
WorkingDirectory=/srv/apps/superset/ 10 | ExecStart=/srv/apps/superset/venv/bin/gunicorn \ 11 | -w 10 \ 12 | -k gevent \ 13 | --worker-connections 1000 \ 14 | --timeout 120 \ 15 | --limit-request-line 0 \ 16 | --limit-request-field_size 0 \ 17 | -b 127.0.0.1:{{ superset.port}} \ 18 | --access-logfile {{ logs_basedir }}/superset/access.log \ 19 | --log-level info \ 20 | --error-logfile {{ logs_basedir }}/superset/error.log \ 21 | "superset.app:create_app(superset_app_root='/{{ superset.urlprefix }}')" 22 | 23 | [Install] 24 | WantedBy=multi-user.target 25 | -------------------------------------------------------------------------------- /roles/tomcat/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | allow_duplicates: true 3 | -------------------------------------------------------------------------------- /roles/tomcat/tasks/cadastrapp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add the ResourceLink for jdbc/cadastrapp 3 | lineinfile: 4 | dest: "{{ tomcat_basedir }}/georchestra/conf/context.xml" 5 | insertbefore: 6 | line: 7 | state: present 8 | 9 | - name: symlink postgresql.jar for jdbc/cadastrapp 10 | file: 11 | src: "{{ tomcat_basedir }}/georchestra/webapps/cadastrapp/WEB-INF/lib/postgresql-42.3.3.jar" 12 | dest: /usr/share/tomcat{{ tomcat_version }}/lib/postgresql-42.3.3.jar 13 | state: link 14 | -------------------------------------------------------------------------------- /roles/tomcat/tasks/clean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: stop instances 3 | service: 4 | name: tomcat@{{ item }} 5 | enabled: false 6 | state: stopped 7 | with_items: "{{ tomcat_instances | list }}" 8 | 9 | - name: remove tomcat directories/config 10 | file: 11 | name: "{{ item }}" 12 | state: absent 13 | with_items: 14 | - "{{ tomcat_basedir }}" 15 | - "{{ logs_basedir }}" 16 | - /etc/tomcat{{ 
tomcat_version }}/keystore 17 | 18 | # - name: removing pkgs 19 | # apt: 20 | # pkg: "{{ item }}" 21 | # state: absent 22 | # purge: yes 23 | # with_items: 24 | # - tomcat{{ tomcat_version }} 25 | # - tomcat{{ tomcat_version }}-user 26 | 27 | - name: remove init scripts & config files 28 | file: 29 | name: "{{ item[0] }}-{{ item[1] }}" 30 | state: absent 31 | with_nested: 32 | - - /etc/init.d/tomcat 33 | - /etc/default/tomcat 34 | - /etc/rc2.d/K01tomcat 35 | - /etc/rc3.d/K01tomcat 36 | - /etc/rc4.d/K01tomcat 37 | - /etc/rc5.d/K01tomcat 38 | - /etc/rc0.d/K01tomcat 39 | - /etc/rc1.d/K01tomcat 40 | - /etc/rc6.d/K01tomcat 41 | - "{{ tomcat_instances | list }}" 42 | -------------------------------------------------------------------------------- /roles/tomcat/tasks/common.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure GPG is installed 3 | apt: 4 | name: gpg 5 | state: present 6 | 7 | - name: setup wtf lts repokey for tomcat9 (deb822 only available in ansible 2.15) 8 | tags: tomcat_apt_repo 9 | get_url: 10 | url: http://www.mirbsd.org/~tg/Debs/sources.txt/wtflts-bookworm.sources 11 | dest: /etc/apt/sources.list.d/wtflts-bookworm.sources 12 | when: tomcat_version == 9 13 | 14 | - name: installing dependencies 15 | apt: 16 | pkg: 17 | - tomcat{{ tomcat_version }} 18 | - tomcat{{ tomcat_version }}-user # needed for create-instance script 19 | - tomcat-jakartaee-migration 20 | - libtcnative-1 # APR native libs 21 | - unzip # for unzipping artifacts from github actions 22 | state: present 23 | update_cache: true 24 | - name: disable default instance 25 | service: 26 | name: tomcat{{ tomcat_version }} 27 | state: stopped 28 | enabled: false 29 | 30 | - name: link tomcat9 to 10 migration jar 31 | file: 32 | src: /usr/share/tomcat-jakartaee-migration/lib/jakartaee-migration-1.0.6-shaded.jar 33 | dest: /usr/share/tomcat10/lib/jakartaee-migration-1.0.6-shaded.jar 34 | state: link 35 | when: tomcat_version == 10 36 | 
37 | - name: template tmpfiles.d for pidfile 38 | tags: systemd_unit 39 | template: 40 | src: tomcat.conf.j2 41 | dest: /etc/tmpfiles.d/tomcat.conf 42 | register: tomcat_piddir 43 | 44 | - name: add fqdn to /etc/hosts 45 | lineinfile: 46 | dest: /etc/hosts 47 | line: 127.0.0.1 {{ georchestra.fqdn }} 48 | 49 | - name: create temp dir 50 | tags: systemd_unit 51 | command: systemd-tmpfiles --prefix=/run/tomcat --create 52 | when: tomcat_piddir.changed 53 | 54 | - name: template systemd unit 55 | tags: systemd_unit 56 | template: 57 | src: tomcat.service.j2 58 | dest: /etc/systemd/system/tomcat@.service 59 | 60 | - name: reload systemd so that it finds the new unit 61 | tags: systemd_unit 62 | systemd: 63 | daemon-reload: true 64 | name: tomcat@.service 65 | 66 | - name: disable assistive_technologies (requires non-headless jdk) 67 | lineinfile: 68 | dest: /etc/java-8-openjdk/accessibility.properties 69 | line: assistive_technologies=org.GNOME.Accessibility.AtkWrapper 70 | state: absent 71 | 72 | - name: create tomcat_basedir 73 | file: 74 | name: "{{ tomcat_basedir }}" 75 | state: directory 76 | 77 | - name: create common logdir 78 | file: 79 | path: "{{ logs_basedir }}" 80 | state: directory 81 | owner: tomcat 82 | group: tomcat 83 | -------------------------------------------------------------------------------- /roles/tomcat/tasks/instance.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create instance {{ item.key }} 3 | command: > 4 | tomcat{{ tomcat_version }}-instance-create -p {{ item.value.port }} -c {{ item.value.control_port }} {{ tomcat_basedir }}/{{ item.key }} 5 | args: 6 | creates: "{{ tomcat_basedir }}/{{ item.key }}" 7 | with_dict: "{{ tomcat_instances }}" 8 | 9 | - name: symlink policy.d dir 10 | file: 11 | src: /etc/tomcat{{ tomcat_version }}/policy.d/ 12 | dest: "{{ tomcat_basedir }}/{{ item.key }}/conf/policy.d" 13 | state: link 14 | with_dict: "{{ tomcat_instances }}" 15 | 16 | - name: 
recursively fix dirs ownership 17 | file: 18 | name: "{{ tomcat_basedir }}/{{ item.0 }}/{{ item.1 }}" 19 | state: directory 20 | recurse: true 21 | owner: tomcat 22 | with_nested: 23 | - "{{ tomcat_instances | list }}" 24 | - [logs, temp, conf, work, webapps] 25 | 26 | - name: install instance config in /etc/default 27 | tags: tomcat_config 28 | template: 29 | src: config-{{ item.key }}.j2 30 | dest: /etc/default/tomcat-{{ item.key }} 31 | with_dict: "{{ tomcat_instances }}" 32 | 33 | - name: install instance connector 34 | #tags: tomcat_config 35 | template: 36 | src: server-{{ item.key }}.xml.j2 37 | dest: "{{ tomcat_basedir }}/{{ item.key }}/conf/server.xml" 38 | with_dict: "{{ tomcat_instances }}" 39 | 40 | - name: add the jakarta loader for tomcat10 compat 41 | lineinfile: 42 | dest: "{{ tomcat_basedir }}/{{ item.key }}/conf/context.xml" 43 | insertafter: 44 | line: 45 | state: present 46 | with_dict: "{{ tomcat_instances }}" 47 | when: tomcat_version == 10 48 | 49 | - name: enable instance 50 | systemd: 51 | name: tomcat@{{ item.key }} 52 | enabled: true 53 | with_dict: "{{ tomcat_instances }}" 54 | -------------------------------------------------------------------------------- /roles/tomcat/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: common.yml 3 | - import_tasks: instance.yml 4 | - import_tasks: cadastrapp.yml 5 | when: cadastrapp.enabled 6 | - import_tasks: clean.yml 7 | tags: [cleanup, tomcat_cleanup] 8 | when: cleanup is defined 9 | -------------------------------------------------------------------------------- /roles/tomcat/templates/config-georchestra.j2: -------------------------------------------------------------------------------- 1 | JAVA_HOME=/usr/lib/jvm/{{ java_version }} 2 | JRE_HOME=/usr/lib/jvm/{{ java_version }} 3 | 4 | JAVA_OPTS="-Djava.awt.headless=true \ 5 | -Xms{{ tomcat_instances.georchestra.xms }} \ 6 | -Xmx{{ tomcat_instances.georchestra.xmx }} \ 7 | 
{% if "17" in java_version %} 8 | --add-exports=java.naming/com.sun.jndi.ldap=ALL-UNNAMED \ 9 | {% endif %} 10 | -Dorg.geotools.referencing.forceXY=true \ 11 | -Djava.util.prefs.userRoot=/tmp \ 12 | -Djava.util.prefs.systemRoot=/tmp \ 13 | -Dlog_dir={{ logs_basedir }} \ 14 | -Dgeorchestra.datadir={{ georchestra.datadir.path }} \ 15 | -Dgeonetwork.jeeves.configuration.overrides.file={{ georchestra.datadir.path }}/geonetwork/config/config-overrides-georchestra.xml" 16 | -------------------------------------------------------------------------------- /roles/tomcat/templates/config-geoserver.j2: -------------------------------------------------------------------------------- 1 | JAVA_HOME=/usr/lib/jvm/{{ java_version }} 2 | JRE_HOME=/usr/lib/jvm/{{ java_version }} 3 | 4 | JAVA_OPTS="-Djava.awt.headless=true \ 5 | -Xms{{ tomcat_instances.geoserver.xms }} \ 6 | -Xmx{{ tomcat_instances.geoserver.xmx }} \ 7 | {% if "17" in java_version %} 8 | --add-exports=java.naming/com.sun.jndi.ldap=ALL-UNNAMED \ 9 | {% endif %} 10 | -Dgeorchestra.datadir={{ georchestra.datadir.path }} \ 11 | -DGEOSERVER_DATA_DIR={{ geoserver.datadir.path }} \ 12 | -DGEOWEBCACHE_CACHE_DIR={{ geowebcache_datadir }} \ 13 | -DGEOSERVER_CSRF_WHITELIST={{ georchestra.fqdn }} \ 14 | -Dfile.encoding=UTF8 \ 15 | -Djavax.servlet.request.encoding=UTF-8 \ 16 | -Djavax.servlet.response.encoding=UTF-8 \ 17 | -Djava.library.path=/usr/lib/jni:/opt/libjpeg-turbo/lib64/ \ 18 | -server \ 19 | -XX:SoftRefLRUPolicyMSPerMB=36000 \ 20 | -XX:NewRatio=2" 21 | -------------------------------------------------------------------------------- /roles/tomcat/templates/config-proxycas.j2: -------------------------------------------------------------------------------- 1 | JAVA_HOME=/usr/lib/jvm/{{ java_version }} 2 | JRE_HOME=/usr/lib/jvm/{{ java_version }} 3 | 4 | JAVA_OPTS="-Djava.awt.headless=true \ 5 | -Xms{{ tomcat_instances.proxycas.xms }} \ 6 | -Xmx{{ tomcat_instances.proxycas.xmx }} \ 7 | {% if "17" in java_version %} 8 | 
--add-exports=java.naming/com.sun.jndi.ldap=ALL-UNNAMED \ 9 | {% endif %} 10 | {% if tomcat_version == 9 %} 11 | -DCAS_BANNER_SKIP=true \ 12 | -Dcas.standalone.configurationDirectory=/etc/georchestra/cas/config \ 13 | {% endif %} 14 | -Dgeorchestra.datadir={{ georchestra.datadir.path }}" 15 | -------------------------------------------------------------------------------- /roles/tomcat/templates/server-georchestra.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 18 | 22 | 23 | 24 | 25 | 26 | 29 | 30 | 31 | 32 | 33 | 34 | 37 | 38 | 41 | 46 | {% if cadastrapp.enabled %} 47 | 55 | {% endif %} 56 | 57 | 58 | 63 | 64 | 65 | 66 | 70 | 71 | 72 | 79 | 88 | 89 | 95 | 99 | 104 | 105 | 106 | 109 | 110 | 111 | 116 | 117 | 120 | 121 | 122 | 125 | 128 | 129 | 131 | 132 | 136 | 138 | 139 | 140 | 143 | 145 | 146 | 148 | 151 | 152 | 154 | 158 | 159 | 161 | 162 | 163 | 164 | 165 | 166 | -------------------------------------------------------------------------------- /roles/tomcat/templates/server-geoserver.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 18 | 22 | 23 | 24 | 25 | 26 | 29 | 30 | 31 | 32 | 33 | 34 | 37 | 38 | 41 | 46 | 47 | 48 | 53 | 54 | 55 | 56 | 60 | 61 | 62 | 69 | 73 | 74 | 80 | 84 | 89 | 90 | 91 | 94 | 95 | 96 | 101 | 102 | 105 | 106 | 107 | 110 | 113 | 114 | 117 | 120 | 121 | 123 | 124 | 128 | 130 | 131 | 132 | 135 | 137 | 138 | 140 | 143 | 144 | 146 | 150 | 151 | 153 | 154 | 155 | 156 | 157 | 158 | -------------------------------------------------------------------------------- /roles/tomcat/templates/server-proxycas.xml.j2: -------------------------------------------------------------------------------- 1 | 2 | 18 | 22 | 23 | 24 | 25 | 26 | 29 | 30 | 31 | 32 | 33 | 34 | 37 | 38 | 41 | 46 | 47 | 48 | 53 | 54 | 55 | 56 | 60 | 61 | 62 | 69 | 73 | 74 | 75 | 81 | 85 | 90 | 91 | 92 | 95 | 96 | 97 | 102 | 103 | 106 | 107 | 108 | 111 | 114 | 115 | 118 | 121 | 122 | 124 | 125 
| 129 | 131 | 132 | 133 | 136 | 138 | 139 | 141 | 144 | 145 | 147 | 151 | 153 | 154 | 155 | 156 | 157 | 158 | -------------------------------------------------------------------------------- /roles/tomcat/templates/tomcat.conf.j2: -------------------------------------------------------------------------------- 1 | d /run/tomcat 0755 tomcat tomcat 2 | -------------------------------------------------------------------------------- /roles/tomcat/templates/tomcat.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Tomcat{{ tomcat_version }} - instance %i 3 | After=syslog.target network.target 4 | 5 | [Service] 6 | Type=forking 7 | 8 | User=tomcat 9 | Group=tomcat 10 | 11 | Environment=LANG={{ system_locale }} 12 | Environment=CATALINA_HOME=/usr/share/tomcat{{ tomcat_version }} 13 | Environment=CATALINA_TMPDIR=/srv/tomcat/%i/temp 14 | Environment=CATALINA_PID=/run/tomcat/%i.pid 15 | PIDFile=/run/tomcat/%i.pid 16 | 17 | EnvironmentFile=/etc/default/tomcat-%i 18 | PassEnvironment=JAVA_OPTS LANG 19 | 20 | ExecStart={{ tomcat_basedir }}/%i/bin/startup.sh 21 | ExecStop={{ tomcat_basedir }}/%i/bin/shutdown.sh 22 | 23 | SyslogIdentifier=tomcat{{ tomcat_version }}-%i 24 | Restart=on-failure 25 | 26 | [Install] 27 | WantedBy=multi-user.target 28 | -------------------------------------------------------------------------------- /spec/georchestra/georchestra_spec.rb: -------------------------------------------------------------------------------- 1 | require 'spec_helper' 2 | 3 | describe package('nginx') do 4 | it { should be_installed } 5 | end 6 | 7 | # Frontend webserver (apache2) 8 | describe port(80) do 9 | it { should be_listening } 10 | end 11 | 12 | describe port(443) do 13 | it { should be_listening } 14 | end 15 | 16 | # postgresql 17 | describe port(5432) do 18 | it { should be_listening } 19 | end 20 | 21 | # OpenLDAP / slapd 22 | describe port(389) do 23 | it { should be_listening } 24 | end 25 | 26 
| # Elasticsearch 27 | describe port(9200) do 28 | it { should be_listening } 29 | end 30 | 31 | # Kibana 32 | describe port(5601) do 33 | it { should be_listening } 34 | end 35 | 36 | # tomcat-georchestra 37 | describe port(8280) do 38 | it { should be_listening } 39 | end 40 | 41 | # tomcat-geoserver 42 | describe port(8380) do 43 | it { should be_listening } 44 | end 45 | 46 | # tomcat-proxycas 47 | describe port(8180) do 48 | it { should be_listening } 49 | end 50 | 51 | # datafeeder 52 | describe port(8480) do 53 | it { should be_listening } 54 | end 55 | 56 | # gn-cloud-searching 57 | describe port(8580) do 58 | it { should be_listening } 59 | end 60 | 61 | # gn-ogc-api-records 62 | describe port(8880) do 63 | it { should be_listening } 64 | end 65 | 66 | # geOrchestra base debian packages should be present 67 | [ 'georchestra-analytics', 68 | 'georchestra-cas', 69 | 'georchestra-console', 70 | 'georchestra-datafeeder', 71 | 'georchestra-datafeeder-ui', 72 | 'georchestra-geoserver', 73 | 'georchestra-geowebcache', 74 | 'georchestra-header', 75 | 'georchestra-security-proxy', 76 | ].each do |pkg| 77 | describe package(pkg) do 78 | it { should be_installed } 79 | end 80 | end 81 | 82 | # geOrchestra datadir has been set up 83 | describe file('/etc/georchestra') do 84 | it { should be_directory } 85 | end 86 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'serverspec' 2 | require 'net/ssh' 3 | require 'tempfile' 4 | 5 | set :backend, :ssh 6 | 7 | if ENV['ASK_SUDO_PASSWORD'] 8 | begin 9 | require 'highline/import' 10 | rescue LoadError 11 | fail "highline is not available. Try installing it." 
12 | end 13 | set :sudo_password, ask("Enter sudo password: ") { |q| q.echo = false } 14 | else 15 | set :sudo_password, ENV['SUDO_PASSWORD'] 16 | end 17 | 18 | host = ENV['TARGET_HOST'] 19 | 20 | `vagrant up #{host}` 21 | 22 | config = Tempfile.new('', Dir.tmpdir) 23 | config.write(`vagrant ssh-config #{host}`) 24 | config.close 25 | 26 | options = Net::SSH::Config.for(host, [config.path]) 27 | 28 | options[:user] ||= Etc.getlogin 29 | 30 | set :host, options[:host_name] || host 31 | set :ssh_options, options 32 | 33 | # Disable sudo 34 | set :disable_sudo, true 35 | 36 | 37 | # Set environment variables 38 | # set :env, :LANG => 'C', :LC_MESSAGES => 'C' 39 | 40 | # Set PATH 41 | # set :path, '/sbin:/usr/local/sbin:$PATH' 42 | --------------------------------------------------------------------------------