├── .docker-compose-cache-from.yaml ├── .docker-compose-centralized.yaml ├── .docker-compose-db.yaml ├── .docker-compose-harvester_ng.yaml ├── .docker-compose.datagov-theme.yaml ├── .docker-compose.filenames-unicode.yaml.disabled ├── .docker-compose.honduras-theme.yaml ├── .docker-compose.panama-theme.yaml ├── .docker-compose.vital-strategies-philippines-theme.yaml ├── .docker-compose.vital-strategies-shandong-theme.yaml ├── .docker-compose.vital-strategies-shanghai-theme.yaml.disabled ├── .docker-compose.vital-strategies-theme.yaml ├── .github └── workflows │ └── main.yml ├── .gitignore ├── .travis-ci-operator.yaml ├── .travis.sh ├── .travis.yml ├── .travis_ci_operator_self_github_deploy_key.id_rsa.enc ├── CONTRIBUTING.md ├── DEPLOYING.md ├── LICENSE ├── Makefile ├── README.md ├── TESTING.md ├── TESTING.test-core.ini ├── cca-operator ├── Dockerfile ├── README.md ├── add-server-authorized-key.sh ├── cca-operator.py ├── cca-operator.sh ├── create-instance.sh ├── datastore-permissions.sql.template ├── delete-instance.sh ├── functions.sh ├── get-instance-values.sh ├── instance-connection-info.sh ├── instance-status.sh ├── kubectl.sh ├── list-instances.sh ├── recreate-instance.sh ├── server.sh ├── set-instance-values.sh ├── templater.sh └── update-instance.sh ├── cicd_functions.sh ├── ckan ├── Dockerfile ├── entrypoint.sh ├── overrides │ ├── datagov │ │ └── filesystem │ │ │ └── etc │ │ │ ├── crontab-harvester │ │ │ ├── patches │ │ │ └── ckan │ │ │ │ └── search-use-requests.patch │ │ │ └── supervisor │ │ │ ├── conf.d │ │ │ └── ckan_harvesting.conf │ │ │ └── supervisord.conf │ ├── honduras │ │ └── filesystem │ │ │ └── etc │ │ │ ├── crontab-harvester │ │ │ ├── patches │ │ │ └── ckan │ │ │ │ ├── datapusher_status_timestamp.patch │ │ │ │ ├── disable_streaming.patch │ │ │ │ ├── fix_graph_view.patch │ │ │ │ └── stats_disable_html5.patch │ │ │ └── supervisor │ │ │ ├── conf.d │ │ │ └── ckan_harvesting.conf │ │ │ └── supervisord.conf │ └── vital-strategies │ │ └── filesystem │ │ └── .init ├── post_install_functions.sh ├── requirements.txt ├── setup │ ├── supervisord.conf │ └── uwsgi.conf ├── templater.sh └── themer │ ├── Dockerfile │ └── themer.sh ├── configs_diff.sh ├── create_secrets.py ├── datapusher-plus ├── Dockerfile ├── datapusher-settings.py ├── entrypoint │ └── startup.sh └── example.env ├── datapusher ├── Dockerfile └── setup │ ├── datapusher_settings.py │ └── wsgi.py ├── db ├── Dockerfile ├── datastore-permissions-update.sh ├── datastore-permissions.sql.template ├── datastore-public-ro-cron.sh ├── datastore-public-ro-supervisord.conf ├── entrypoint.sh ├── init_ckan_db.sh ├── migration │ ├── ckan-permissions.sql │ ├── datastore-permissions.sql │ └── upgrade_databases.sh └── templater.sh ├── docker-compose.yaml ├── docker-compose ├── cca-operator │ ├── id_rsa │ └── id_rsa.pub ├── ckan-conf-templates │ ├── ckan_init.sh.template │ ├── datagov-theme-production.ini.template │ ├── honduras-theme-production.ini.template │ ├── panama-theme-production.ini.template │ ├── production.ini.template │ ├── vital-strategies-philippines-theme-production.ini.template │ ├── vital-strategies-shandong-theme-production.ini.template │ ├── vital-strategies-shanghai-theme-production.ini.template │ ├── vital-strategies-theme-ckan.ini.template │ └── who.ini ├── ckan-secrets.dat └── provisioning-api │ ├── README.md │ ├── private.pem │ └── public.pem ├── docs └── imgs │ ├── airflow-ready.png │ ├── ckan-ready.png │ ├── dags_ready.png │ ├── harvest-sources-empty.png │ └── new-harvest-source.png ├── jenkins ├── .dockerignore 
├── Dockerfile ├── jobs │ ├── .gitignore │ ├── CKAN builds │ │ ├── config.xml │ │ └── jobs │ │ │ ├── custom CKAN build (local) │ │ │ └── config.xml │ │ │ └── custom CKAN build │ │ │ └── config.xml │ └── cluster administration │ │ ├── config.xml │ │ └── jobs │ │ ├── create-instance │ │ └── config.xml │ │ ├── debug │ │ └── config.xml │ │ ├── delete-instance │ │ └── config.xml │ │ ├── get instance values │ │ └── config.xml │ │ ├── instance-connection-info │ │ └── config.xml │ │ ├── list-instances │ │ └── config.xml │ │ ├── logs │ │ └── config.xml │ │ ├── recreate-instance │ │ └── config.xml │ │ └── update-instance │ │ └── config.xml └── scripts │ ├── build_ckan_custom.sh │ ├── create_instance.sh │ ├── delete_instance.sh │ ├── docker_compose_cca_operator.sh │ ├── get_instance_values.sh │ ├── kubectl.sh │ ├── list_instances.sh │ ├── recreate_instance.sh │ └── update_instance.sh ├── migrate_databases.sh ├── migrate_dbs_generic.sh ├── migrate_filestorage.sh ├── nginx ├── Dockerfile └── default.conf ├── solr ├── Dockerfile ├── basic-config │ ├── currency.xml │ ├── elevate.xml │ ├── protwords.txt │ ├── stopwords.txt │ └── synonyms.txt ├── schema-2.9-fix-unicode-filenames.xml ├── schemas │ ├── schema210.xml │ ├── schema26.xml │ ├── schema27.xml │ └── schema28.xml ├── solr.xml ├── solrcloud-entrypoint.sh ├── solrcloud.Dockerfile ├── solrconfig.xml └── zoo.cfg ├── start-harvester.md ├── traefik ├── acme.json ├── certs │ └── .gitkeep ├── entrypoint.sh ├── traefik.dev.toml ├── traefik.toml ├── traefik.toml.template └── traefik_custom_ssl.toml ├── travis_ci_operator.sh └── varnish └── default.vcl /.docker-compose-cache-from.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | nginx: 5 | build: 6 | cache_from: 7 | - viderum/ckan-cloud-docker:nginx-latest 8 | 9 | db: 10 | build: 11 | cache_from: 12 | - viderum/ckan-cloud-docker:db-latest 13 | 14 | solr: 15 | build: 16 | cache_from: 17 | - viderum/ckan-cloud-docker:solr-latest 18 | 19 | ckan: 20 | build: 21 | cache_from: 22 | - viderum/ckan-cloud-docker:ckan-latest 23 | - viderum/ckan-cloud-docker:ckan-latest-datagov-theme 24 | 25 | jenkins: 26 | build: 27 | cache_from: 28 | - viderum/ckan-cloud-docker:jenkins-latest 29 | 30 | cca-operator: 31 | build: 32 | cache_from: 33 | - viderum/ckan-cloud-docker:cca-operator-latest 34 | -------------------------------------------------------------------------------- /.docker-compose-centralized.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | ckan: 5 | environment: 6 | - INSTANCE_ID 7 | - CKAN_K8S_SECRETS=/etc/ckan-conf/secrets/secrets-centralized.sh 8 | volumes: 9 | - ./docker-compose/ckan-secrets.sh:/etc/ckan-conf/secrets/secrets-centralized.sh 10 | 11 | jobs: 12 | environment: 13 | - INSTANCE_ID 14 | - CKAN_K8S_SECRETS=/etc/ckan-conf/secrets/secrets-centralized.sh 15 | volumes: 16 | - ./docker-compose/ckan-secrets.sh:/etc/ckan-conf/secrets/secrets-centralized.sh 17 | 18 | solr: 19 | image: viderum/ckan-cloud-docker:solrcloud-latest 20 | build: 21 | context: solr 22 | dockerfile: solrcloud.Dockerfile 23 | args: 24 | SCHEMA_XML: ${SCHEMA_XML:-schemas/schema28.xml} 25 | entrypoint: [docker-entrypoint.sh, solr, start, -c, -f] 26 | expose: 27 | - "8983" 28 | -------------------------------------------------------------------------------- /.docker-compose-db.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | 
services: 4 | db: 5 | image: viderum/ckan-cloud-docker:db-latest 6 | build: 7 | context: db 8 | restart: always 9 | expose: 10 | - "5432" 11 | env_file: 12 | - docker-compose/db-secrets.sh 13 | volumes: 14 | - db:/var/lib/postgresql/data 15 | networks: 16 | - ckan-multi 17 | 18 | jobs-db: 19 | image: postgres 20 | restart: always 21 | expose: 22 | - "5432" 23 | env_file: 24 | - docker-compose/db-secrets.sh 25 | volumes: 26 | - jobs-db:/var/lib/postgresql/data 27 | networks: 28 | - ckan-multi 29 | 30 | datastore-db: 31 | image: viderum/ckan-cloud-docker:db-latest 32 | restart: always 33 | expose: 34 | - "5432" 35 | env_file: 36 | - docker-compose/datastore-db-secrets.sh 37 | volumes: 38 | - datastore-db:/var/lib/postgresql/data 39 | networks: 40 | - ckan-multi 41 | 42 | ckan: 43 | depends_on: 44 | - db 45 | - jobs-db 46 | - datastore-db 47 | 48 | volumes: 49 | db: 50 | jobs-db: 51 | datastore-db: 52 | -------------------------------------------------------------------------------- /.docker-compose-harvester_ng.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | # based on https://github.com/puckel/docker-airflow 3 | # and https://medium.com/@tomaszdudek/yet-another-scalable-apache-airflow-with-docker-example-setup-84775af5c451 4 | services: 5 | 6 | harvester_postgres: 7 | image: postgres:9.6 8 | expose: 9 | - "5432" 10 | environment: 11 | - POSTGRES_USER=airflow 12 | - POSTGRES_PASSWORD=airflow 13 | - POSTGRES_DB=airflow 14 | networks: 15 | - ckan-multi 16 | 17 | harvester: 18 | image: viderum/ckan-ng-harvest:latest 19 | restart: always 20 | command: webserver 21 | expose: 22 | - "8082" 23 | ports: 24 | - "8082:8082" 25 | depends_on: 26 | - harvester_postgres 27 | - ckan 28 | env_file: 29 | - docker-compose/harvester-secrets.sh 30 | environment: 31 | # - AIRFLOW__CORE__LOGGING_LEVEL=DEBUG 32 | - LOAD_EX=n 33 | - EXECUTOR=Local 34 | - POSTGRES_USER=airflow 35 | - POSTGRES_PASSWORD=airflow 36 | - POSTGRES_DB=airflow 37 | - POSTGRES_HOST=harvester_postgres 38 | 39 | # CKAN instance to write harvested packages 40 | # Tell to harvester to read API KEY from DB 41 | - CKAN_API_KEY=READ_FROM_DB 42 | - CKAN_BASE_URL=http://ckan:5000 43 | - CKAN_VALID_USER_ID=admin 44 | - HARVESTER_APP_PATH=/app 45 | - AIRFLOW__WEBSERVER__WEB_SERVER_PORT=8082 46 | - AIRFLOW__WEBSERVER__BASE_URL=http://nginx:8080/airflow 47 | - AIRFLOW__CORE__DAGS_FOLDER=/app/automate-tasks/airflow/dags 48 | - AIRFLOW__CORE__DAGBAG_IMPORT_TIMEOUT=180 49 | # - AIRFLOW__SCHEDULER__MIN_FILE_PROCESS_INTERVAL=300 50 | # - AIRFLOW__SCHEDULER__RUN_DURATION=60 51 | 52 | # The amount of parallelism as a setting to the executor. 
This defines 53 | # the max number of task instances that should run simultaneously 54 | # on this airflow installation 55 | - AIRFLOW__CORE__PARALLELISM=3 56 | # if False all (or limited by other settings) the harvesters will run at start 57 | - AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION=False 58 | 59 | healthcheck: 60 | test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"] 61 | interval: 30s 62 | timeout: 30s 63 | retries: 3 64 | 65 | networks: 66 | - ckan-multi -------------------------------------------------------------------------------- /.docker-compose.datagov-theme.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 | nginx: 6 | ports: 7 | - "8080:8080" 8 | 9 | ckan: 10 | image: viderum/ckan-cloud-docker:ckan-latest-datagov-theme 11 | ports: 12 | - "5000:5000" 13 | build: 14 | args: 15 | EXTRA_PACKAGES: supervisor cron 16 | EXTRA_FILESYSTEM: "./overrides/datagov/filesystem/" 17 | ROOT_INIT: | 18 | mkdir -p /var/log/supervisor 19 | chown ckan:ckan /var/log/supervisor 20 | POST_INSTALL: | 21 | install_standard_ckan_extension_github -r ckan/ckanext-spatial &&\ 22 | install_standard_ckan_extension_github -r ckan/ckanext-xloader &&\ 23 | install_standard_ckan_extension_github -r datagovuk/ckanext-report &&\ 24 | install_standard_ckan_extension_github -r ckan/ckanext-archiver &&\ 25 | install_standard_ckan_extension_github -r ckan/ckanext-harvest &&\ 26 | install_standard_ckan_extension_github -r datopian/ckanext-logstash &&\ 27 | install_standard_ckan_extension_github -r ckan/ckanext-dcat ckanext-dcat &&\ 28 | install_standard_ckan_extension_github -r datopian/ckanext-geodatagov &&\ 29 | install_standard_ckan_extension_github -r datopian/ckanext-datajson -b datagov &&\ 30 | install_standard_ckan_extension_github -r akariv/USMetadata -e ckanext-usmetadata &&\ 31 | install_standard_ckan_extension_github -r GSA/ckanext-datagovtheme -b main 32 | POST_DOCKER_BUILD: | 33 | mkdir -p /var/tmp/ckan/dynamic_menu &&\ 34 | mkdir -p /var/log/ckan/std/ 35 | CKAN_INIT: | 36 | ckan-paster --plugin=ckanext-archiver archiver init -c "CKAN_CONFIG/production.ini" 37 | ckan-paster --plugin=ckanext-report report initdb -c "CKAN_CONFIG/production.ini" 38 | ckan-paster --plugin=ckanext-harvest harvester initdb -c "CKAN_CONFIG/production.ini" 39 | cp /var/lib/ckan/main.css /usr/lib/ckan/venv/src/ckan/ckan/public/base/css/main.min.css || true 40 | crontab /etc/crontab-harvester 41 | service supervisor start #rev1 42 | environment: 43 | - CKAN_CONFIG_TEMPLATE_PREFIX=datagov-theme- 44 | jobs: 45 | image: viderum/ckan-cloud-docker:ckan-latest-datagov-theme 46 | environment: 47 | - CKAN_CONFIG_TEMPLATE_PREFIX=datagov-theme- 48 | 49 | db: 50 | image: mdillon/postgis 51 | build: 52 | args: 53 | DB_INIT: | 54 | psql --dbname="ckan" -c "CREATE EXTENSION IF NOT EXISTS postgis; \ 55 | CREATE EXTENSION IF NOT EXISTS postgis_topology; \ 56 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; \ 57 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;" 58 | -------------------------------------------------------------------------------- /.docker-compose.filenames-unicode.yaml.disabled: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | solr: 5 | image: viderum/ckan-cloud-docker:solr-latest-filenames-unicode 6 | build: 7 | args: 8 | SCHEMA_XML: schema-2.9-fix-unicode-filenames.xml 9 | ckan: 10 | image: viderum/ckan-cloud-docker:ckan-latest-filenames-unicode 11 | 
build: 12 | args: 13 | CKAN_BRANCH: filenames-unicode 14 | CKAN_REPO: frafra/ckan 15 | -------------------------------------------------------------------------------- /.docker-compose.honduras-theme.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | proxy: 5 | ports: 6 | - "80:80" 7 | - "443:443" 8 | 9 | nginx: 10 | depends_on: 11 | - proxy 12 | - varnish 13 | 14 | ckan: 15 | depends_on: 16 | - datapusher 17 | links: 18 | - datapusher 19 | 20 | image: viderum/ckan-cloud-docker:ckan-latest-honduras-theme 21 | build: 22 | args: 23 | EXTRA_PACKAGES: supervisor cron 24 | EXTRA_FILESYSTEM: "./overrides/honduras/filesystem/" 25 | POST_INSTALL: | 26 | install_standard_ckan_extension_github -r datopian/ckanext-s3filestore -b fix-null-content-type &&\ 27 | install_standard_ckan_extension_github -r ckan/ckanext-harvest -b 819706ae5dce5c62b2283fd8405fd8b6fa804355 &&\ 28 | install_standard_ckan_extension_github -r okfn/ckanext-geoview -b v0.0.14 &&\ 29 | install_standard_ckan_extension_github -r okfn/ckanext-pdfview -b lacounts-0.1 &&\ 30 | install_standard_ckan_extension_github -r ckan/ckanext-googleanalytics &&\ 31 | install_standard_ckan_extension_github -r ckan/ckanext-spatial &&\ 32 | install_standard_ckan_extension_github -r ckan/ckanext-scheming &&\ 33 | install_standard_ckan_extension_github -r ckan/ckanext-showcase &&\ 34 | install_standard_ckan_extension_github -r ckan/ckanext-pages -b bf25e93f98d1988db0614666afff11b14edd1b4b &&\ 35 | install_standard_ckan_extension_github -r okfn/ckanext-sentry &&\ 36 | install_standard_ckan_extension_github -r ckan/ckanext-dcat &&\ 37 | install_standard_ckan_extension_github -r okfn/ckanext-envvars &&\ 38 | install_standard_ckan_extension_github -r datopian/ckanext-honduras -b 0.0.21 39 | ROOT_INIT: | 40 | mkdir -p /var/log/supervisor 41 | chown ckan:ckan /var/log/supervisor 42 | POST_DOCKER_BUILD: | 43 | mkdir -p /var/log/ckan/std/ 44 | CKAN_INIT: | 45 | ckan-paster --plugin=ckanext-harvest harvester initdb -c "CKAN_CONFIG/production.ini" 46 | crontab /etc/crontab-harvester 47 | service supervisor start #rev1 48 | environment: 49 | - CKAN_CONFIG_TEMPLATE_PREFIX=honduras-theme- 50 | jobs: 51 | image: viderum/ckan-cloud-docker:ckan-latest-honduras-theme 52 | environment: 53 | - CKAN_CONFIG_TEMPLATE_PREFIX=honduras-theme- 54 | 55 | db: 56 | image: mdillon/postgis 57 | build: 58 | args: 59 | DB_INIT: | 60 | psql --dbname="ckan" -c "CREATE EXTENSION IF NOT EXISTS postgis; \ 61 | CREATE EXTENSION IF NOT EXISTS postgis_topology; \ 62 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; \ 63 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;" 64 | 65 | datapusher: 66 | container_name: datapusher 67 | -------------------------------------------------------------------------------- /.docker-compose.panama-theme.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 | proxy: 6 | ports: 7 | - "80:80" 8 | - "443:443" 9 | volumes: 10 | - ./traefik/traefik_custom_ssl.toml:/traefik.toml 11 | - ./traefik/certs:/certs 12 | 13 | nginx: 14 | depends_on: 15 | - proxy 16 | - varnish 17 | 18 | solr: 19 | build: 20 | context: solr 21 | args: 22 | SCHEMA_XML: schemas/schema26.xml 23 | 24 | ckan: 25 | depends_on: 26 | - datapusher 27 | links: 28 | - datapusher 29 | image: viderum/ckan-cloud-docker:ckan-latest-panama-theme 30 | build: 31 | context: ckan 32 | args: 33 | CKAN_BRANCH: ckan-2.6.6 34 | POST_INSTALL: | 35 | ckan-pip 
install --upgrade setuptools &&\ 36 | install_standard_ckan_extension_github -r ViderumGlobal/ckanext-panama -b v1.0.19 &&\ 37 | install_standard_ckan_extension_github -r ckan/ckanext-widgets &&\ 38 | install_standard_ckan_extension_github -r ckan/ckanext-pdfview &&\ 39 | install_standard_ckan_extension_github -r ckan/ckanext-geoview &&\ 40 | install_standard_ckan_extension_github -r datopian/ckanext-s3filestore -b v0.1.1-mimetype &&\ 41 | install_standard_ckan_extension_github -r ckan/ckanext-fluent &&\ 42 | install_standard_ckan_extension_github -r ckan/ckanext-scheming &&\ 43 | install_standard_ckan_extension_github -r keitaroinc/ckanext-showcase -b 26-fixes &&\ 44 | install_standard_ckan_extension_github -r datopian/ckanext-contact -b v1.0.1a &&\ 45 | install_standard_ckan_extension_github -r okfn/ckanext-spatial &&\ 46 | install_standard_ckan_extension_github -r ckan/ckanext-pages &&\ 47 | install_standard_ckan_extension_github -r okfn/ckanext-sentry &&\ 48 | install_standard_ckan_extension_github -r ckan/ckanext-disqus -b 709566b439df6a9cf45708c773c18a71b141f3ef &&\ 49 | install_standard_ckan_extension_github -r ckan/ckanext-googleanalytics &&\ 50 | install_standard_ckan_extension_github -r okfn/ckanext-envvars &&\ 51 | install_standard_ckan_extension_github -r ckan/ckanext-harvest -b v1.1.4 &&\ 52 | msgfmt ~/venv/src/ckanext-panama/i18n/es/LC_MESSAGES/ckan.po -o ~/venv//src/ckanext-panama/i18n/es/LC_MESSAGES/ckan.mo 53 | environment: 54 | - CKAN_CONFIG_TEMPLATE_PREFIX=panama-theme- 55 | 56 | jobs: 57 | image: viderum/ckan-cloud-docker:ckan-latest-panama-theme 58 | build: 59 | context: ckan 60 | args: 61 | CKAN_BRANCH: ckan-2.6.6 62 | POST_INSTALL: | 63 | install_standard_ckan_extension_github -r keitaroinc/ckanext-s3filestore &&\ 64 | install_standard_ckan_extension_github -r ckan/ckanext-geoview 65 | environment: 66 | - CKAN_CONFIG_TEMPLATE_PREFIX=panama-theme- 67 | 68 | db: 69 | image: mdillon/postgis 70 | build: 71 | args: 72 | DB_INIT: | 73 | psql --dbname="ckan" -c "CREATE EXTENSION IF NOT EXISTS postgis; \ 74 | CREATE EXTENSION IF NOT EXISTS postgis_topology; \ 75 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; \ 76 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;" 77 | datapusher: 78 | container_name: datapusher 79 | -------------------------------------------------------------------------------- /.docker-compose.vital-strategies-philippines-theme.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 | proxy: 6 | ports: 7 | - "80:80" 8 | - "443:443" 9 | 10 | nginx: 11 | depends_on: 12 | - proxy 13 | - varnish 14 | 15 | ckan: 16 | depends_on: 17 | - datapusher 18 | links: 19 | - datapusher 20 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-theme 21 | build: 22 | context: ckan 23 | args: 24 | CKAN_BRANCH: ckan-2.7.3 25 | EXTRA_PACKAGES: cron 26 | EXTRA_FILESYSTEM: "./overrides/vital-strategies/filesystem/" 27 | PRE_INSTALL: "sed -i -e 's/psycopg2==2.4.5/psycopg2==2.7.7/g' ~/venv/src/ckan/requirements.txt" 28 | POST_INSTALL: | 29 | install_standard_ckan_extension_github -r ViderumGlobal/ckanext-querytool -b v2.0.1 &&\ 30 | install_standard_ckan_extension_github -r ckan/ckanext-geoview && \ 31 | install_standard_ckan_extension_github -r ckan/ckanext-googleanalytics -b v2.0.2 && \ 32 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l en -f && \ 33 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l es -f && \ 34 | cd 
~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l fr -f && \ 35 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l km -f && \ 36 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l pt_BR -f && \ 37 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l zh_CN -f 38 | environment: 39 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-philippines-theme- 40 | 41 | jobs: 42 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-theme 43 | build: 44 | context: ckan 45 | args: 46 | CKAN_BRANCH: ckan-2.7.3 47 | POST_INSTALL: | 48 | install_standard_ckan_extension_github -r datopian/ckanext-querytool &&\ 49 | install_standard_ckan_extension_github -r ckan/ckanext-geoview 50 | environment: 51 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-philippines-theme- 52 | 53 | db: 54 | image: mdillon/postgis 55 | build: 56 | args: 57 | DB_INIT: | 58 | psql --dbname="ckan" -c "CREATE EXTENSION IF NOT EXISTS postgis; \ 59 | CREATE EXTENSION IF NOT EXISTS postgis_topology; \ 60 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; \ 61 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;" 62 | 63 | datapusher: 64 | container_name: datapusher 65 | environment: 66 | - DATAPUSHER_SSL_VERIFY=False 67 | -------------------------------------------------------------------------------- /.docker-compose.vital-strategies-shandong-theme.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 | proxy: 6 | ports: 7 | - "80:80" 8 | - "443:443" 9 | 10 | nginx: 11 | depends_on: 12 | - proxy 13 | - varnish 14 | 15 | ckan: 16 | depends_on: 17 | - datapusher 18 | links: 19 | - datapusher 20 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-theme 21 | build: 22 | context: ckan 23 | args: 24 | # Comment me out if working with Chinese server 25 | # PIP_INDEX_URL: https://pypi.tuna.tsinghua.edu.cn/simple 26 | GITHUB_URL: https://github.com.cnpmjs.org 27 | CKAN_BRANCH: ckan-2.7.3 28 | EXTRA_FILESYSTEM: "./overrides/vital-strategies/filesystem/" 29 | PRE_INSTALL: "sed -i -e 's/psycopg2==2.4.5/psycopg2==2.7.7/g' ~/venv/src/ckan/requirements.txt" 30 | POST_INSTALL: | 31 | install_standard_ckan_extension_github -r ViderumGlobal/ckanext-querytool -b v2.0.1 &&\ 32 | install_standard_ckan_extension_github -r ckan/ckanext-geoview && \ 33 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l en -f && \ 34 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l es -f && \ 35 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l fr -f && \ 36 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l pt_BR -f && \ 37 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l zh_CN -f 38 | environment: 39 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-shandong-theme- 40 | 41 | jobs: 42 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-theme 43 | build: 44 | context: ckan 45 | args: 46 | CKAN_BRANCH: ckan-2.7.3 47 | POST_INSTALL: | 48 | install_standard_ckan_extension_github -r keitaroinc/ckanext-s3filestore &&\ 49 | install_standard_ckan_extension_github -r ViderumGlobal/ckanext-querytool &&\ 50 | install_standard_ckan_extension_github -r ckan/ckanext-geoview 51 | environment: 52 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-shandong-theme- 53 | 54 | db: 55 | image: mdillon/postgis 56 | 
build: 57 | args: 58 | APK_REPOSITORY: https://mirrors.tuna.tsinghua.edu.cn/alpine/v3.6/community/ 59 | DB_INIT: | 60 | psql --dbname="ckan" -c "CREATE EXTENSION IF NOT EXISTS postgis; \ 61 | CREATE EXTENSION IF NOT EXISTS postgis_topology; \ 62 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; \ 63 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;" 64 | 65 | datapusher: 66 | container_name: datapusher 67 | build: 68 | context: datapusher/ 69 | args: 70 | APK_REPOSITORY: https://mirrors.tuna.tsinghua.edu.cn/alpine/v3.6/community/ 71 | -------------------------------------------------------------------------------- /.docker-compose.vital-strategies-shanghai-theme.yaml.disabled: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 | proxy: 6 | ports: 7 | - "80:80" 8 | - "443:443" 9 | 10 | nginx: 11 | depends_on: 12 | - proxy 13 | - varnish 14 | 15 | ckan: 16 | depends_on: 17 | - datapusher 18 | links: 19 | - datapusher 20 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-shanghai-theme 21 | build: 22 | context: ckan 23 | args: 24 | # Comment me out if working with Chinese server 25 | # PIP_INDEX_URL: https://pypi.tuna.tsinghua.edu.cn/simple 26 | CKAN_BRANCH: ckan-2.7.3 27 | EXTRA_FILESYSTEM: "./overrides/vital-strategies/filesystem/" 28 | PRE_INSTALL: "sed -i -e 's/psycopg2==2.4.5/psycopg2==2.7.7/g' ~/venv/src/ckan/requirements.txt" 29 | POST_INSTALL: | 30 | install_standard_ckan_extension_github -r ViderumGlobal/ckanext-querytool -b v1.2a75 &&\ 31 | install_standard_ckan_extension_github -r ckan/ckanext-geoview && \ 32 | install_standard_ckan_extension_github -r datopian/ckanext-vitalstrategies_shanghai -b v0.0.9 && \ 33 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l es -f && \ 34 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l fr -f && \ 35 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l pt_BR -f && \ 36 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l zh_CN -f 37 | environment: 38 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-shanghai-theme- 39 | 40 | jobs: 41 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-theme 42 | build: 43 | context: ckan 44 | args: 45 | CKAN_BRANCH: ckan-2.7.3 46 | POST_INSTALL: | 47 | install_standard_ckan_extension_github -r keitaroinc/ckanext-s3filestore &&\ 48 | install_standard_ckan_extension_github -r ViderumGlobal/ckanext-querytool &&\ 49 | install_standard_ckan_extension_github -r ckan/ckanext-geoview 50 | environment: 51 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-shanghai-theme- 52 | 53 | db: 54 | image: mdillon/postgis 55 | build: 56 | args: 57 | APK_REPOSITORY: https://mirrors.tuna.tsinghua.edu.cn/alpine/v3.6/community/ 58 | DB_INIT: | 59 | psql --dbname="ckan" -c "CREATE EXTENSION IF NOT EXISTS postgis; \ 60 | CREATE EXTENSION IF NOT EXISTS postgis_topology; \ 61 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; \ 62 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;" 63 | 64 | datapusher: 65 | container_name: datapusher 66 | build: 67 | context: datapusher/ 68 | args: 69 | APK_REPOSITORY: https://mirrors.tuna.tsinghua.edu.cn/alpine/v3.6/community/ 70 | -------------------------------------------------------------------------------- /.docker-compose.vital-strategies-theme.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 
| proxy: 6 | ports: 7 | - "80:80" 8 | - "443:443" 9 | 10 | nginx: 11 | depends_on: 12 | - proxy 13 | - varnish 14 | 15 | ckan: 16 | depends_on: 17 | - datapusher 18 | links: 19 | - datapusher 20 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-theme 21 | build: 22 | context: ckan 23 | args: 24 | CKAN_BRANCH: ckan-2.10.4 25 | EXTRA_PACKAGES: cron 26 | EXTRA_FILESYSTEM: "./overrides/vital-strategies/filesystem/" 27 | POST_INSTALL: | 28 | install_standard_ckan_extension_github -r datopian/ckanext-querytool -b v3.0.0 &&\ 29 | install_standard_ckan_extension_github -r ckan/ckanext-geoview && \ 30 | install_standard_ckan_extension_github -r datopian/ckanext-sentry -b 2.10 && \ 31 | install_standard_ckan_extension_github -r datopian/ckanext-gtm && \ 32 | install_standard_ckan_extension_github -r ckan/ckanext-googleanalytics -b 0055c3e06347c8ed31b7abe47aa06e3e44e0bf63 &&\ 33 | install_standard_ckan_extension_github -r datopian/ckanext-s3filestore -b ckan-2.10 && \ 34 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l en -f && \ 35 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l es -f && \ 36 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l fr -f && \ 37 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l km -f && \ 38 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l pt_BR -f && \ 39 | cd ~/venv/src/ckanext-querytool && ~/venv/bin/python setup.py compile_catalog -l zh_Hans_CN -f 40 | environment: 41 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-theme- 42 | #ports: # Uncomment to expose CKAN on localhost for development 43 | # - 5000:5000 44 | 45 | jobs: 46 | image: viderum/ckan-cloud-docker:ckan-latest-vital-strategies-theme 47 | build: 48 | context: ckan 49 | args: 50 | CKAN_BRANCH: ckan-2.10.4 51 | POST_INSTALL: | 52 | install_standard_ckan_extension_github -r datopian/ckanext-s3filestore -b ckan-2.10 &&\ 53 | install_standard_ckan_extension_github -r datopian/ckanext-querytool -b cc6c8e6f19f59e6842d370bf7ac87d94e37a2831 &&\ 54 | install_standard_ckan_extension_github -r ckan/ckanext-geoview 55 | environment: 56 | - CKAN_CONFIG_TEMPLATE_PREFIX=vital-strategies-theme- 57 | 58 | db: 59 | image: mdillon/postgis 60 | build: 61 | args: 62 | DB_INIT: | 63 | psql --dbname="ckan" -c "CREATE EXTENSION IF NOT EXISTS postgis; \ 64 | CREATE EXTENSION IF NOT EXISTS postgis_topology; \ 65 | CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; \ 66 | CREATE EXTENSION IF NOT EXISTS postgis_tiger_geocoder;" 67 | 68 | datapusher: 69 | container_name: datapusher 70 | environment: 71 | - DATAPUSHER_SSL_VERIFY=False 72 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Deployment 2 | on: [push] 3 | jobs: 4 | Deploy: 5 | runs-on: ubuntu-latest 6 | services: 7 | docker: 8 | image: docker 9 | steps: 10 | - name: Checkout 11 | uses: actions/checkout@v2.3.1 12 | - name: Setup Python 13 | uses: actions/setup-python@v2 14 | with: 15 | python-version: 3.7 16 | - name: Install pip 17 | run: | 18 | python -m pip install --upgrade pip 19 | - name: Deploy 20 | env: 21 | TRAVIS_REPO_SLUG: datopian/ckan-cloud-docker 22 | TRAVIS_BRANCH: ${{ env.GITHUB_SHA }} 23 | GITHUB_SHA: ${{ env.GITHUB_SHA }} 24 | DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} 25 | DOCKER_USER: ${{ secrets.DOCKER_USER }} 26 | 
GITHUB_WORKSPACE: ${{ env.GITHUB_WORKSPACE }} 27 | run: | 28 | mkdir -p $GITHUB_WORKSPACE/bin/ 29 | mv travis_ci_operator.sh $GITHUB_WORKSPACE/bin/travis_ci_operator.sh 30 | bash $GITHUB_WORKSPACE/bin/travis_ci_operator.sh init $GITHUB_WORKSPACE 31 | bash .travis.sh install 32 | bash .travis.sh script 33 | bash .travis.sh deploy $GITHUB_SHA 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *.cover 46 | .hypothesis/ 47 | .pytest_cache/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | db.sqlite3 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # Jupyter Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # SageMath parsed files 81 | *.sage.py 82 | 83 | # Environments 84 | .env 85 | .venv 86 | env/ 87 | venv/ 88 | ENV/ 89 | env.bak/ 90 | venv.bak/ 91 | 92 | # Spyder project settings 93 | .spyderproject 94 | .spyproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | 99 | # mkdocs documentation 100 | /site 101 | 102 | # mypy 103 | .mypy_cache/ 104 | 105 | .idea 106 | docker-compose.override.yaml 107 | 108 | docker-compose/*-secrets.sh 109 | 110 | # sanbdox 111 | sandbox 112 | 113 | # SSL certs 114 | traefik/certs 115 | 116 | # local test yamls 117 | .local-test-*.yaml -------------------------------------------------------------------------------- /.travis-ci-operator.yaml: -------------------------------------------------------------------------------- 1 | encryptedDockerPassword: KNKc72qckJRJxSLzxPEWDGKUSIlUvoOWOdtQopoie9MGv9VUjvzL5kwqOkNkYN5ONZU8RIzVRHywf787zC/qjdYoAOP4ZlDOTHszmIRj39uBAqu3qfri+KAqqG/mkt5xeBa0qNJHphmqEDofsplaDh9xkssvWvbABl8gJ8PyOzTwM5SR2lFW/kHzwyspxhj/l6tjZpYbOQeXQllBabRRzI4j2BRQbz+12cMM6doGR29V3yuGmjbSU5uXful5tSlKLfYqQ4Rf7g00mirLO3I2fKuRFZAqIRNpbV6dLIpSQc6xRnWvGQ2Rpkec3Y9O3dXMwVZyR2KaLHApg9VBDhaRy3H81ZJggVNlUYHKyS83E8oGf42BV9U5UPmoT5x8IVK3oy4zT63z6l5nLyGvf31DH/cKYanF1e9QwxXBPp18de8iJjKsEk76THxKpx8aZIbaniu+TLvBhBT+wvayL/FSqjGp6HS6W6wD8hFod7b9ia4AiTSj7haCAIsK4VouRtIA7LhZTvgABUnhzgeG1ASxYu/pMBj3jIIBib1+G6qfWVRU/Z/Htwm0d/viSncIuSOUb4CbZjgHZOcv5vPrr/IdSsv3dTzebtrDE4OW/iHnzr06yqyPA7TUYcP7QYlXeCpYChYKYUEuN/maaVF31MOGFX3jsQRdApBimcA1KpH3MZQ= 2 | encryptedDockerUser: 
KuBfWClkLm0SBUGaoPHW5zg8xrYy4c8qUHVuz0EEmCwxr1xKkUWkQUyyg27EQjw7HeVU6Rzg0I4PXV8tsX0aWujhDZm5ioHFuy5qDffVr8Cq7FaXxRnlVTxCPfF+6RYKYj2WU87Cvb3nuKe6y/SIj0WvWlmJKORYHNyPcB89S//9xrg26CNiuxRPaGvQVqlcHOHyQb1nrTJ0Vb0ZtyZIoqP8AQYmHSmKz2/Q0TE2FOUkiDDCnxVeKl19Go85DZIDvhQZqOJ4MAdLaqqm2OOP9Xykeg9qmhQ9XK3enkuK6nyxthFKgK2WcR2h52gICtQoVEei7TOAZZRXkyTVzxE15be303PBCvZU2UWHPzkvAtd0nwFPfCcRunMW6w1XWyiC2g+JOUHcWrhZG79noyTuUWAwwXZAXG2oi8yVI8bpKaHyNpoSj/2yCDm9nYPPOV6HsVQOAI9wjwwDjkdr4AG81AK5VjCcEbpYtmF2wU8Zi01YSMoAFu90Zy/Ci1avNfOTAvwwtFfdB2Vh9w7oYt4zdJJQqcuT7QaFV77+jm3xIh7bn6g0wKsBALFO0u/tw5GTrYQMUKW4hr0ODB11eIoRgIOsNr+oWta2EKVG+NSy+C7XiDlWknKclKiJgCMK7sWz+8NMNnej0dHNTvZpO80dmW0WzkeWZqgqKMc/liqSFl0= 3 | selfDeployKeyDecryptCmd: openssl aes-256-cbc -K $encrypted_3a25b31f1ad6_key -iv $encrypted_3a25b31f1ad6_iv 4 | -in .travis_ci_operator_self_github_deploy_key.id_rsa.enc -out .travis_ci_operator_self_github_deploy_key.id_rsa 5 | -d 6 | -------------------------------------------------------------------------------- /.travis.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | TRAVIS_TAG=${2} 3 | source cicd_functions.sh 4 | 5 | if [ "${1}" == "install" ]; then 6 | ! pull_latest_images && exit 1 7 | exit 0 8 | 9 | elif [ "${1}" == "script" ]; then 10 | ! (build_latest_images) && exit 1 11 | ! docker build -t viderum/ckan-theme-generator:latest ckan/themer && exit 1 12 | exit 0 13 | 14 | elif [ "${1}" == "deploy" ]; then 15 | echo "Logging in to Docker" 16 | $GITHUB_WORKSPACE/bin/travis_ci_operator.sh docker-login $GITHUB_WORKSPACE 17 | TAG="${TRAVIS_TAG:-${TRAVIS_COMMIT}}" 18 | ! tag_images "${TAG}" && echo "Failed to get tag: ${TAG}" && exit 1 19 | if [ "${TRAVIS_BRANCH}" == "master" ]; then 20 | ! push_latest_images && exit 1 21 | ! docker push viderum/ckan-theme-generator:latest && exit 1 22 | PUSHED_LATEST=1 23 | else 24 | PUSHED_LATEST=0 25 | fi 26 | ! push_tag_images "${TAG}" && exit 1 27 | print_summary "${TAG}" "${PUSHED_LATEST}" 28 | if [ "${TRAVIS_TAG}" != "" ]; then 29 | if ! [ -z "${SLACK_TAG_NOTIFICATION_CHANNEL}" ] && ! [ -z "${SLACK_TAG_NOTIFICATION_WEBHOOK_URL}" ]; then 30 | ! 
curl -X POST \ 31 | --data-urlencode "payload={\"channel\": \"#${SLACK_TAG_NOTIFICATION_CHANNEL}\", \"username\": \"CKAN Cloud\", \"text\": \"Released ckan-cloud-docker ${TAG}\nhttps://github.com/ViderumGlobal/ckan-cloud-docker/releases/tag/${TAG}\", \"icon_emoji\": \":female-technologist:\"}" \ 32 | ${SLACK_TAG_NOTIFICATION_WEBHOOK_URL} && exit 1 33 | fi 34 | fi 35 | exit 0 36 | 37 | fi 38 | 39 | echo unexpected failure 40 | exit 1 41 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: bash 2 | sudo: required 3 | env: 4 | global: 5 | - TRAVIS_CI_OPERATOR=1 6 | - secure: "KuBfWClkLm0SBUGaoPHW5zg8xrYy4c8qUHVuz0EEmCwxr1xKkUWkQUyyg27EQjw7HeVU6Rzg0I4PXV8tsX0aWujhDZm5ioHFuy5qDffVr8Cq7FaXxRnlVTxCPfF+6RYKYj2WU87Cvb3nuKe6y/SIj0WvWlmJKORYHNyPcB89S//9xrg26CNiuxRPaGvQVqlcHOHyQb1nrTJ0Vb0ZtyZIoqP8AQYmHSmKz2/Q0TE2FOUkiDDCnxVeKl19Go85DZIDvhQZqOJ4MAdLaqqm2OOP9Xykeg9qmhQ9XK3enkuK6nyxthFKgK2WcR2h52gICtQoVEei7TOAZZRXkyTVzxE15be303PBCvZU2UWHPzkvAtd0nwFPfCcRunMW6w1XWyiC2g+JOUHcWrhZG79noyTuUWAwwXZAXG2oi8yVI8bpKaHyNpoSj/2yCDm9nYPPOV6HsVQOAI9wjwwDjkdr4AG81AK5VjCcEbpYtmF2wU8Zi01YSMoAFu90Zy/Ci1avNfOTAvwwtFfdB2Vh9w7oYt4zdJJQqcuT7QaFV77+jm3xIh7bn6g0wKsBALFO0u/tw5GTrYQMUKW4hr0ODB11eIoRgIOsNr+oWta2EKVG+NSy+C7XiDlWknKclKiJgCMK7sWz+8NMNnej0dHNTvZpO80dmW0WzkeWZqgqKMc/liqSFl0=" 7 | - secure: "KNKc72qckJRJxSLzxPEWDGKUSIlUvoOWOdtQopoie9MGv9VUjvzL5kwqOkNkYN5ONZU8RIzVRHywf787zC/qjdYoAOP4ZlDOTHszmIRj39uBAqu3qfri+KAqqG/mkt5xeBa0qNJHphmqEDofsplaDh9xkssvWvbABl8gJ8PyOzTwM5SR2lFW/kHzwyspxhj/l6tjZpYbOQeXQllBabRRzI4j2BRQbz+12cMM6doGR29V3yuGmjbSU5uXful5tSlKLfYqQ4Rf7g00mirLO3I2fKuRFZAqIRNpbV6dLIpSQc6xRnWvGQ2Rpkec3Y9O3dXMwVZyR2KaLHApg9VBDhaRy3H81ZJggVNlUYHKyS83E8oGf42BV9U5UPmoT5x8IVK3oy4zT63z6l5nLyGvf31DH/cKYanF1e9QwxXBPp18de8iJjKsEk76THxKpx8aZIbaniu+TLvBhBT+wvayL/FSqjGp6HS6W6wD8hFod7b9ia4AiTSj7haCAIsK4VouRtIA7LhZTvgABUnhzgeG1ASxYu/pMBj3jIIBib1+G6qfWVRU/Z/Htwm0d/viSncIuSOUb4CbZjgHZOcv5vPrr/IdSsv3dTzebtrDE4OW/iHnzr06yqyPA7TUYcP7QYlXeCpYChYKYUEuN/maaVF31MOGFX3jsQRdApBimcA1KpH3MZQ=" 8 | services: 9 | - docker 10 | install: 11 | - curl -L https://raw.githubusercontent.com/OriHoch/travis-ci-operator/master/travis_ci_operator.sh > $HOME/bin/travis_ci_operator.sh 12 | - bash $HOME/bin/travis_ci_operator.sh init 13 | - bash .travis.sh install 14 | script: 15 | - bash .travis.sh script 16 | deploy: 17 | skip_cleanup: true 18 | provider: script 19 | script: bash .travis.sh deploy 20 | on: 21 | all_branches: true 22 | condition: $TRAVIS_PULL_REQUEST = "false" 23 | -------------------------------------------------------------------------------- /.travis_ci_operator_self_github_deploy_key.id_rsa.enc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/.travis_ci_operator_self_github_deploy_key.id_rsa.enc -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to CKAN Cloud Docker 2 | 3 | * Welcome to CKAN Cloud! 4 | * Contributions of any kind are welcome. 
5 | * Please [search for issues across the different CKAN Cloud repositories](https://github.com/search?q=repo%3AViderumGlobal%2Fckan-cloud-docker+repo%3AViderumGlobal%2Fckan-cloud-helm+repo%3AViderumGlobal%2Fckan-cloud-cluster&type=Issues) 6 | 7 | 8 | ## CI/CD 9 | 10 | * Docker images are built using Travis CI 11 | * On merge to the master branch, images are pushed to Docker Hub under the `viderum` organization 12 | * Scroll down to the end of the [latest successful master branch build log](https://travis-ci.org/ViderumGlobal/ckan-cloud-docker/branches) to get the latest image tags 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Viderum 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: start stop build pull shell down remove remove-images logs logs-less exec user sysadmin secret cron clean-rebuild 2 | 3 | COMPOSE_FILES = -f docker-compose.yaml -f .docker-compose-db.yaml -f .docker-compose.$O-theme.yaml 4 | 5 | DATAPUSHER_TYPE ?= datapusher-plus 6 | CKAN_DB_NAME ?= ckan 7 | CKAN_DB_USERNAME ?= ckan 8 | DB_USERNAME ?= postgres 9 | DATASTORE_DB_NAME ?= datastore 10 | DATASTORE_DB_USERNAME ?= postgres 11 | 12 | start: 13 | @export DATAPUSHER_DIRECTORY=$(DATAPUSHER_TYPE) && \ 14 | docker-compose $(COMPOSE_FILES) up -d --build nginx && make cron 15 | 16 | stop: 17 | docker-compose $(COMPOSE_FILES) stop 18 | 19 | build: 20 | @export DATAPUSHER_DIRECTORY=$(DATAPUSHER_TYPE) && \ 21 | docker-compose $(COMPOSE_FILES) build 22 | 23 | pull: 24 | docker-compose $(COMPOSE_FILES) pull 25 | 26 | shell: 27 | docker-compose $(COMPOSE_FILES) exec -it $S sh -c 'if command -v bash > /dev/null 2>&1; then exec bash; else exec sh; fi' 28 | 29 | down: 30 | docker-compose $(COMPOSE_FILES) down 31 | 32 | remove: 33 | docker-compose $(COMPOSE_FILES) down -v 34 | 35 | remove-images: 36 | docker images -a | grep "ckan-cloud-docker" | awk '{print $$3}' | xargs docker rmi -f 37 | 38 | logs: 39 | docker-compose $(COMPOSE_FILES) logs -f $S 40 | 41 | logs-less: 42 | docker-compose $(COMPOSE_FILES) logs $S | less 43 | 44 | exec: 45 | docker-compose $(COMPOSE_FILES) exec $S $C 46 | 47 | user: 48 | docker-compose $(COMPOSE_FILES) exec ckan ckan -c /etc/ckan/ckan.ini user add $U password=$P email=$E 49 | 50 | sysadmin: 51 | docker-compose $(COMPOSE_FILES) exec ckan ckan -c /etc/ckan/ckan.ini sysadmin add $U 52 | 53 | secret: 54 | python create_secrets.py 55 | 56 | cron: 57 | docker-compose $(COMPOSE_FILES) exec --user=root ckan service cron start 58 | 59 | clean-rebuild: 60 | docker-compose $(COMPOSE_FILES) down -v 61 | docker images -a | grep "ckan-cloud-docker" | awk '{print $$3}' | xargs -r docker rmi -f 62 | @export DATAPUSHER_DIRECTORY=$(DATAPUSHER_TYPE) && \ 63 | docker-compose $(COMPOSE_FILES) build --no-cache 64 | @export DATAPUSHER_DIRECTORY=$(DATAPUSHER_TYPE) && \ 65 | docker-compose $(COMPOSE_FILES) up -d nginx && make cron 66 | 67 | backup-db: 68 | docker-compose $(COMPOSE_FILES) exec -T db pg_dump -U postgres --format=custom -d ckan > ckan_test.dump 69 | docker-compose ${COMPOSE_FILES} exec -T ckan sh -c "cd /var/lib/ckan && tar -czf /tmp/ckan_data_test.tar.gz data" 70 | docker cp $$(docker-compose ${COMPOSE_FILES} ps -q ckan):/tmp/ckan_data_test.tar.gz ckan_data_test.tar.gz 71 | docker-compose $(COMPOSE_FILES) exec -T datastore-db pg_dump -U postgres --format=custom -d datastore > datastore_test.dump 72 | 73 | upgrade-db: 74 | ./db/migration/upgrade_databases.sh "$(COMPOSE_FILES)" "$(CKAN_DB_NAME)" "$(CKAN_DB_USERNAME)" "$(DB_USERNAME)" "$(DATASTORE_DB_NAME)" "$(DATASTORE_DB_USERNAME)" 75 | 76 | config-upgrade: 77 | ./configs_diff.sh 78 | -------------------------------------------------------------------------------- /cca-operator/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.6 2 | 3 | ARG PIP_INDEX_URL 4 | ENV PIP_INDEX_URL=$PIP_INDEX_URL 5 | RUN while ! 
( apk update && apk add --no-cache --update-cache --repository ${APK_REPOSITORY} --allow-untrusted \ 6 | bash python grep jq python3 libcurl git docker openssl curl ca-certificates wget \ 7 | openssh-server openssh-sftp-server postgresql-client \ 8 | ); do sleep 1; done &&\ 9 | wget -qO kubectl https://storage.googleapis.com/kubernetes-release/release/$(wget -qO - https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&\ 10 | chmod +x kubectl && mv ./kubectl /usr/local/bin/kubectl &&\ 11 | python3 -m pip install --index-url ${PIP_INDEX_URL:-https://pypi.org/simple/} pyyaml &&\ 12 | curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh &&\ 13 | chmod 700 get_helm.sh && ./get_helm.sh --version v2.11.0 && helm version --client && rm ./get_helm.sh 14 | 15 | COPY *.sh /cca-operator/ 16 | COPY *.py /cca-operator/ 17 | COPY *.template /cca-operator/ 18 | 19 | RUN chmod +x /cca-operator/*.sh /cca-operator/*.py 20 | 21 | WORKDIR /cca-operator 22 | 23 | ENTRYPOINT ["./cca-operator.sh"] 24 | -------------------------------------------------------------------------------- /cca-operator/README.md: -------------------------------------------------------------------------------- 1 | # cca-operator 2 | 3 | cca-operator manages, provisions, and configures CKAN Cloud components inside a [CKAN Cloud cluster](https://github.com/ViderumGlobal/ckan-cloud-cluster). 4 | 5 | ## Running cca-operator 6 | 7 | Build and run using docker-compose: 8 | 9 | ``` 10 | docker-compose build cca-operator && docker-compose run --rm cca-operator --help 11 | ``` 12 | 13 | cca-operator mounts the /etc/ckan-cloud directory from the host into the container. 14 | 15 | To use a different directory, create a .docker-compose.override.yaml file: 16 | 17 | ``` 18 | version: '3.2' 19 | services: 20 | cca-operator: 21 | volumes: 22 | - /path/to/custom/etc-ckan-cloud:/etc/ckan-cloud 23 | ``` 24 | 25 | ## Cluster Management 26 | 27 | Follow the [ckan-cloud-helm development quickstart](https://github.com/ViderumGlobal/ckan-cloud-helm/blob/master/QUICKSTART_DEVELOPMENT.md) 28 | to create the cluster, but don't create the CKAN namespace and don't deploy a CKAN instance.
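Before wiring cca-operator to that cluster, it can help to confirm the quickstart cluster is reachable from the host. This is a minimal sanity check, assuming the quickstart left a local kubeconfig context named `minikube` (adjust the context name if yours differs):

```
# verify the local cluster created by the quickstart is up and reachable
minikube status
kubectl --context minikube get nodes
```

If both commands report a ready node, continue with the kubeconfig mount below.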
29 | 30 | Set the following in .docker-compose.override.yaml to mount your local kubeconfig into cca-operator 31 | 32 | ``` 33 | version: '3.2' 34 | services: 35 | cca-operator: 36 | volumes: 37 | - /home/host-user-name/.kube:/root/.kube 38 | - /home/host-user-name/.minikube:/home/host-user-name/.minikube 39 | environment: 40 | - KUBE_CONTEXT=minikube 41 | ``` 42 | 43 | Verify that minikube is accessible via cca-operator 44 | 45 | ``` 46 | docker-compose run --rm --entrypoint kubectl cca-operator get nodes 47 | ``` 48 | 49 | Create a values file for the new instance: 50 | 51 | ``` 52 | INSTANCE_ID=test2 53 | curl https://raw.githubusercontent.com/ViderumGlobal/ckan-cloud-helm/master/minikube-values.yaml \ 54 | | tee /etc/ckan-cloud/${INSTANCE_ID}_values.yaml 55 | ``` 56 | 57 | Create the instance: 58 | 59 | ``` 60 | docker-compose build cca-operator && docker-compose run --rm cca-operator ./create-instance.sh $INSTANCE_ID 61 | ``` 62 | 63 | See the log output for accessing the instance 64 | 65 | Get the list of available cca-operator cluster management commands: 66 | 67 | ``` 68 | docker-compose build cca-operator && docker-compose run --rm cca-operator 69 | ``` 70 | 71 | 72 | ## CKAN Management 73 | 74 | This procedure allows to test the cca-operator ckan management tasks, such as managing CKAN secrets 75 | 76 | Follow the [ckan-cloud-helm developement quickstart](https://github.com/ViderumGlobal/ckan-cloud-helm/blob/master/QUICKSTART_DEVELOPMENT.md) to create the cluster and deploy a CKAN instance on it. 77 | 78 | Set the namespace of the deployed instance 79 | 80 | ``` 81 | export CKAN_NAMESPACE=test1 82 | ``` 83 | 84 | Define a shortcut function for running cca-operator 85 | 86 | ``` 87 | cca-operator() { 88 | kubectl --context minikube --namespace ${CKAN_NAMESPACE} run cca-operator \ 89 | --image=viderum/ckan-cloud-docker:cca-operator-latest \ 90 | --serviceaccount=ckan-${CKAN_NAMESPACE}-operator --attach --restart=Never --rm \ 91 | "$@" 92 | } 93 | ``` 94 | 95 | Delete secrets to re-create 96 | 97 | ``` 98 | kubectl --context minikube -n $CKAN_NAMESPACE delete secret ckan-env-vars ckan-secrets 99 | ``` 100 | 101 | Run the cca-operator CKAN commands: 102 | 103 | * create the ckan env vars secret: `cca-operator initialize-ckan-env-vars ckan-env-vars` 104 | * If you use the centralized infra, set the env vars: `--env CKAN_CLOUD_INSTANCE_ID=$CKAN_NAMESPACE --env CKAN_CLOUD_POSTGRES_HOST=db.ckan-cloud --env CKAN_CLOUD_POSTGRES_USER=postgres --env PGPASSWORD=123456 --env CKAN_CLOUD_SOLR_HOST=solr.ckan-cloud --env CKAN_CLOUD_SOLR_PORT=8983` 105 | * Initialize the CKAN secrets.sh: `cca-operator initialize-ckan-secrets ckan-env-vars ckan-secrets` 106 | * Write the CKAN secrets to secrets.sh: `cca-operator --command -- bash -c "./cca-operator.sh get-ckan-secrets ckan-secrets secrets.sh && cat secrets.sh"` 107 | 108 | 109 | ## cca-operator server 110 | 111 | Add the ssh key to the server 112 | 113 | ``` 114 | cat docker-compose/cca-operator/id_rsa.pub | docker-compose run --rm cca-operator ./add-server-authorized-key.sh 115 | ``` 116 | 117 | Start the server 118 | 119 | ``` 120 | docker-compose up -d --build cca-operator 121 | ``` 122 | 123 | Run cca-operator commands via ssh 124 | 125 | ``` 126 | ssh -o IdentitiesOnly=yes -i docker-compose/cca-operator/id_rsa -p 8022 root@localhost ./cca-operator.sh ./list-instances.sh 127 | ``` 128 | 129 | ## Creating a limited access user 130 | 131 | Generate an SSH key for the limited user 132 | 133 | ``` 134 | ssh-keygen -t rsa -b 4096 -C 
"continuous-deployment" -N "" -f continuous-deployment-id_rsa 135 | ``` 136 | 137 | Add the key to cca-operator server authorized keys 138 | 139 | ``` 140 | CCA_OPERATOR_ROLE=continuous-deployment 141 | 142 | cat continuous-deployment-id_rsa | docker-compose run --rm cca-operator ./add-server-authorized-key.sh "${CCA_OPERATOR_ROLE}" 143 | ``` 144 | 145 | The CCA_OPERATOR_ROLE environment variable is used in cca-operator code to limit access 146 | -------------------------------------------------------------------------------- /cca-operator/add-server-authorized-key.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo cat ~/.ssh/id_rsa.pub '|' ./add-server-authorized-key.sh && exit 0 4 | 5 | CCA_OPERATOR_ROLE="${1}" 6 | 7 | echo Adding authorized key $(! [ -z "${CCA_OPERATOR_ROLE}" ] && echo with limited role: $CCA_OPERATOR_ROLE) 8 | 9 | mkdir -p /etc/ckan-cloud/cca-operator && chmod 700 /etc/ckan-cloud && chmod 700 /etc/ckan-cloud/cca-operator && \ 10 | if [ -z "${CCA_OPERATOR_ROLE}" ]; then 11 | cat 12 | else 13 | echo 'command="export CCA_OPERATOR_ROLE='${CCA_OPERATOR_ROLE}'; ./cca-operator.sh ./cca-operator.py \"${SSH_ORIGINAL_COMMAND}\""' $(cat) 14 | fi >> /etc/ckan-cloud/cca-operator/sshd_authorized_keys 15 | [ "$?" != "0" ] && exit 1 16 | 17 | echo Added authorized key, restart cca-operator server for this change to take effect 18 | exit 0 19 | -------------------------------------------------------------------------------- /cca-operator/cca-operator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os, sys, yaml, datetime 3 | 4 | 5 | CCA_OPERATOR_ROLE = os.environ['CCA_OPERATOR_ROLE'] 6 | 7 | 8 | ADMIN_ROLES = ['', 'admin'] 9 | CONTINUOUS_DEPLOYMENT_ROLES = ADMIN_ROLES + ['continuous-deployment'] 10 | 11 | 12 | def print_stderr(*args): 13 | print(*args, file=sys.stderr) 14 | 15 | 16 | if sys.argv[1].startswith('patch-deployment ') and CCA_OPERATOR_ROLE in CONTINUOUS_DEPLOYMENT_ROLES: 17 | _, namespace, deployment, container, values_file, backup_dir, image_attrib, image = sys.argv[1].split(' ') 18 | with open(values_file) as f: 19 | values = yaml.load(f) 20 | os.system(f'mkdir -p {backup_dir}') 21 | backup_file = 'values_' + datetime.datetime.now().strftime('%Y-%m-%d_%H_%M_%s') + '.yaml' 22 | backup_file = os.path.join(backup_dir, backup_file) 23 | print_stderr(f'modifying values file {values_file}, saving backup to {backup_file}') 24 | with open(backup_file, 'w') as f: 25 | yaml.dump(values, f) 26 | values[image_attrib] = image 27 | with open(values_file, 'w') as f: 28 | yaml.dump(values, f) 29 | if deployment != '' and container != '': 30 | patch_params = f'deployment/{deployment} {container}={image}' 31 | print_stderr(f'patching {patch_params}') 32 | patch_cmd = f'kubectl set image -n {namespace} {patch_params}' 33 | if os.system(f'{patch_cmd} --dry-run') != 0: 34 | print_stderr('dry-run failed') 35 | exit(1) 36 | if os.system(f'{patch_cmd}') != 0: 37 | print_stderr('failed to patch deployment') 38 | exit(1) 39 | print_stderr('Great Success!') 40 | exit(0) 41 | else: 42 | print_stderr('Unexpected Error') 43 | exit(1) 44 | -------------------------------------------------------------------------------- /cca-operator/create-instance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./create-instance.sh '' && exit 0 4 | 5 
| source functions.sh 6 | ! cluster_management_init "${1}" && exit 1 7 | 8 | INSTANCE_ID="${1}" 9 | [ -z "${INSTANCE_ID}" ] && exit 1 10 | INSTANCE_NAMESPACE="${INSTANCE_ID}" 11 | CKAN_HELM_RELEASE_NAME="ckan-cloud-${INSTANCE_NAMESPACE}" 12 | 13 | kubectl $KUBECTL_GLOBAL_ARGS get ns "${INSTANCE_NAMESPACE}" && echo namespace ${INSTANCE_NAMESPACE} already exists && exit 1 14 | helm status $CKAN_HELM_RELEASE_NAME && echo Helm release ${CKAN_HELM_RELEASE_NAME} already exists && exit 1 15 | 16 | exec ./update-instance.sh "$@" -------------------------------------------------------------------------------- /cca-operator/datastore-permissions.sql.template: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE VIEW "_table_metadata" AS 2 | SELECT DISTINCT 3 | substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS "_id", 4 | dependee.relname AS name, 5 | dependee.oid AS oid, 6 | dependent.relname AS alias_of 7 | FROM 8 | pg_class AS dependee 9 | LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid 10 | LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid 11 | LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid 12 | WHERE 13 | (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND 14 | -- is a table (from pg_tables view definition) 15 | -- or is a view (from pg_views view definition) 16 | (dependee.relkind = 'r'::"char" OR dependee.relkind = 'v'::"char") 17 | AND dependee.relnamespace = ( 18 | SELECT oid FROM pg_namespace WHERE nspname='public') 19 | ORDER BY dependee.oid DESC; 20 | ALTER VIEW "_table_metadata" OWNER TO "{{SITE_USER}}"; 21 | GRANT SELECT ON "_table_metadata" TO "{{DS_RO_USER}}"; 22 | 23 | CREATE OR REPLACE FUNCTION populate_full_text_trigger() RETURNS trigger 24 | AS $body$ 25 | BEGIN 26 | IF NEW._full_text IS NOT NULL THEN 27 | RETURN NEW; 28 | END IF; 29 | NEW._full_text := ( 30 | SELECT to_tsvector(string_agg(value, ' ')) 31 | FROM json_each_text(row_to_json(NEW.*)) 32 | WHERE key NOT LIKE '\_%'); 33 | RETURN NEW; 34 | END; 35 | $body$ LANGUAGE plpgsql; 36 | ALTER FUNCTION populate_full_text_trigger() OWNER TO "{{SITE_USER}}"; 37 | 38 | DO $body$ 39 | BEGIN 40 | EXECUTE coalesce( 41 | (SELECT string_agg( 42 | 'CREATE TRIGGER zfulltext BEFORE INSERT OR UPDATE ON ' || 43 | quote_ident(relname) || ' FOR EACH ROW EXECUTE PROCEDURE ' || 44 | 'populate_full_text_trigger();', ' ') 45 | FROM pg_class 46 | LEFT OUTER JOIN pg_trigger AS t 47 | ON t.tgrelid = relname::regclass AND t.tgname = 'zfulltext' 48 | WHERE relkind = 'r'::"char" AND t.tgname IS NULL 49 | AND relnamespace = ( 50 | SELECT oid FROM pg_namespace WHERE nspname='public')), 51 | 'SELECT 1;'); 52 | END; 53 | $body$; 54 | -------------------------------------------------------------------------------- /cca-operator/delete-instance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./delete-instance.sh '' && exit 0 4 | 5 | source functions.sh 6 | ! cluster_management_init "${1}" && exit 1 7 | 8 | if kubectl $KUBECTL_GLOBAL_ARGS get ns "${INSTANCE_NAMESPACE}"; then 9 | echo Deleting instance namespace: ${INSTANCE_NAMESPACE} 10 | 11 | ! kubectl $KUBECTL_GLOBAL_ARGS -n ${INSTANCE_NAMESPACE} delete deployment ckan jobs --wait=false && echo WARNING: failed to delete ckan pods 12 | echo waiting 60 seconds to let ckan pods stop 13 | sleep 60 14 | ! 
kubectl $KUBECTL_GLOBAL_ARGS delete ns "${INSTANCE_NAMESPACE}" --wait=false && echo WARNING: failed to delete instance namespace 15 | echo waiting 60 seconds to let namespace terminate 16 | echo waiting for all pods to be removed from namespace 17 | while [ "$(kubectl get pods -n "${INSTANCE_NAMESPACE}" --no-headers | tee /dev/stderr | wc -l)" != "0" ]; do 18 | sleep 5 19 | echo . 20 | done 21 | 22 | echo WARNING! instance was not removed from the load balancer 23 | 24 | echo Instance namespace ${INSTANCE_NAMESPACE} terminated successfully 25 | else 26 | echo Instance namespace does not exist: ${INSTANCE_NAMESPACE} 27 | fi 28 | 29 | if helm status $CKAN_HELM_RELEASE_NAME; then 30 | ! helm delete --purge "${CKAN_HELM_RELEASE_NAME}" && exit 1 31 | else 32 | echo Helm release does not exist: ${CKAN_HELM_RELEASE_NAME} 33 | fi 34 | 35 | echo Instance deleted successfully: ${INSTANCE_ID} 36 | exit 0 37 | -------------------------------------------------------------------------------- /cca-operator/get-instance-values.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./get-instance-values.sh '' && exit 0 4 | 5 | source functions.sh 6 | ! cluster_management_init "${1}" >/dev/null 2>&1 && exit 1 7 | 8 | ! [ -e "${CKAN_VALUES_FILE}" ] && echo missing values file: ${CKAN_VALUES_FILE} && exit 1 9 | 10 | cat "${CKAN_VALUES_FILE}" 11 | -------------------------------------------------------------------------------- /cca-operator/instance-connection-info.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./instance-connection-info.sh '' && exit 0 4 | 5 | source functions.sh 6 | ! cluster_management_init "${1}" && exit 1 7 | 8 | CKAN_ADMIN_PASSWORD=$( \ 9 | get_secret_from_json "$(kubectl $KUBECTL_GLOBAL_ARGS -n "${INSTANCE_NAMESPACE}" get secret ckan-admin-password -o json)" \ 10 | "CKAN_ADMIN_PASSWORD" \ 11 | ) 12 | 13 | instance_connection_info "${INSTANCE_ID}" "${INSTANCE_NAMESPACE}" "$(instance_domain $CKAN_VALUES_FILE)" "${CKAN_ADMIN_PASSWORD}" 14 | -------------------------------------------------------------------------------- /cca-operator/instance-status.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./instance-status.sh '' && exit 0 4 | 5 | source functions.sh 6 | ! 
cluster_management_init "${1}" && exit 1 7 | 8 | if [ -e _instance_status_pods.yaml ]; then 9 | echo using cached copy from `pwd`/_instance_status_pods.yaml - delete to recreate 10 | PODS=`cat _instance_status_pods.yaml` 11 | else 12 | PODS=`instance_kubectl get all -o yaml` 13 | fi 14 | 15 | # for development 16 | # echo "${PODS}" > _instance_status_pods.yaml 17 | 18 | echo "${PODS}" | python3 -c ' 19 | 20 | import yaml, sys, os, datetime, subprocess, json 21 | from collections import OrderedDict 22 | 23 | def item_detailed_status(kind, name, app, item): 24 | item_status = {"name": name, "created_at": item["metadata"]["creationTimestamp"], "true_status_last_transitions": {}} 25 | if kind in ["Deployment", "ReplicaSet"]: 26 | item_status["generation"] = item["metadata"]["generation"] 27 | for condition in item["status"].get("conditions", []): 28 | assert condition["type"] not in item_status["true_status_last_transitions"] 29 | if condition["status"] == "True": 30 | item_status["true_status_last_transitions"][condition["type"]] = condition["lastTransitionTime"] 31 | else: 32 | item_status.setdefault("errors", []).append({ 33 | "kind": "failed_condition", 34 | "status": condition["status"], 35 | "reason": condition["reason"], 36 | "message": condition["message"], 37 | "last_transition": condition["lastTransitionTime"] 38 | }) 39 | if kind == "Pod" and app == "ckan": 40 | for container in ["secrets", "ckan"]: 41 | container_logs = subprocess.run("kubectl -n {} logs {} -c {}".format(os.environ["INSTANCE_NAMESPACE"], name, container), 42 | stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True).stdout 43 | for logline in container_logs.decode().split("--START_CKAN_CLOUD_LOG--")[1:]: 44 | logdata = json.loads(logline.split("--END_CKAN_CLOUD_LOG--")[0]) 45 | item_status.setdefault("ckan-cloud-logs", []).append(logdata) 46 | return item_status 47 | 48 | status = {} 49 | for item in yaml.load(sys.stdin)["items"]: 50 | kind = item["kind"] 51 | name = item["metadata"]["name"] 52 | if kind in ["Pod", "Deployment", "ReplicaSet"]: 53 | app = item["metadata"]["labels"]["app"] 54 | elif kind == "Service": 55 | app = item["metadata"]["name"] 56 | else: 57 | app = None 58 | if app in ["ckan", "jobs-db", "redis", "nginx", "jobs"]: 59 | app_status = status.setdefault(app, {}) 60 | else: 61 | app_status = status.setdefault("unknown", {}) 62 | item_status = item_detailed_status(kind, name, app, item) 63 | app_status.setdefault("{}s".format(kind.lower()), []).append(item_status) 64 | 65 | print(yaml.dump(status, default_flow_style=False)) 66 | print("---") 67 | print(yaml.dump({ 68 | "ckan_instance_id": os.environ["INSTANCE_ID"], 69 | "namespace": os.environ["INSTANCE_NAMESPACE"], 70 | "status_generated_at": datetime.datetime.now(), 71 | "status_generated_from": subprocess.check_output("hostname").decode().strip() 72 | }, default_flow_style=False)) 73 | ' 74 | -------------------------------------------------------------------------------- /cca-operator/kubectl.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./kubectl.sh '[kubectl args..]' && exit 0 4 | 5 | source functions.sh 6 | ! 
kubectl_init >/dev/null 2>&1 && exit 1 7 | 8 | exec kubectl $KUBECTL_GLOBAL_ARGS "$@" 9 | -------------------------------------------------------------------------------- /cca-operator/list-instances.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./list-instances.sh && exit 0 4 | 5 | source functions.sh 6 | ! kubectl_init && exit 1 7 | 8 | kubectl $KUBECTL_GLOBAL_ARGS get pods -l app=ckan --all-namespaces -o yaml \ 9 | | python3 -c ' 10 | 11 | import yaml, sys, glob 12 | 13 | pod_phases = {} 14 | 15 | for pod in yaml.load(sys.stdin)["items"]: 16 | if pod["status"]["phase"] != "Running": 17 | pod_phase = pod["status"]["phase"] 18 | pod_active = False 19 | elif not pod["status"]["containerStatuses"][0]["ready"]: 20 | pod_phase = "not ready" 21 | pod_active = False 22 | else: 23 | pod_phase = pod["status"]["phase"] 24 | pod_active = True 25 | 26 | instance_id = pod["metadata"]["namespace"] 27 | pod_phases[instance_id] = { 28 | "ckanPhase": pod_phase, 29 | "active": pod_active, 30 | "valuesFile": f"/etc/ckan-cloud/{instance_id}_values.yaml" 31 | } 32 | 33 | values_files = [p["valuesFile"] for p in pod_phases.values()] 34 | 35 | values_without_pod = [] 36 | for values_file in glob.glob("/etc/ckan-cloud/*_values.yaml"): 37 | if values_file not in values_files: 38 | values_without_pod.append(values_file) 39 | 40 | print("# pod_phases") 41 | print(yaml.dump(pod_phases, default_flow_style=False)) 42 | print("------") 43 | print("# value files without pod") 44 | print(yaml.dump(values_without_pod, default_flow_style=False)) 45 | 46 | ' 47 | -------------------------------------------------------------------------------- /cca-operator/recreate-instance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./recreate-instance.sh '' && exit 0 4 | 5 | source functions.sh 6 | ! kubectl_init && exit 1 7 | 8 | INSTANCE_ID="${1}" 9 | [ -z "${INSTANCE_ID}" ] && exit 1 10 | INSTANCE_NAMESPACE="${INSTANCE_ID}" 11 | 12 | ./delete-instance.sh "${INSTANCE_ID}" &&\ 13 | while kubectl $KUBECTL_GLOBAL_ARGS get ns "${INSTANCE_NAMESPACE}"; do 14 | echo . 15 | sleep 2 16 | done &&\ 17 | ./create-instance.sh "${INSTANCE_ID}" 18 | -------------------------------------------------------------------------------- /cca-operator/server.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo ./server.sh && exit 0 4 | 5 | if ! [ -e /etc/ckan-cloud/cca-operator/sshd_authorized_keys ]; then 6 | mkdir -p /etc/ckan-cloud/cca-operator &&\ 7 | touch /etc/ckan-cloud/cca-operator/sshd_authorized_keys 8 | [ "$?" != "0" ] && exit 1 9 | fi 10 | 11 | if ! [ -e /etc/ssh/ssh_host_rsa_key ]; then ! ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N "" && exit 1; fi 12 | if ! [ -e /etc/ssh/ssh_host_dsa_key ]; then ! ssh-keygen -t dsa -f /etc/ssh/ssh_host_dsa_key -N "" && exit 1; fi 13 | if ! [ -e /etc/ssh/ssh_host_ecdsa_key ]; then ! ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -N "" && exit 1; fi 14 | if ! [ -e /etc/ssh/ssh_host_ed25519_key ]; then ! 
ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N "" && exit 1; fi 15 | 16 | mkdir -p /root/.ssh &&\ 17 | cp /etc/ckan-cloud/cca-operator/sshd_authorized_keys /root/.ssh/authorized_keys &&\ 18 | chmod 600 /root/.ssh/authorized_keys &&\ 19 | echo '#!/usr/bin/env bash 20 | export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 21 | export KUBECONFIG='"${KUBECONFIG}"' 22 | source /etc/ckan-cloud/.cca_operator-secrets.env 23 | cd /cca-operator 24 | exec "$@"' > /root/cca-operator.sh && chmod +x /root/cca-operator.sh 25 | 26 | /usr/sbin/sshd -E /var/log/sshd.log &&\ 27 | exec tail -f /var/log/sshd.log 28 | -------------------------------------------------------------------------------- /cca-operator/set-instance-values.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | [ "${1}" == "--help" ] && echo cat values.yaml '|' ./set-instance-values.sh '' && exit 0 4 | 5 | source functions.sh 6 | ! cluster_management_init "${1}" && exit 1 7 | 8 | if [ -e "${CKAN_VALUES_FILE}" ]; then 9 | mkdir -p "/etc/ckan-cloud/backups/${INSTANCE_ID}/values" 10 | ! mv "${CKAN_VALUES_FILE}" "/etc/ckan-cloud/backups/${INSTANCE_ID}/values/`date +%Y%m%d%H%M%s`.yaml" && exit 1 11 | fi 12 | 13 | ! cat > "${CKAN_VALUES_FILE}" && echo failed to set instance values && exit 1 14 | 15 | echo Stored values for instance ${INSTANCE_ID} 16 | exit 0 17 | -------------------------------------------------------------------------------- /cca-operator/templater.sh: -------------------------------------------------------------------------------- 1 | # https://github.com/johanhaleby/bash-templater/commit/5ac655d554238ac70b08ee4361d699ea9954c941 2 | readonly PROGNAME=$(basename $0) 3 | config_file="" 4 | print_only="false" 5 | silent="false" 6 | [ $# -eq 0 ] && exit 1 7 | [[ ! -f "${1}" ]] && exit 1 8 | template="${1}" 9 | if [ "$#" -ne 0 ]; then 10 | while [ "$#" -gt 0 ] 11 | do 12 | case "$1" in 13 | -p|--print) 14 | print_only="true" 15 | ;; 16 | -f|--file) 17 | config_file="$2" 18 | ;; 19 | -s|--silent) 20 | silent="true" 21 | ;; 22 | --) 23 | break 24 | ;; 25 | -*) 26 | exit 1 27 | ;; 28 | *) ;; 29 | esac 30 | shift 31 | done 32 | fi 33 | vars=$(grep -oE '\{\{[A-Za-z0-9_]+\}\}' "${template}" | sort | uniq | sed -e 's/^{{//' -e 's/}}$//') 34 | if [[ -z "$vars" ]]; then 35 | if [ "$silent" == "false" ]; then 36 | echo "Warning: No variable was found in ${template}, syntax is {{VAR}}" >&2 37 | fi 38 | fi 39 | if [ "${config_file}" != "" ]; then 40 | if [[ ! 
-f "${config_file}" ]]; then 41 | echo "The file ${config_file} does not exists" >&2 42 | echo "$usage" 43 | exit 1 44 | fi 45 | tmpfile=`mktemp` 46 | sed -e "s;\&;\\\&;g" -e "s;\ ;\\\ ;g" "${config_file}" > $tmpfile 47 | source $tmpfile 48 | fi 49 | var_value() { 50 | eval echo \$$1 51 | } 52 | replaces="" 53 | defaults=$(grep -oE '^\{\{[A-Za-z0-9_]+=.+\}\}' "${template}" | sed -e 's/^{{//' -e 's/}}$//') 54 | for default in $defaults; do 55 | var=$(echo "$default" | grep -oE "^[A-Za-z0-9_]+") 56 | current=`var_value $var` 57 | if [[ -z "$current" ]]; then 58 | eval $default 59 | fi 60 | replaces="-e '/^{{$var=/d' $replaces" 61 | vars="$vars 62 | $current" 63 | done 64 | vars=$(echo $vars | sort | uniq) 65 | if [[ "$print_only" == "true" ]]; then 66 | for var in $vars; do 67 | value=`var_value $var` 68 | echo "$var = $value" 69 | done 70 | exit 0 71 | fi 72 | for var in $vars; do 73 | value=`var_value $var` 74 | if [[ -z "$value" ]]; then 75 | if [ $silent == "false" ]; then 76 | echo "Warning: $var is not defined and no default is set, replacing by empty" >&2 77 | fi 78 | fi 79 | value=$(echo "$value" | sed 's/\//\\\//g'); 80 | replaces="-e 's/{{$var}}/${value}/g' $replaces" 81 | done 82 | escaped_template_path=$(echo $template | sed 's/ /\\ /g') 83 | eval sed $replaces "$escaped_template_path" 84 | -------------------------------------------------------------------------------- /cicd_functions.sh: -------------------------------------------------------------------------------- 1 | DOCKER_IMAGE=viderum/ckan-cloud-docker 2 | BUILD_APPS="ckan cca-operator jenkins nginx db solr" 3 | BUILD_CKAN_OVERRIDES="1" 4 | BUILD_SOLR_OVERRIDES="1" 5 | 6 | touch docker-compose/ckan-secrets.sh docker-compose/datastore-db-secrets.sh docker-compose/db-secrets.sh docker-compose/provisioning-api-db-secrets.sh docker-compose/provisioning-api-secrets.sh 7 | 8 | exec_build_apps() { 9 | for APP in $BUILD_APPS; do 10 | APP_LATEST_IMAGE="${DOCKER_IMAGE}:${APP}-latest" 11 | ! eval "${1}" && return 1 12 | done 13 | return 0 14 | } 15 | 16 | get_ckan_compose_ovverride_name() { 17 | echo "${1}" | python -c "import sys; print(sys.stdin.read().split('.')[2])" 18 | } 19 | 20 | exec_ckan_compose_overrides() { 21 | for DOCKER_COMPOSE_OVERRIDE in `ls .docker-compose.*.yaml`; do 22 | OVERRIDE_NAME=$(get_ckan_compose_ovverride_name "${DOCKER_COMPOSE_OVERRIDE}") 23 | echo "OVERRIDE ${OVERRIDE_NAME}" 24 | ! eval "${1}" && return 1 25 | done 26 | return 0 27 | } 28 | 29 | pull_latest_images() { 30 | echo -e "\n** Pulling latest images **\n" 31 | exec_build_apps 'docker-compose pull $APP' 32 | if [ "${BUILD_CKAN_OVERRIDES}" == "1" ]; then 33 | exec_ckan_compose_overrides 'docker pull "${DOCKER_IMAGE}:ckan-latest-${OVERRIDE_NAME}"' 34 | fi 35 | if [ "${BUILD_SOLR_OVERRIDES}" == "1" ]; then 36 | docker pull "${DOCKER_IMAGE}:solrcloud-latest" 37 | fi 38 | return 0 39 | } 40 | 41 | build_latest_images() { 42 | echo -e "\n** Building latest images **\n" 43 | ! exec_build_apps 'docker-compose -f docker-compose.yaml -f .docker-compose-db.yaml -f .docker-compose-cache-from.yaml build $APP' && return 1 44 | if [ "${BUILD_CKAN_OVERRIDES}" == "1" ]; then 45 | ! exec_ckan_compose_overrides ' 46 | docker-compose -f docker-compose.yaml \ 47 | -f .docker-compose-db.yaml \ 48 | -f .docker-compose-cache-from.yaml \ 49 | -f .docker-compose.${OVERRIDE_NAME}.yaml build ckan 50 | ' && return 1 51 | fi 52 | if [ "${BUILD_SOLR_OVERRIDES}" == "1" ]; then 53 | ! 
docker-compose -f docker-compose.yaml -f .docker-compose-db.yaml \ 54 | -f .docker-compose-cache-from.yaml \ 55 | -f .docker-compose-centralized.yaml build solr && return 1 56 | fi 57 | return 0 58 | } 59 | 60 | tag_images() { 61 | [ -z "${1}" ] && return 1 62 | export TAG_SUFFIX="${1}" 63 | echo -e "\n** Tagging images with tag suffix ${TAG_SUFFIX} **\n" 64 | ! exec_build_apps ' 65 | docker tag "${APP_LATEST_IMAGE}" "${DOCKER_IMAGE}:${APP}-${TAG_SUFFIX}" &&\ 66 | echo tagged ${APP} latest image: ${DOCKER_IMAGE}:${APP}-${TAG_SUFFIX} 67 | ' && return 1 68 | if [ "${BUILD_CKAN_OVERRIDES}" == "1" ]; then 69 | ! exec_ckan_compose_overrides ' 70 | docker tag "${DOCKER_IMAGE}:ckan-latest-${OVERRIDE_NAME}" \ 71 | "${DOCKER_IMAGE}:ckan-${TAG_SUFFIX}-${OVERRIDE_NAME}" &&\ 72 | echo tagged ckan override ${OVERRIDE_NAME} latest image: ${DOCKER_IMAGE}:ckan-${TAG_SUFFIX}-${OVERRIDE_NAME} 73 | ' && return 1 74 | fi 75 | if [ "${BUILD_SOLR_OVERRIDES}" == "1" ]; then 76 | docker tag "${DOCKER_IMAGE}:solrcloud-latest" \ 77 | "${DOCKER_IMAGE}:solrcloud-${TAG_SUFFIX}" &&\ 78 | echo tagged solrcloud latest image: ${DOCKER_IMAGE}:solrcloud-${TAG_SUFFIX} 79 | [ "$?" != "0" ] && return 1 80 | fi 81 | return 0 82 | } 83 | 84 | push_latest_images() { 85 | echo -e "\n** Pushing latest images **\n" 86 | ! exec_build_apps ' 87 | docker push "${DOCKER_IMAGE}:${APP}-latest" 88 | ' && return 1 89 | if [ "${BUILD_CKAN_OVERRIDES}" == "1" ]; then 90 | ! exec_ckan_compose_overrides ' 91 | docker push "${DOCKER_IMAGE}:ckan-latest-${OVERRIDE_NAME}" 92 | ' && return 1 93 | fi 94 | if [ "${BUILD_SOLR_OVERRIDES}" == "1" ]; then 95 | ! docker push "${DOCKER_IMAGE}:solrcloud-latest" && return 1 96 | fi 97 | return 0 98 | } 99 | 100 | push_tag_images() { 101 | [ -z "${1}" ] && return 1 102 | export TAG_SUFFIX="${1}" 103 | echo -e "\n** Pushing tag images: ${TAG_SUFFIX} **\n" 104 | ! exec_build_apps ' 105 | docker push "${DOCKER_IMAGE}:${APP}-${TAG_SUFFIX}" 106 | ' && return 1 107 | if [ "${BUILD_CKAN_OVERRIDES}" == "1" ]; then 108 | ! exec_ckan_compose_overrides ' 109 | docker push "${DOCKER_IMAGE}:ckan-${TAG_SUFFIX}-${OVERRIDE_NAME}" 110 | ' && return 1 111 | fi 112 | if [ "${BUILD_SOLR_OVERRIDES}" == "1" ]; then 113 | ! 
docker push "${DOCKER_IMAGE}:solrcloud-${TAG_SUFFIX}" && return 1 114 | fi 115 | return 0 116 | } 117 | 118 | print_summary() { 119 | [ -z "${1}" ] && return 1 120 | [ -z "${2}" ] && return 1 121 | export TAG_SUFFIX="${1}" 122 | export PUSHED_LATEST="${2}" 123 | echo -e "\n** Published docker images **\n" 124 | if [ "${PUSHED_LATEST}" == "1" ]; then 125 | exec_build_apps 'echo "${DOCKER_IMAGE}:${APP}-latest"' 126 | if [ "${BUILD_CKAN_OVERRIDES}" == "1" ]; then 127 | exec_ckan_compose_overrides 'echo "${DOCKER_IMAGE}:ckan-latest-${OVERRIDE_NAME}"' 128 | fi 129 | if [ "${BUILD_SOLR_OVERRIDES}" == "1" ]; then 130 | echo "${DOCKER_IMAGE}:solrcloud-latest" 131 | fi 132 | fi 133 | exec_build_apps 'echo "${DOCKER_IMAGE}:${APP}-${TAG_SUFFIX}"' 134 | if [ "${BUILD_CKAN_OVERRIDES}" == "1" ]; then 135 | exec_ckan_compose_overrides 'echo "${DOCKER_IMAGE}:ckan-${TAG_SUFFIX}-${OVERRIDE_NAME}"' 136 | fi 137 | if [ "${BUILD_SOLR_OVERRIDES}" == "1" ]; then 138 | echo "${DOCKER_IMAGE}:solrcloud-${TAG_SUFFIX}" 139 | fi 140 | return 0 141 | } 142 | -------------------------------------------------------------------------------- /ckan/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source $CKAN_K8S_SECRETS &&\ 4 | rm -f $CKAN_CONFIG/*.ini &&\ 5 | cp -f $CKAN_K8S_TEMPLATES/${CKAN_WHO_TEMPLATE_PREFIX}who.ini $CKAN_CONFIG/who.ini &&\ 6 | bash /templater.sh $CKAN_K8S_TEMPLATES/${CKAN_CONFIG_TEMPLATE_PREFIX}ckan.ini.template > $CKAN_CONFIG/ckan.ini &&\ 7 | echo 'ckan.ini:' && cat $CKAN_CONFIG/ckan.ini &&\ 8 | bash /templater.sh $CKAN_K8S_TEMPLATES/${CKAN_INIT_TEMPLATE_PREFIX}ckan_init.sh.template > $CKAN_CONFIG/ckan_init.sh &&\ 9 | echo 'ckan_init.sh:' && cat $CKAN_CONFIG/ckan_init.sh &&\ 10 | bash $CKAN_CONFIG/ckan_init.sh 11 | CKAN_CONFIG_PATH="$CKAN_CONFIG/ckan.ini" 12 | 13 | [ "$?" != "0" ] && echo ERROR: CKAN Initialization failed: $? && exit 1 14 | 15 | echo '--START_CKAN_CLOUD_LOG--{"event":"ckan-entrypoint-initialized"}--END_CKAN_CLOUD_LOG--' >/dev/stderr 16 | 17 | if [ "$DEBUG_MODE" == "TRUE" ]; then 18 | sleep 300 19 | fi 20 | 21 | if [ "$*" == "" ]; then 22 | echo running ckan db init &&\ 23 | ckan -c ${CKAN_CONFIG_PATH} db init &&\ 24 | echo db initialization complete 25 | [ "$?" != "0" ] && echo ERROR: DB Initialization failed && exit 1 26 | 27 | echo '--START_CKAN_CLOUD_LOG--{"event":"ckan-entrypoint-db-init-success"}--END_CKAN_CLOUD_LOG--' >/dev/stderr 28 | 29 | echo running ckan_extra_init &&\ 30 | . $CKAN_CONFIG/ckan_extra_init.sh &&\ 31 | echo ckan_extra_init complete 32 | [ "$?" 
!= "0" ] && echo ERROR: CKAN extra initialization failed && exit 1 33 | 34 | echo '--START_CKAN_CLOUD_LOG--{"event":"ckan-entrypoint-extra-init-success"}--END_CKAN_CLOUD_LOG--' >/dev/stderr 35 | 36 | ## Generate a random password 37 | #RANDOM_PASSWORD=$(< /dev/urandom tr -dc A-Za-z0-9 | head -c 12) 38 | 39 | #echo "Creating system admin user 'ckan_admin'" 40 | #yes y | ckan -c $CKAN_CONFIG_PATH sysadmin add ckan_admin email=ckan_admin@localhost password=$RANDOM_PASSWORD 41 | #echo "Setting up ckan.datapusher.api_token in the CKAN config file $CKAN_CONFIG_PATH" 42 | #CKAN_API_KEY=$(ckan -c $CKAN_CONFIG_PATH user token add ckan_admin datapusher | tail -n 1 | tr -d '\t') 43 | #echo "CKAN_API_KEY: $CKAN_API_KEY" 44 | #ckan config-tool $CKAN_CONFIG_PATH "ckan.datapusher.api_token=$CKAN_API_KEY" 45 | #cat $CKAN_CONFIG_PATH | grep ckan.datapusher.api_token 46 | 47 | #ckan config-tool $CKAN_CONFIG_PATH -e "ckan.plugins = image_view text_view recline_view datastore datapusher resource_proxy geojson_view querytool stats" 48 | 49 | source /usr/lib/ckan/venv/bin/activate 50 | 51 | export CKAN_INI=$CKAN_CONFIG_PATH 52 | export PYTHONPATH=/usr/lib/ckan/venv:$PYTHONPATH 53 | 54 | # Set the common uwsgi options 55 | UWSGI_OPTS="--plugins-dir /usr/lib/uwsgi/plugins \ 56 | --plugins http \ 57 | --socket /tmp/uwsgi.sock \ 58 | --wsgi-file /usr/lib/ckan/venv/wsgi.py \ 59 | --module wsgi:application \ 60 | --callable application \ 61 | --virtualenv /usr/lib/ckan/venv \ 62 | --uid 900 --gid 900 \ 63 | --http [::]:5000 \ 64 | --master --enable-threads \ 65 | --lazy-apps \ 66 | -p 2 -L -b 32768 --vacuum \ 67 | --harakiri 300" 68 | 69 | # Start supervisord 70 | supervisord --configuration /etc/supervisord.conf & 71 | # Start uwsgi 72 | uwsgi $UWSGI_OPTS 73 | 74 | else 75 | sleep 180 76 | exec "$@" 77 | fi 78 | -------------------------------------------------------------------------------- /ckan/overrides/datagov/filesystem/etc/crontab-harvester: -------------------------------------------------------------------------------- 1 | # m h dom mon dow command 2 | */15 * * * * /usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester run --config=/etc/ckan/production.ini 3 | 0 5 * * * /usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester clean_harvest_log --config=/etc/ckan/production.ini 4 | -------------------------------------------------------------------------------- /ckan/overrides/datagov/filesystem/etc/patches/ckan/search-use-requests.patch: -------------------------------------------------------------------------------- 1 | diff --git a/ckan/lib/search/__init__.py b/ckan/lib/search/__init__.py 2 | index dbc8ac12c..487a32118 100644 3 | --- a/ckan/lib/search/__init__.py 4 | +++ b/ckan/lib/search/__init__.py 5 | @@ -5,7 +5,7 @@ import sys 6 | import cgitb 7 | import warnings 8 | import xml.dom.minidom 9 | -import urllib2 10 | +import requests 11 | 12 | from paste.deploy.converters import asbool 13 | 14 | @@ -284,16 +284,14 @@ def check_solr_schema_version(schema_file=None): 15 | 16 | url = solr_url.strip('/') + SOLR_SCHEMA_FILE_OFFSET 17 | 18 | - req = urllib2.Request(url=url) 19 | + headers = {} 20 | if http_auth: 21 | - req.add_header('Authorization', http_auth) 22 | - 23 | - res = urllib2.urlopen(req) 24 | + headers['Authorization'] = http_auth 25 | + res = requests.get(url, headers=headers).text 26 | else: 27 | - url = 'file://%s' % schema_file 28 | - res = urllib2.urlopen(url) 29 | + res = open(schema_file).read() 30 | 31 | - tree = xml.dom.minidom.parseString(res.read()) 32 | + tree = 
xml.dom.minidom.parseString(res) 33 | 34 | version = tree.documentElement.getAttribute('version') 35 | if not len(version): 36 | -------------------------------------------------------------------------------- /ckan/overrides/datagov/filesystem/etc/supervisor/conf.d/ckan_harvesting.conf: -------------------------------------------------------------------------------- 1 | ; =============================== 2 | ; ckan harvester 3 | ; =============================== 4 | [program:ckan_gather_consumer] 5 | 6 | command=/usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester gather_consumer --config=/etc/ckan/production.ini 7 | 8 | ; user that owns virtual environment. 9 | user=ckan 10 | 11 | numprocs=1 12 | stdout_logfile=/var/log/ckan/std/gather_consumer.log 13 | stderr_logfile=/var/log/ckan/std/gather_consumer.log 14 | autostart=true 15 | autorestart=true 16 | startsecs=10 17 | 18 | [program:ckan_fetch_consumer] 19 | 20 | command=/usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester fetch_consumer --config=/etc/ckan/production.ini 21 | 22 | ; user that owns virtual environment. 23 | user=ckan 24 | 25 | numprocs=1 26 | stdout_logfile=/var/log/ckan/std/fetch_consumer.log 27 | stderr_logfile=/var/log/ckan/std/fetch_consumer.log 28 | autostart=true 29 | autorestart=true 30 | startsecs=10 31 | -------------------------------------------------------------------------------- /ckan/overrides/datagov/filesystem/etc/supervisor/supervisord.conf: -------------------------------------------------------------------------------- 1 | ; supervisor config file 2 | 3 | [unix_http_server] 4 | file=/var/tmp/supervisord.sock 5 | chmod=0770 6 | chown=ckan:ckan 7 | 8 | 9 | [supervisord] 10 | logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log) 11 | pidfile=/var/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid) 12 | childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP) 13 | 14 | ; the below section must remain in the config file for RPC 15 | ; (supervisorctl/web interface) to work, additional interfaces may be 16 | ; added by defining them in separate rpcinterface: sections 17 | [rpcinterface:supervisor] 18 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 19 | 20 | [supervisorctl] 21 | serverurl=unix:///var/tmp/supervisor.sock 22 | 23 | ; The [include] section can just contain the "files" setting. This 24 | ; setting can list multiple files (separated by whitespace or 25 | ; newlines). It can also contain wildcards. The filenames are 26 | ; interpreted as relative to this file. Included files *cannot* 27 | ; include files themselves. 
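; (Annotation added for clarity — not part of the upstream config.) Once the conf.d includes
; below are loaded, the harvester programs defined in ckan_harvesting.conf (ckan_gather_consumer
; and ckan_fetch_consumer) can be checked with, for example:
;   supervisorctl -c /etc/supervisor/supervisord.conf status
; Note that supervisorctl connects via the [supervisorctl] serverurl, which should point at the
; same socket declared under [unix_http_server] (file=) for that status command to work.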
28 | 29 | [include] 30 | files = /etc/supervisor/conf.d/*.conf 31 | -------------------------------------------------------------------------------- /ckan/overrides/honduras/filesystem/etc/crontab-harvester: -------------------------------------------------------------------------------- 1 | # m h dom mon dow command 2 | */15 * * * * /usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester run --config=/etc/ckan/production.ini 3 | 0 5 * * * /usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester clean_harvest_log --config=/etc/ckan/production.ini 4 | -------------------------------------------------------------------------------- /ckan/overrides/honduras/filesystem/etc/patches/ckan/datapusher_status_timestamp.patch: -------------------------------------------------------------------------------- 1 | diff --git a/ckanext/datapusher/logic/action.py b/ckanext/datapusher/logic/action.py 2 | index 5a5f51967..a64313e04 100644 3 | --- a/ckanext/datapusher/logic/action.py 4 | +++ b/ckanext/datapusher/logic/action.py 5 | @@ -301,7 +301,7 @@ def datapusher_status(context, data_dict): 6 | log['timestamp'], "%Y-%m-%dT%H:%M:%S.%f") 7 | date = datetime.datetime.utcfromtimestamp( 8 | time.mktime(date)) 9 | - log['timestamp'] = date 10 | + log['timestamp'] = date.isoformat() 11 | except (requests.exceptions.ConnectionError, 12 | requests.exceptions.HTTPError): 13 | job_detail = {'error': 'cannot connect to datapusher'} 14 | -------------------------------------------------------------------------------- /ckan/overrides/honduras/filesystem/etc/patches/ckan/disable_streaming.patch: -------------------------------------------------------------------------------- 1 | diff --git a/ckan/config/middleware/pylons_app.py b/ckan/config/middleware/pylons_app.py 2 | index 2d3523241..313a76dc9 100644 3 | --- a/ckan/config/middleware/pylons_app.py 4 | +++ b/ckan/config/middleware/pylons_app.py 5 | @@ -137,7 +137,7 @@ def make_pylons_stack(conf, full_stack=True, static_files=True, 6 | # The RegistryManager includes code to pop 7 | # registry values after the stream has completed, 8 | # so we need to prevent this with `streaming` set to True. 
9 | - app = RegistryManager(app, streaming=True) 10 | + app = RegistryManager(app, streaming=False) 11 | 12 | if asbool(static_files): 13 | # Serve static files 14 | -------------------------------------------------------------------------------- /ckan/overrides/honduras/filesystem/etc/patches/ckan/fix_graph_view.patch: -------------------------------------------------------------------------------- 1 | diff --git a/ckanext/reclineview/plugin.py b/ckanext/reclineview/plugin.py 2 | index debcc574e..b159a7d6c 100644 3 | --- a/ckanext/reclineview/plugin.py 4 | +++ b/ckanext/reclineview/plugin.py 5 | @@ -152,7 +152,7 @@ class ReclineGraphView(ReclineViewBase): 6 | 7 | datastore_fields = [] 8 | 9 | - datastore_field_types = ['numeric', 'int4', 'timestamp'] 10 | + datastore_field_types = ['numeric', 'int4', 'timestamp', 'text'] 11 | 12 | def list_graph_types(self): 13 | return [t['value'] for t in self.graph_types] 14 | -------------------------------------------------------------------------------- /ckan/overrides/honduras/filesystem/etc/patches/ckan/stats_disable_html5.patch: -------------------------------------------------------------------------------- 1 | diff --git a/ckanext/stats/templates/ckanext/stats/index.html b/ckanext/stats/templates/ckanext/stats/index.html 2 | index 0f9373f..ef4869d 100644 3 | --- a/ckanext/stats/templates/ckanext/stats/index.html 4 | +++ b/ckanext/stats/templates/ckanext/stats/index.html 5 | @@ -189,11 +189,5 @@ 6 | 7 | {% block scripts %} 8 | {{ super() }} 9 | -{# 10 | -Hellish hack to get excanvas to work in IE8. We disable html5shiv from 11 | -overriding the createElement() method on this page. 12 | -See: http://stackoverflow.com/questions/10208062/using-flot-with-bootstrap-ie8-incompatibility 13 | -#} 14 | -{% resource "vendor/block_html5_shim" %} 15 | {% resource "ckanext_stats/stats" %} 16 | {% endblock %} 17 | 18 | -------------------------------------------------------------------------------- /ckan/overrides/honduras/filesystem/etc/supervisor/conf.d/ckan_harvesting.conf: -------------------------------------------------------------------------------- 1 | ; =============================== 2 | ; ckan harvester 3 | ; =============================== 4 | [program:ckan_gather_consumer] 5 | 6 | command=/usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester gather_consumer --config=/etc/ckan/production.ini 7 | 8 | ; user that owns virtual environment. 9 | user=ckan 10 | 11 | numprocs=1 12 | stdout_logfile=/var/log/ckan/std/gather_consumer.log 13 | stderr_logfile=/var/log/ckan/std/gather_consumer.log 14 | autostart=true 15 | autorestart=true 16 | startsecs=10 17 | 18 | [program:ckan_fetch_consumer] 19 | 20 | command=/usr/local/bin/ckan-paster --plugin=ckanext-harvest harvester fetch_consumer --config=/etc/ckan/production.ini 21 | 22 | ; user that owns virtual environment. 
23 | user=ckan 24 | 25 | numprocs=1 26 | stdout_logfile=/var/log/ckan/std/fetch_consumer.log 27 | stderr_logfile=/var/log/ckan/std/fetch_consumer.log 28 | autostart=true 29 | autorestart=true 30 | startsecs=10 31 | -------------------------------------------------------------------------------- /ckan/overrides/honduras/filesystem/etc/supervisor/supervisord.conf: -------------------------------------------------------------------------------- 1 | ; supervisor config file 2 | 3 | [unix_http_server] 4 | file=/var/tmp/supervisord.sock 5 | chmod=0770 6 | chown=ckan:ckan 7 | 8 | 9 | [supervisord] 10 | logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log) 11 | pidfile=/var/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid) 12 | childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP) 13 | 14 | ; the below section must remain in the config file for RPC 15 | ; (supervisorctl/web interface) to work, additional interfaces may be 16 | ; added by defining them in separate rpcinterface: sections 17 | [rpcinterface:supervisor] 18 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 19 | 20 | [supervisorctl] 21 | serverurl=unix:///var/tmp/supervisor.sock 22 | 23 | ; The [include] section can just contain the "files" setting. This 24 | ; setting can list multiple files (separated by whitespace or 25 | ; newlines). It can also contain wildcards. The filenames are 26 | ; interpreted as relative to this file. Included files *cannot* 27 | ; include files themselves. 28 | 29 | [include] 30 | files = /etc/supervisor/conf.d/*.conf 31 | -------------------------------------------------------------------------------- /ckan/overrides/vital-strategies/filesystem/.init: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/ckan/overrides/vital-strategies/filesystem/.init -------------------------------------------------------------------------------- /ckan/post_install_functions.sh: -------------------------------------------------------------------------------- 1 | install_standard_ckan_extension_github() { 2 | ### Help ### 3 | # -r: RepoName 4 | # -b: BranchName (Optional. If not specified, it defaults to Master) 5 | # -e: EGG (Optional. 
If not specified, it gets extracted from RepoName) 6 | # Usage: 7 | # install_standard_ckan_extension_github -r [repo/name] -b [optional] -e [optional] 8 | 9 | # By default, the master branch is used unless specified otherwise 10 | BRANCH="master" 11 | GITHUB_URL=${GITHUB_URL:-https://github.com} 12 | PIP_INDEX_URL=${PIP_INDEX_URL:-https://pypi.org/simple/} 13 | 14 | while getopts ":r:b:e:" options; do 15 | case ${options} in 16 | r) REPO_NAME=${OPTARG} 17 | # By default, EGG is part of REPO_NAME 18 | EGG=$(echo $REPO_NAME | cut -d / -f 2) 19 | ;; 20 | b) BRANCH=${OPTARG:=$BRANCH};; 21 | # If -e option is specified, it overrides the default stated above 22 | e) EGG=${OPTARG};; 23 | esac 24 | done 25 | 26 | echo "#### REPO: $REPO_NAME ####" 27 | echo "#### BRANCH: $BRANCH ####" 28 | echo "#### REPO URL: $GITHUB_URL/$REPO_NAME.git ####" 29 | 30 | BRANCH_EXISTS=$(git ls-remote --heads ${GITHUB_URL}/${REPO_NAME}.git ${BRANCH}) 31 | 32 | if [ -z "$BRANCH_EXISTS" ]; then 33 | BRANCH_EXISTS=$(git ls-remote --tags ${GITHUB_URL}/${REPO_NAME}.git ${BRANCH}) 34 | 35 | if [ -z "$BRANCH_EXISTS" ]; then 36 | BRANCH_EXISTS=$(git ls-remote ${GITHUB_URL}/${REPO_NAME}.git | grep -o ${BRANCH}) 37 | fi 38 | fi 39 | 40 | if [ -z "$BRANCH_EXISTS" ]; then 41 | echo "#### BRANCH EXISTS: $BRANCH_EXISTS ####" 42 | 43 | if [ "$BRANCH" = "master" ]; then 44 | BRANCH_EXISTS=$(git ls-remote --heads ${GITHUB_URL}/${REPO_NAME}.git main) 45 | if [ -n "$BRANCH_EXISTS" ]; then 46 | echo "Branch 'master' not found, switching to 'main'." 47 | BRANCH="main" 48 | else 49 | echo "Branch 'master' not found, and 'main' also does not exist." 50 | exit 1 51 | fi 52 | else 53 | echo "Branch '$BRANCH' not found. Please check the branch name." 54 | exit 1 55 | fi 56 | fi 57 | 58 | if [ $PIP_INDEX_URL != https://pypi.org/simple/ ]; then 59 | TMPDIR=${CKAN_VENV}/src/${EGG} 60 | git clone -b $BRANCH ${GITHUB_URL}/${REPO_NAME}.git ${TMPDIR} 61 | 62 | for REQUIREMENTS_FILE_NAME in requirements pip-requirements 63 | do 64 | if [ -f ${TMPDIR}/$REQUIREMENTS_FILE_NAME.txt ]; then 65 | ckan-pip install --index-url ${PIP_INDEX_URL} -r ${TMPDIR}/$REQUIREMENTS_FILE_NAME.txt && break; 66 | fi 67 | done &&\ 68 | ckan-pip install --no-use-pep517 --index-url ${PIP_INDEX_URL} -e ${TMPDIR} 69 | else 70 | # Remove poetry files: ckan-cloud-docker currently has issues with poetry dependencies 71 | if [ "${REPO_NAME}" = "datopian/ckanext-sentry" ]; then 72 | TMPDIR=${CKAN_VENV}/src/${EGG} 73 | git clone -b $BRANCH ${GITHUB_URL}/${REPO_NAME}.git ${TMPDIR} 74 | 75 | CURRENT_DIR=$(pwd) 76 | 77 | cd ${TMPDIR} 78 | 79 | if [ -f "poetry.lock" ] && [ -f "pyproject.toml" ]; then 80 | rm -f "poetry.lock" "pyproject.toml" 81 | fi 82 | 83 | for REQUIREMENTS_FILE_NAME in requirements pip-requirements 84 | do 85 | if [ -f ${TMPDIR}/$REQUIREMENTS_FILE_NAME.txt ]; then 86 | ckan-pip install --index-url ${PIP_INDEX_URL} -r ${TMPDIR}/$REQUIREMENTS_FILE_NAME.txt && break; 87 | fi 88 | done 89 | 90 | ckan-pip install --no-use-pep517 --index-url ${PIP_INDEX_URL} -e ${TMPDIR} 91 | 92 | cd ${CURRENT_DIR} 93 | 94 | else 95 | TEMPFILE=`mktemp` 96 | for REQUIREMENTS_FILE_NAME in requirements pip-requirements 97 | do 98 | if wget -O $TEMPFILE https://raw.githubusercontent.com/${REPO_NAME}/$BRANCH/$REQUIREMENTS_FILE_NAME.txt 99 | then ckan-pip install --index-url ${PIP_INDEX_URL} -r $TEMPFILE && break; 100 | fi 101 | done &&\ 102 | ckan-pip install --no-use-pep517 --index-url ${PIP_INDEX_URL} -e git+${GITHUB_URL}/${REPO_NAME}.git@$BRANCH#egg=${EGG} 103 | fi 104 | fi 105 | } 106 | 107 
| install_bundled_requirements() { 108 | ckan-pip install --index-url ${PIP_INDEX_URL} -r "/tmp/${1}" 109 | } 110 | 111 | patch_ckan() { 112 | for d in /etc/patches/*; do 113 | for f in `ls $d/*.patch | sort -g`; do 114 | cd /usr/lib/ckan/venv/src/`basename "$d"` && echo "$0: Applying patch $f to /usr/lib/ckan/venv/src/`basename $d`"; patch -p1 < "$f" ; 115 | done; 116 | done; 117 | } 118 | -------------------------------------------------------------------------------- /ckan/requirements.txt: -------------------------------------------------------------------------------- 1 | gunicorn 2 | ckanext-xloader 3 | messytables 4 | #pdftables 5 | Unidecode 6 | -------------------------------------------------------------------------------- /ckan/setup/supervisord.conf: -------------------------------------------------------------------------------- 1 | [unix_http_server] 2 | file = /tmp/supervisor.sock 3 | chmod = 0777 4 | chown = ckan:ckan 5 | 6 | [supervisord] 7 | logfile = /tmp/supervisord.log 8 | logfile_maxbytes = 50MB 9 | logfile_backups=10 10 | loglevel = info 11 | pidfile = /tmp/supervisord.pid 12 | nodaemon = true 13 | umask = 022 14 | identifier = supervisor 15 | 16 | [supervisorctl] 17 | serverurl = unix:///tmp/supervisor.sock 18 | 19 | [rpcinterface:supervisor] 20 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 21 | 22 | [include] 23 | files = /etc/supervisord.d/*.conf 24 | -------------------------------------------------------------------------------- /ckan/setup/uwsgi.conf: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | route = ^(?!/api).*$ basicauth:Restricted,/srv/app/.htpasswd 3 | 4 | virtualenv = /usr/lib/ckan/venv 5 | module = wsgi:application -------------------------------------------------------------------------------- /ckan/templater.sh: -------------------------------------------------------------------------------- 1 | # https://github.com/johanhaleby/bash-templater/commit/5ac655d554238ac70b08ee4361d699ea9954c941 2 | readonly PROGNAME=$(basename $0) 3 | config_file="" 4 | print_only="false" 5 | silent="false" 6 | [ $# -eq 0 ] && exit 1 7 | [[ ! -f "${1}" ]] && exit 1 8 | template="${1}" 9 | if [ "$#" -ne 0 ]; then 10 | while [ "$#" -gt 0 ] 11 | do 12 | case "$1" in 13 | -p|--print) 14 | print_only="true" 15 | ;; 16 | -f|--file) 17 | config_file="$2" 18 | ;; 19 | -s|--silent) 20 | silent="true" 21 | ;; 22 | --) 23 | break 24 | ;; 25 | -*) 26 | exit 1 27 | ;; 28 | *) ;; 29 | esac 30 | shift 31 | done 32 | fi 33 | vars=$(grep -oE '\{\{[A-Za-z0-9_]+\}\}' "${template}" | sort | uniq | sed -e 's/^{{//' -e 's/}}$//') 34 | if [[ -z "$vars" ]]; then 35 | if [ "$silent" == "false" ]; then 36 | echo "Warning: No variable was found in ${template}, syntax is {{VAR}}" >&2 37 | fi 38 | fi 39 | if [ "${config_file}" != "" ]; then 40 | if [[ ! 
-f "${config_file}" ]]; then 41 | echo "The file ${config_file} does not exists" >&2 42 | echo "$usage" 43 | exit 1 44 | fi 45 | tmpfile=`mktemp` 46 | sed -e "s;\&;\\\&;g" -e "s;\ ;\\\ ;g" "${config_file}" > $tmpfile 47 | source $tmpfile 48 | fi 49 | var_value() { 50 | eval echo \$$1 51 | } 52 | replaces="" 53 | defaults=$(grep -oE '^\{\{[A-Za-z0-9_]+=.+\}\}' "${template}" | sed -e 's/^{{//' -e 's/}}$//') 54 | for default in $defaults; do 55 | var=$(echo "$default" | grep -oE "^[A-Za-z0-9_]+") 56 | current=`var_value $var` 57 | if [[ -z "$current" ]]; then 58 | eval $default 59 | fi 60 | replaces="-e '/^{{$var=/d' $replaces" 61 | vars="$vars 62 | $current" 63 | done 64 | vars=$(echo $vars | sort | uniq) 65 | if [[ "$print_only" == "true" ]]; then 66 | for var in $vars; do 67 | value=`var_value $var` 68 | echo "$var = $value" 69 | done 70 | exit 0 71 | fi 72 | for var in $vars; do 73 | value=`var_value $var` 74 | if [[ -z "$value" ]]; then 75 | if [ $silent == "false" ]; then 76 | echo "Warning: $var is not defined and no default is set, replacing by empty" >&2 77 | fi 78 | fi 79 | value=$(echo "$value" | sed 's/\//\\\//g'); 80 | replaces="-e 's/{{$var}}/${value}/g' $replaces" 81 | done 82 | 83 | escaped_template_path=$(echo $template | sed 's/ /\\ /g') 84 | eval sed $replaces "$escaped_template_path" 85 | -------------------------------------------------------------------------------- /ckan/themer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM viderum/ckan-cloud-docker:ckan-latest 2 | 3 | USER root 4 | 5 | # Install required system packages 6 | RUN apt-get -q -y --force-yes update \ 7 | && DEBIAN_FRONTEND=noninteractive apt-get -q -y --force-yes upgrade \ 8 | && apt-get -q -y --force-yes install \ 9 | curl \ 10 | && apt-get -q clean \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | # Install less compiler 14 | RUN curl -sL https://deb.nodesource.com/setup_11.x | bash - && \ 15 | apt-get install -y --force-yes nodejs && \ 16 | npm install -g less && \ 17 | mkdir -p /usr/lib/node_modules/.bin && \ 18 | ln -s /usr/bin/lessc /usr/lib/node_modules/.bin/lessc 19 | 20 | COPY themer.sh /themer.sh 21 | 22 | USER ckan 23 | 24 | ENTRYPOINT ["/themer.sh"] 25 | -------------------------------------------------------------------------------- /ckan/themer/themer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cat < /usr/lib/ckan/venv/src/ckan/ckan/public/base/less/custom.less 3 | @layoutLinkColor: $CKAN_PRIMARY_COLOR; 4 | @footerTextColor: mix(#FFF, @layoutLinkColor, 60%); 5 | @footerLinkColor: @footerTextColor; 6 | @mastheadBackgroundColor: @layoutLinkColor; 7 | @btnPrimaryBackground: lighten(@layoutLinkColor, 10%); 8 | @btnPrimaryBackgroundHighlight: @layoutLinkColor; 9 | EOF 10 | lessc /usr/lib/ckan/venv/src/ckan/ckan/public/base/less/main.less /var/lib/ckan/main.css 11 | -------------------------------------------------------------------------------- /configs_diff.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script will output .ini and traefik.toml variable changes from a 'git diff > configs.diff'. 4 | # Use this before stashing changes and upgrading CKAN so you can run `make secret` again and input the variable values. 5 | # Note: this script only looks for changes in .ini and traefik.toml (specifically email, main, and rule). 6 | 7 | file_path="configs.diff" 8 | output_file="config_changes.txt" 9 | 10 | if [ ! 
-f "$file_path" ]; then 11 | echo "File $file_path not found. Please run 'git diff > configs.diff' first." 12 | exit 1 13 | fi 14 | 15 | rm -f "$output_file" 16 | 17 | trim() { 18 | local var="$1" 19 | var="${var#"${var%%[![:space:]]*}"}" 20 | var="${var%"${var##*[![:space:]]}"}" 21 | var="${var%\"}" 22 | var="${var#\"}" 23 | echo -n "$var" 24 | } 25 | 26 | echo "The following variables have changed in the .ini and traefik.toml files and need to be updated in the secrets:" 27 | echo "" 28 | 29 | in_target_file=0 30 | current_file="" 31 | 32 | # Parse the diff file 33 | while IFS= read -r line; do 34 | if [[ "$line" =~ ^diff\ --git\ a/.*\.ini\.template ]]; then 35 | in_target_file=1 36 | current_file="ini" 37 | elif [[ "$line" =~ ^diff\ --git\ a/.*traefik.toml ]]; then 38 | in_target_file=1 39 | current_file="toml" 40 | elif [[ "$line" =~ ^diff\ --git ]]; then 41 | in_target_file=0 42 | fi 43 | 44 | # Output the variable changes 45 | if [[ "$in_target_file" -eq 1 ]]; then 46 | if [[ "$line" == +* ]] && [[ ! "$line" == "+++"* ]]; then 47 | line_content="${line:1}" 48 | 49 | if [[ "$current_file" == "toml" && "$line_content" == *email* ]]; then 50 | key="${line_content%%=*}" 51 | value="${line_content#*=}" 52 | echo "Let's Encrypt Email: $(trim $value)" | tee -a "$output_file" 53 | elif [[ "$current_file" == "toml" && "$line_content" == *main* ]]; then 54 | key="${line_content%%=*}" 55 | value="${line_content#*=}" 56 | echo "Let's Encrypt Domain: $(trim $value)" | tee -a "$output_file" 57 | elif [[ "$current_file" == "ini" && "$line_content" == *ckanext.gtm.gtm_id* ]]; then 58 | key="${line_content%%=*}" 59 | value="${line_content#*=}" 60 | echo "Google Tag Manager ID: $(trim $value)" | tee -a "$output_file" 61 | elif [[ "$current_file" == "ini" && "$line_content" == *googleanalytics.id* ]]; then 62 | key="${line_content%%=*}" 63 | value="${line_content#*=}" 64 | echo "Google Analytics ID: $(trim $value)" | tee -a "$output_file" 65 | elif [[ "$current_file" == "ini" && "$line_content" == *googleanalytics.account* ]]; then 66 | key="${line_content%%=*}" 67 | value="${line_content#*=}" 68 | echo "Google Analytics Account: $(trim $value)" | tee -a "$output_file" 69 | elif [[ "$current_file" == "ini" && "$line_content" == *googleanalytics.username* ]]; then 70 | key="${line_content%%=*}" 71 | value="${line_content#*=}" 72 | echo "Google Analytics Username: $(trim $value)" | tee -a "$output_file" 73 | elif [[ "$current_file" == "ini" && "$line_content" == *googleanalytics.password* ]]; then 74 | key="${line_content%%=*}" 75 | value="${line_content#*=}" 76 | echo "Google Analytics Password: $(trim $value)" | tee -a "$output_file" 77 | elif [[ "$current_file" == "ini" && "$line_content" == *ckan.sentry.dsn* ]]; then 78 | key="${line_content%%=*}" 79 | value="${line_content#*=}" 80 | echo "Sentry DSN: $(trim $value)" | tee -a "$output_file" 81 | elif [[ "$current_file" == "ini" && "$line_content" == *smtp.server* ]]; then 82 | key="${line_content%%=*}" 83 | value="${line_content#*=}" 84 | echo "SMTP Server Address (include port, e.g., 'my.smtp.server:587'): $(trim $value)" | tee -a "$output_file" 85 | elif [[ "$current_file" == "ini" && "$line_content" == *smtp.user* ]]; then 86 | key="${line_content%%=*}" 87 | value="${line_content#*=}" 88 | echo "SMTP Username: $(trim $value)" | tee -a "$output_file" 89 | elif [[ "$current_file" == "ini" && "$line_content" == *smtp.password* ]]; then 90 | key="${line_content%%=*}" 91 | value="${line_content#*=}" 92 | echo "SMTP Password: $(trim $value)" | tee -a 
"$output_file" 93 | elif [[ "$current_file" == "ini" && "$line_content" == *smtp.mail_from* ]]; then 94 | key="${line_content%%=*}" 95 | value="${line_content#*=}" 96 | echo "SMTP Mail From: $(trim $value)" | tee -a "$output_file" 97 | elif [[ "$current_file" == "ini" && "$line_content" == *=* ]]; then 98 | key="${line_content%%=*}" 99 | value="${line_content#*=}" 100 | echo "$(trim "$key"): $(trim $value)" | tee -a "$output_file" 101 | fi 102 | fi 103 | fi 104 | done <"$file_path" 105 | 106 | if [ ! -s "$output_file" ]; then 107 | echo "" 108 | echo "No changes found in .ini or traefik.toml files." 109 | else 110 | echo "" 111 | echo "Note: A list of these changes can also be found in '$output_file'. Make sure to run 'git pull' before running 'make secret'." 112 | fi 113 | 114 | echo "" 115 | echo "You are now ready to run 'git pull' and continue with the upgrade." 116 | echo "" 117 | -------------------------------------------------------------------------------- /create_secrets.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import glob 3 | import os 4 | import re 5 | import sys 6 | import uuid 7 | 8 | if sys.version_info[0] < 3: 9 | input = raw_input 10 | 11 | current_dir = os.path.dirname(os.path.realpath(__file__)) 12 | write_secrets = {} 13 | 14 | 15 | def set_databse_urls(secrets): 16 | alchemy_url = 'SQLALCHEMY_URL=postgresql://ckan:{db_password}@db/ckan'.format( 17 | db_password=secrets['db-POSTGRES_PASSWORD'] 18 | ) 19 | write_secrets['ckan'].append('export {}'.format(alchemy_url)) 20 | 21 | # add as env file also 22 | write_secrets.setdefault('harvester', []) 23 | write_secrets['harvester'].append(alchemy_url) 24 | 25 | write_secrets['ckan'].append( 26 | 'export CKAN_DATASTORE_WRITE_URL=postgresql://postgres:{datastore_password}@datastore-db/datastore'.format( 27 | datastore_password = secrets['datastore-db-DATASTORE_PASSWORD'] 28 | )) 29 | write_secrets['ckan'].append( 30 | 'export CKAN_DATASTORE_READ_URL=postgresql://{ro_user}:{ro_password}@datastore-db/datastore'.format( 31 | ro_user=secrets['datastore-db-DATASTORE_RO_USER'], 32 | ro_password=secrets['datastore-db-DATASTORE_RO_PASSWORD'] 33 | )) 34 | 35 | def main(): 36 | print('The script will create or update (if it is already exists) local secrets files.\n') 37 | 38 | filename = os.path.join(current_dir, 'docker-compose', 'ckan-secrets.dat') 39 | secrets_filenames = os.path.join(current_dir, 'docker-compose', '*-secrets.sh') 40 | spec = open(filename, 'r').readlines() 41 | secrets = {} 42 | 43 | for filename in glob.glob(secrets_filenames): 44 | secrets_lines = open(filename, 'r').readlines() 45 | secrets_for = filename.split('/')[-1].replace('-secrets.sh', '') 46 | for secret in secrets_lines: 47 | idx = 1 if secrets_for == 'ckan' else 0 48 | name, value = secret.split()[idx].split('=') 49 | secrets['{}-{}'.format(secrets_for, name)] = value 50 | 51 | for i, line in enumerate(spec): 52 | secrets_for, mode, name, default, description = line.split(' ', 4) 53 | saved_value = secrets.get('{}-{}'.format(secrets_for, name)) 54 | 55 | if name == 'BEAKER_SESSION_SECRET' or name == 'APP_INSTANCE_UUID': 56 | default = str(uuid.uuid4()) 57 | if saved_value: 58 | if name == 'TIMEZONE': 59 | example = 'Skip to use saved value "{}" e.g. Asia/Tokyo'.format(saved_value) 60 | else: 61 | example = 'Skip to use saved value "{}"'.format(saved_value) 62 | else: 63 | if name == 'TIMEZONE': 64 | example = 'Default value "{}" e.g. 
Asia/Tokyo'.format(default) 65 | else: 66 | example = 'Default value "{}"'.format(default) 67 | 68 | 69 | value = input('[{}] {} \n({}): '.format( 70 | i + 1, 71 | description.strip('\n'), 72 | example 73 | )) 74 | if not value and saved_value: 75 | value = saved_value 76 | 77 | if value is None: 78 | value = '' 79 | else: 80 | value = value.strip() 81 | 82 | if not value and mode == 'required': 83 | print('Used default value: {}'.format(default)) 84 | value = default 85 | 86 | if not value and mode == 'optional': 87 | value = '' 88 | 89 | prefix = 'export ' if secrets_for == 'ckan' else '' 90 | write_secrets.setdefault(secrets_for, []).append('{}{}={}'.format(prefix, name, value)) 91 | print('') 92 | secrets['{}-{}'.format(secrets_for, name)] = value 93 | 94 | 95 | set_databse_urls(secrets) 96 | save_values() 97 | 98 | 99 | def save_values(): 100 | for filename, write_secret in write_secrets.items(): 101 | secrets_filename = os.path.join(current_dir, 'docker-compose', '%s-secrets.sh' % filename) 102 | with open(secrets_filename, 'w') as f: 103 | f.write('\n'.join(write_secret)) 104 | print('Saved {}'.format(secrets_filename)) 105 | 106 | 107 | if __name__ == '__main__': 108 | try: 109 | main() 110 | except KeyboardInterrupt: 111 | value = input('\n\nSave entered values (old non-entered values from secrets file will be also removed)? [y/N]: ') 112 | if value == 'y': 113 | save_values() 114 | else: 115 | print('\nExiting without saving') 116 | -------------------------------------------------------------------------------- /datapusher-plus/Dockerfile: -------------------------------------------------------------------------------- 1 | ############################# 2 | ### Build DataPusher Plus ### 3 | ############################# 4 | FROM ubuntu:jammy 5 | 6 | 7 | LABEL maintainer="Minhaj" 8 | 9 | 10 | # Set timezone 11 | ENV TZ=UTC 12 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 13 | 14 | # Set Locale 15 | ENV LC_ALL=en_US.UTF-8 16 | 17 | ENV SRC_DIR=/srv/app/src 18 | ENV VENV=/usr/lib/ckan/dpplus_venv 19 | ENV CFG_DIR=/etc/ckan/datapusher 20 | 21 | WORKDIR ${SRC_DIR} 22 | 23 | # Set the locale 24 | RUN apt-get update 25 | RUN apt-get install --no-install-recommends -y locales 26 | RUN sed -i "/$LC_ALL/s/^# //g" /etc/locale.gen 27 | RUN dpkg-reconfigure --frontend=noninteractive locales 28 | RUN update-locale LANG=${LC_ALL} 29 | RUN apt-get install -y software-properties-common 30 | RUN add-apt-repository ppa:deadsnakes/ppa 31 | 32 | # Install apt-utils and other dependencies 33 | RUN apt-get install --no-install-recommends -y \ 34 | apt-utils \ 35 | build-essential \ 36 | libxslt1-dev \ 37 | libxml2-dev \ 38 | libffi-dev \ 39 | wget \ 40 | curl \ 41 | unzip \ 42 | git \ 43 | libpq-dev \ 44 | file \ 45 | vim 46 | 47 | # Install Python 3.9 (check if it's available in the default repo first) 48 | # If not available, use the previously added PPA 49 | RUN apt-get install -y python3.9 python3.9-dev python3.9-venv 50 | 51 | # Set Python 3.9 as the default Python version 52 | RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1 53 | RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 54 | 55 | # Install pip for Python 3.9 56 | RUN apt-get install -y python3-pip 57 | 58 | # Clean up APT when done 59 | RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 60 | 61 | 62 | #install qsv; 63 | RUN wget https://github.com/jqnatividad/qsv/releases/download/0.108.0/qsv-0.108.0-x86_64-unknown-linux-gnu.zip 
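# (Hedged hardening sketch — not part of the original Dockerfile.) The qsv archive fetched above
# could be verified against a pinned checksum before it is unzipped in the next step, e.g.:
#   RUN echo "<expected-sha256>  qsv-0.108.0-x86_64-unknown-linux-gnu.zip" | sha256sum -c -
# The <expected-sha256> value is a placeholder; the real digest is published with the qsv release.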
64 | RUN unzip qsv-0.108.0-x86_64-unknown-linux-gnu.zip 65 | RUN rm qsv-0.108.0-x86_64-unknown-linux-gnu.zip 66 | RUN mv qsv* /usr/local/bin 67 | 68 | 69 | #python env setup; link python3 to python cmd; make venv; install uwsgi; 70 | RUN python3 -m venv ${VENV} 71 | RUN ${VENV}/bin/pip install uwsgi 72 | 73 | 74 | #INSTALL DATAPUSHER-PLUS FROM SOURCE REPO 75 | RUN git clone --branch 0.15.0 https://github.com/datHere/datapusher-plus 76 | RUN cd ${SRC_DIR}/datapusher-plus && \ 77 | ${VENV}/bin/pip install -r requirements-dev.txt && \ 78 | ${VENV}/bin/pip install -e . 79 | 80 | 81 | RUN ${VENV}/bin/pip install Flask==2.3.3 82 | RUN ${VENV}/bin/pip install Werkzeug==2.3.0 83 | 84 | 85 | #SETUP CONFIG/SETTINGS.PY 86 | RUN mkdir -p ${CFG_DIR} 87 | 88 | RUN curl https://raw.githubusercontent.com/dathere/datapusher-plus/0.15.0/deployment/datapusher-uwsgi.ini -o ${CFG_DIR}/uwsgi.ini 89 | 90 | COPY datapusher-plus/example.env ${SRC_DIR}/datapusher-plus/datapusher/.env 91 | ENV JOB_CONFIG=${SRC_DIR}/datapusher-plus/datapusher/.env 92 | 93 | COPY datapusher-plus/entrypoint/startup.sh /startup.sh 94 | RUN chmod +x /startup.sh 95 | 96 | 97 | ENTRYPOINT [ "bash", "-c", "/startup.sh" ] 98 | 99 | 100 | EXPOSE 8800 101 | -------------------------------------------------------------------------------- /datapusher-plus/datapusher-settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | WRITE_ENGINE_URL = os.environ.get("WRITE_ENGINE_URL") 4 | SQLALCHEMY_DATABASE_URI = os.environ.get("SQLALCHEMY_DATABASE_URI") -------------------------------------------------------------------------------- /datapusher-plus/entrypoint/startup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # turn on bash's job control 4 | set -m 5 | 6 | check_db_ready() { 7 | (echo > /dev/tcp/datastore-db/5432) >/dev/null 2>&1 8 | } 9 | 10 | until check_db_ready; do 11 | echo "Waiting for datastore-db to be ready..." 12 | sleep 2 13 | done 14 | 15 | echo "datastore-db is ready. Starting datapusher..." 
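# (Optional variant, sketched here as a comment — the original loop above is unchanged.) The
# readiness loop retries forever; if a bounded startup is preferred, a capped retry could look like:
#   for attempt in $(seq 1 60); do check_db_ready && break; echo "waiting ($attempt/60)"; sleep 2; done
#   check_db_ready || { echo "datastore-db did not become ready in time"; exit 1; }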
16 | 17 | # Start the primary process and put it in the background 18 | ${VENV}/bin/uwsgi --socket=/tmp/uwsgi.sock --enable-threads -i ${CFG_DIR}/uwsgi.ini --wsgi-file=${SRC_DIR}/datapusher-plus/wsgi.py & 19 | 20 | # Start the test process 21 | #cd ${SRC_DIR}/testing-datapusher-plus && ${VENV}/bin/python test.py 22 | 23 | fg %1 24 | -------------------------------------------------------------------------------- /datapusher/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM viderum/base:0.4 2 | 3 | MAINTAINER Keitaro Inc 4 | 5 | ENV APP_DIR=/srv/app 6 | ENV GIT_BRANCH 0.0.19 7 | ENV GIT_URL https://github.com/ckan/datapusher.git 8 | ENV JOB_CONFIG ${APP_DIR}/datapusher_settings.py 9 | 10 | WORKDIR ${APP_DIR} 11 | 12 | 13 | ARG APK_REPOSITORY 14 | ARG PIP_INDEX_URL 15 | ENV PIP_INDEX_URL=$PIP_INDEX_URL 16 | 17 | RUN apk add --no-cache python \ 18 | tzdata \ 19 | py-pip \ 20 | py-gunicorn \ 21 | libffi-dev \ 22 | libressl-dev \ 23 | libxslt --update-cache --repository ${APK_REPOSITORY} --allow-untrusted && \ 24 | pip install --upgrade pip && \ 25 | # Temporary packages to build CKAN requirements 26 | apk add --no-cache --virtual .build-deps \ 27 | gcc \ 28 | git \ 29 | musl-dev \ 30 | python-dev \ 31 | libxml2-dev \ 32 | libxslt-dev --update-cache --repository ${APK_REPOSITORY} --allow-untrusted && \ 33 | # Fetch datapusher and install 34 | mkdir ${APP_DIR}/src && cd ${APP_DIR}/src && \ 35 | git clone -b ${GIT_BRANCH} --depth=1 --single-branch ${GIT_URL} && \ 36 | cd datapusher && \ 37 | # pin xlrd version for xlsx support 38 | pip install xlrd==1.2.0 && \ 39 | python setup.py install && \ 40 | pip install --index-url ${PIP_INDEX_URL:-https://pypi.org/simple/} --no-cache-dir -r requirements.txt && \ 41 | # Remove temporary packages and files 42 | apk del .build-deps && \ 43 | rm -rf ${APP_DIR}/src 44 | 45 | COPY docker-compose/datapusher-secrets.sh /tmp/secrets.sh 46 | SHELL ["/bin/bash", "-c"] 47 | RUN source /tmp/secrets.sh || true && ln -sf /usr/share/zoneinfo/${TIMEZONE:-UTC} /etc/localtime || true 48 | 49 | COPY datapusher/setup ${APP_DIR} 50 | 51 | EXPOSE 8800 52 | 53 | CMD ["gunicorn", "--bind=0.0.0.0:8800", "--log-file=-", "wsgi"] 54 | -------------------------------------------------------------------------------- /datapusher/setup/datapusher_settings.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | DEBUG = False 4 | TESTING = False 5 | SECRET_KEY = str(uuid.uuid4()) 6 | USERNAME = str(uuid.uuid4()) 7 | PASSWORD = str(uuid.uuid4()) 8 | 9 | NAME = 'datapusher' 10 | 11 | # database 12 | 13 | SQLALCHEMY_DATABASE_URI = 'sqlite:////tmp/job_store.db' 14 | 15 | # webserver host and port 16 | 17 | HOST = '0.0.0.0' 18 | PORT = 8800 19 | 20 | # logging 21 | 22 | #FROM_EMAIL = 'server-error@example.com' 23 | #ADMINS = ['yourname@example.com'] # where to send emails 24 | 25 | #LOG_FILE = '/tmp/ckan_service.log' 26 | STDERR = True 27 | 28 | # cloud settings 29 | MAX_CONTENT_LENGTH = 1073400320 30 | -------------------------------------------------------------------------------- /datapusher/setup/wsgi.py: -------------------------------------------------------------------------------- 1 | import ckanserviceprovider.web as web 2 | web.init() 3 | 4 | import datapusher.jobs as jobs 5 | 6 | application = web.app 7 | -------------------------------------------------------------------------------- /db/Dockerfile: -------------------------------------------------------------------------------- 
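# (Annotation added for clarity — not part of the original file.) This image backs the "db"
# service built via docker-compose (it is listed in BUILD_APPS in cicd_functions.sh); it extends
# the postgis/postgis:12-3.1-alpine base and layers in supervisor plus the datastore permission
# and init scripts copied below.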
1 | FROM postgis/postgis:12-3.1-alpine 2 | 3 | ARG APK_REPOSITORY 4 | 5 | RUN apk --update add supervisor --update-cache --repository ${APK_REPOSITORY} --allow-untrusted 6 | 7 | #RUN apk add --no-cache postgis 8 | # 9 | #RUN mkdir -p /usr/local/share/postgresql/extension 10 | # 11 | #RUN cp /usr/share/postgresql15/extension/postgis.control /usr/local/share/postgresql/extension/ 12 | # 13 | COPY init_ckan_db.sh /docker-entrypoint-initdb.d/ 14 | COPY *.sh /db-scripts/ 15 | COPY datastore-permissions.sql.template /db-scripts/ 16 | COPY datastore-public-ro-supervisord.conf /db-scripts/ 17 | 18 | ARG DB_INIT 19 | RUN echo "${DB_INIT}" >> /docker-entrypoint-initdb.d/init_ckan_db.sh 20 | 21 | ENTRYPOINT ["/db-scripts/entrypoint.sh"] 22 | CMD ["postgres"] 23 | -------------------------------------------------------------------------------- /db/datastore-permissions-update.sh: -------------------------------------------------------------------------------- 1 | cd / 2 | while ! su postgres -c "pg_isready"; do echo waiting for DB..; sleep 1; done 3 | [ `su postgres -c "psql -c \"select count(1) from pg_roles where rolname='readonly'\" -tA"` == "0" ] &&\ 4 | echo creating role readonly &&\ 5 | ! su postgres -c "psql -c \"create role readonly with login password '${DATASTORE_PUBLIC_RO_PASSWORD}';\"" \ 6 | && echo failed to create readonly role && exit 1 7 | echo getting all datastore resource ids 8 | ! DATASTORE_RESOURCES=`su postgres -c 'psql datastore -c "select tablename from pg_tables where schemaname='"'public'"';" -tA'` \ 9 | && echo failed to get datastore tables && exit 1 10 | echo updating datastore table permissions 11 | for RESOURCE in $DATASTORE_RESOURCES; do 12 | if wget -qO /dev/null http://ckan:5000/api/3/action/resource_show?id=${RESOURCE} 2>/dev/null; then 13 | ! su postgres -c "psql datastore -c 'grant select on \"${RESOURCE}\" to readonly;'" >/dev/null &&\ 14 | echo failed to grant select permissions for readonly on ${RESOURCE} 15 | else 16 | ! su postgres -c "psql datastore -c 'revoke select on \"${RESOURCE}\" from readonly;'" >/dev/null &&\ 17 | echo failed to revoke select permission for readonly on ${RESOURCE} 18 | fi 19 | done 20 | -------------------------------------------------------------------------------- /db/datastore-permissions.sql.template: -------------------------------------------------------------------------------- 1 | /* 2 | This script configures the permissions for the datastore. 3 | 4 | It ensures that the datastore read-only user will only be able to select from 5 | the datastore database but has no create/write/edit permission or any 6 | permissions on other databases. You must execute this script as a database 7 | superuser on the PostgreSQL server that hosts your datastore database. 8 | 9 | For example, if PostgreSQL is running locally and the "postgres" user has the 10 | appropriate permissions (as in the default Ubuntu PostgreSQL install), you can 11 | run: 12 | 13 | paster datastore set-permissions | sudo -u postgres psql 14 | 15 | Or, if your PostgreSQL server is remote, you can pipe the permissions script 16 | over SSH: 17 | 18 | paster datastore set-permissions | ssh dbserver sudo -u postgres psql 19 | 20 | */ 21 | 22 | -- Most of the following commands apply to an explicit database or to the whole 23 | -- 'public' schema, and could be executed anywhere. 
But ALTER DEFAULT 24 | -- PERMISSIONS applies to the current database, and so we must be connected to 25 | -- the datastore DB: 26 | \connect "datastore" 27 | 28 | -- grant select permissions for read-only user 29 | GRANT CONNECT ON DATABASE "datastore" TO "{{DATASTORE_RO_USER}}"; 30 | GRANT USAGE ON SCHEMA public TO "{{DATASTORE_RO_USER}}"; 31 | 32 | -- grant access to current tables and views to read-only user 33 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{DATASTORE_RO_USER}}"; 34 | 35 | -- grant access to new tables and views by default 36 | ALTER DEFAULT PRIVILEGES FOR USER "postgres" IN SCHEMA public 37 | GRANT SELECT ON TABLES TO "{{DATASTORE_RO_USER}}"; 38 | 39 | -- a view for listing valid table (resource id) and view names 40 | CREATE OR REPLACE VIEW "_table_metadata" AS 41 | SELECT DISTINCT 42 | substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS "_id", 43 | dependee.relname AS name, 44 | dependee.oid AS oid, 45 | dependent.relname AS alias_of 46 | FROM 47 | pg_class AS dependee 48 | LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid 49 | LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid 50 | LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid 51 | WHERE 52 | (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND 53 | -- is a table (from pg_tables view definition) 54 | -- or is a view (from pg_views view definition) 55 | (dependee.relkind = 'r'::"char" OR dependee.relkind = 'v'::"char") 56 | AND dependee.relnamespace = ( 57 | SELECT oid FROM pg_namespace WHERE nspname='public') 58 | ORDER BY dependee.oid DESC; 59 | ALTER VIEW "_table_metadata" OWNER TO "postgres"; 60 | GRANT SELECT ON "_table_metadata" TO "{{DATASTORE_RO_USER}}"; 61 | 62 | -- _full_text fields are now updated by a trigger when set to NULL 63 | CREATE OR REPLACE FUNCTION populate_full_text_trigger() RETURNS trigger 64 | AS $body$ 65 | BEGIN 66 | IF NEW._full_text IS NOT NULL THEN 67 | RETURN NEW; 68 | END IF; 69 | NEW._full_text := ( 70 | SELECT to_tsvector(string_agg(value, ' ')) 71 | FROM json_each_text(row_to_json(NEW.*)) 72 | WHERE key NOT LIKE '\_%'); 73 | RETURN NEW; 74 | END; 75 | $body$ LANGUAGE plpgsql; 76 | ALTER FUNCTION populate_full_text_trigger() OWNER TO "postgres"; 77 | 78 | -- migrate existing tables that don't have full text trigger applied 79 | DO $body$ 80 | BEGIN 81 | EXECUTE coalesce( 82 | (SELECT string_agg( 83 | 'CREATE TRIGGER zfulltext BEFORE INSERT OR UPDATE ON ' || 84 | quote_ident(relname) || ' FOR EACH ROW EXECUTE PROCEDURE ' || 85 | 'populate_full_text_trigger();', ' ') 86 | FROM pg_class 87 | LEFT OUTER JOIN pg_trigger AS t 88 | ON t.tgrelid = relname::regclass AND t.tgname = 'zfulltext' 89 | WHERE relkind = 'r'::"char" AND t.tgname IS NULL 90 | AND relnamespace = ( 91 | SELECT oid FROM pg_namespace WHERE nspname='public')), 92 | 'SELECT 1;'); 93 | END; 94 | $body$; 95 | -------------------------------------------------------------------------------- /db/datastore-public-ro-cron.sh: -------------------------------------------------------------------------------- 1 | echo "Setting up datastore permissions cron" &&\ 2 | mkdir -p /datastore-permissions-crontabs &&\ 3 | echo '* * * * * bash /db-scripts/datastore-permissions-update.sh' > /datastore-permissions-crontabs/root 4 | [ "$?" 
!= "0" ] && echo failed to initialize datastore permissions cron && exit 1 5 | exec crond -f -L /dev/stdout -c /datastore-permissions-crontabs 6 | -------------------------------------------------------------------------------- /db/datastore-public-ro-supervisord.conf: -------------------------------------------------------------------------------- 1 | [unix_http_server] 2 | file = /tmp/datastore-public-ro-supervisor.sock 3 | 4 | [supervisord] 5 | logfile = /tmp/datastore-public-ro-supervisord.log 6 | pidfile = /tmp/datastore-public-ro-supervisord.pid 7 | identifier = datastore-public-ro-supervisor 8 | 9 | [supervisorctl] 10 | serverurl = unix:///tmp/datastore-public-ro-supervisor.sock 11 | prompt = datastore-public-ro-supervisor 12 | 13 | [rpcinterface:supervisor] 14 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 15 | 16 | [program:cron] 17 | command=bash /db-scripts/datastore-public-ro-cron.sh 18 | redirect_stderr=true 19 | -------------------------------------------------------------------------------- /db/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if ! [ -z "${DATASTORE_PUBLIC_RO_PASSWORD}" ]; then 4 | echo Starting datastore-public-ro supervisord... 5 | ! supervisord -c /db-scripts/datastore-public-ro-supervisord.conf \ 6 | && echo failed to start datastore-public-ro-supervisord && exit 1 7 | ! supervisorctl -c /db-scripts/datastore-public-ro-supervisord.conf status \ 8 | && echo failed to get datastore-public-ro supervisor status && exit 1 9 | echo 10 | 11 | fi 12 | 13 | exec /usr/local/bin/docker-entrypoint.sh "$@" 14 | -------------------------------------------------------------------------------- /db/init_ckan_db.sh: -------------------------------------------------------------------------------- 1 | while ! pg_isready; do 2 | echo waiting for DB to accept connections... 
3 | sleep 1 4 | done 5 | sleep 2 6 | 7 | if [ -z "${DATASTORE_RO_USER}" ]; then 8 | echo Initializing CKAN DB &&\ 9 | echo Creating db &&\ 10 | createdb ckan -E utf-8 &&\ 11 | echo Creating role "create role ckan with login password '${POSTGRES_PASSWORD}'" &&\ 12 | psql -c "create role ckan with login password '${POSTGRES_PASSWORD}';" &&\ 13 | echo Granting privileges &&\ 14 | psql -c 'GRANT ALL PRIVILEGES ON DATABASE "ckan" to ckan;' &&\ 15 | echo Successfully initialized the CKAN DB 16 | 17 | else 18 | echo Initializing datastore DB &&\ 19 | echo Creating db &&\ 20 | createdb datastore -E utf-8 &&\ 21 | echo creating readonly user &&\ 22 | psql -c "create role ${DATASTORE_RO_USER} with login password '${DATASTORE_RO_PASSWORD}';" &&\ 23 | echo setting datastore permissions &&\ 24 | bash /db-scripts/templater.sh /db-scripts/datastore-permissions.sql.template \ 25 | | psql --set ON_ERROR_STOP=1 &&\ 26 | echo Successfully initialized the datastore DB 27 | 28 | fi 29 | -------------------------------------------------------------------------------- /db/migration/ckan-permissions.sql: -------------------------------------------------------------------------------- 1 | \connect "ckan" 2 | 3 | GRANT CREATE ON SCHEMA public TO "ckan"; 4 | GRANT USAGE ON SCHEMA public TO "ckan"; 5 | 6 | -- take connect permissions from main db 7 | REVOKE CONNECT ON DATABASE "ckan" FROM "readonly"; -------------------------------------------------------------------------------- /db/migration/datastore-permissions.sql: -------------------------------------------------------------------------------- 1 | \connect "datastore" 2 | 3 | -- revoke permissions for the read-only user 4 | REVOKE CREATE ON SCHEMA public FROM PUBLIC; 5 | REVOKE USAGE ON SCHEMA public FROM PUBLIC; 6 | 7 | GRANT CREATE ON SCHEMA public TO "postgres"; 8 | GRANT USAGE ON SCHEMA public TO "postgres"; 9 | 10 | -- grant select permissions for read-only user 11 | GRANT CONNECT ON DATABASE "datastore" TO "readonly"; 12 | GRANT USAGE ON SCHEMA public TO "readonly"; 13 | 14 | -- grant access to current tables and views to read-only user 15 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO "readonly"; 16 | 17 | -- grant access to new tables and views by default 18 | ALTER DEFAULT PRIVILEGES FOR USER "postgres" IN SCHEMA public 19 | GRANT SELECT ON TABLES TO "readonly"; 20 | 21 | -- a view for listing valid table (resource id) and view names 22 | CREATE OR REPLACE VIEW "_table_metadata" AS 23 | SELECT DISTINCT 24 | substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS "_id", 25 | dependee.relname AS name, 26 | dependee.oid AS oid, 27 | dependent.relname AS alias_of 28 | FROM 29 | pg_class AS dependee 30 | LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid 31 | LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid 32 | LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid 33 | WHERE 34 | (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND 35 | -- is a table (from pg_tables view definition) 36 | -- or is a view (from pg_views view definition) 37 | (dependee.relkind = 'r'::"char" OR dependee.relkind = 'v'::"char") 38 | AND dependee.relnamespace = ( 39 | SELECT oid FROM pg_namespace WHERE nspname='public') 40 | ORDER BY dependee.oid DESC; 41 | ALTER VIEW "_table_metadata" OWNER TO "postgres"; 42 | GRANT SELECT ON "_table_metadata" TO "readonly"; 43 | 44 | -- _full_text fields are now updated by a trigger when set to NULL 45 | CREATE OR REPLACE FUNCTION populate_full_text_trigger() RETURNS trigger 46 | AS $body$ 
47 | BEGIN 48 | IF NEW._full_text IS NOT NULL THEN 49 | RETURN NEW; 50 | END IF; 51 | NEW._full_text := ( 52 | SELECT to_tsvector(string_agg(value, ' ')) 53 | FROM json_each_text(row_to_json(NEW.*)) 54 | WHERE key NOT LIKE '\_%'); 55 | RETURN NEW; 56 | END; 57 | $body$ LANGUAGE plpgsql; 58 | ALTER FUNCTION populate_full_text_trigger() OWNER TO "postgres"; 59 | 60 | -- migrate existing tables that don't have full text trigger applied 61 | DO $body$ 62 | BEGIN 63 | EXECUTE coalesce( 64 | (SELECT string_agg( 65 | 'CREATE TRIGGER zfulltext BEFORE INSERT OR UPDATE ON ' || 66 | quote_ident(relname) || ' FOR EACH ROW EXECUTE PROCEDURE ' || 67 | 'populate_full_text_trigger();', ' ') 68 | FROM pg_class 69 | LEFT OUTER JOIN pg_trigger AS t 70 | ON t.tgrelid = relname::regclass AND t.tgname = 'zfulltext' 71 | WHERE relkind = 'r'::"char" AND t.tgname IS NULL 72 | AND relnamespace = ( 73 | SELECT oid FROM pg_namespace WHERE nspname='public')), 74 | 'SELECT 1;'); 75 | END; 76 | $body$; 77 | -------------------------------------------------------------------------------- /db/templater.sh: -------------------------------------------------------------------------------- 1 | # https://github.com/johanhaleby/bash-templater/commit/5ac655d554238ac70b08ee4361d699ea9954c941 2 | readonly PROGNAME=$(basename $0) 3 | config_file="" 4 | print_only="false" 5 | silent="false" 6 | [ $# -eq 0 ] && exit 1 7 | [[ ! -f "${1}" ]] && exit 1 8 | template="${1}" 9 | if [ "$#" -ne 0 ]; then 10 | while [ "$#" -gt 0 ] 11 | do 12 | case "$1" in 13 | -p|--print) 14 | print_only="true" 15 | ;; 16 | -f|--file) 17 | config_file="$2" 18 | ;; 19 | -s|--silent) 20 | silent="true" 21 | ;; 22 | --) 23 | break 24 | ;; 25 | -*) 26 | exit 1 27 | ;; 28 | *) ;; 29 | esac 30 | shift 31 | done 32 | fi 33 | vars=$(grep -oE '\{\{[A-Za-z0-9_]+\}\}' "${template}" | sort | uniq | sed -e 's/^{{//' -e 's/}}$//') 34 | if [[ -z "$vars" ]]; then 35 | if [ "$silent" == "false" ]; then 36 | echo "Warning: No variable was found in ${template}, syntax is {{VAR}}" >&2 37 | fi 38 | fi 39 | if [ "${config_file}" != "" ]; then 40 | if [[ ! 
-f "${config_file}" ]]; then 41 | echo "The file ${config_file} does not exists" >&2 42 | echo "$usage" 43 | exit 1 44 | fi 45 | tmpfile=`mktemp` 46 | sed -e "s;\&;\\\&;g" -e "s;\ ;\\\ ;g" "${config_file}" > $tmpfile 47 | source $tmpfile 48 | fi 49 | var_value() { 50 | eval echo \$$1 51 | } 52 | replaces="" 53 | defaults=$(grep -oE '^\{\{[A-Za-z0-9_]+=.+\}\}' "${template}" | sed -e 's/^{{//' -e 's/}}$//') 54 | for default in $defaults; do 55 | var=$(echo "$default" | grep -oE "^[A-Za-z0-9_]+") 56 | current=`var_value $var` 57 | if [[ -z "$current" ]]; then 58 | eval $default 59 | fi 60 | replaces="-e '/^{{$var=/d' $replaces" 61 | vars="$vars 62 | $current" 63 | done 64 | vars=$(echo $vars | sort | uniq) 65 | if [[ "$print_only" == "true" ]]; then 66 | for var in $vars; do 67 | value=`var_value $var` 68 | echo "$var = $value" 69 | done 70 | exit 0 71 | fi 72 | for var in $vars; do 73 | value=`var_value $var` 74 | if [[ -z "$value" ]]; then 75 | if [ $silent == "false" ]; then 76 | echo "Warning: $var is not defined and no default is set, replacing by empty" >&2 77 | fi 78 | fi 79 | value=$(echo "$value" | sed 's/\//\\\//g'); 80 | replaces="-e 's/{{$var}}/${value}/g' $replaces" 81 | done 82 | escaped_template_path=$(echo $template | sed 's/ /\\ /g') 83 | eval sed $replaces "$escaped_template_path" 84 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | proxy: 5 | image: traefik:1.7.2-alpine 6 | restart: always 7 | volumes: 8 | - ./traefik/traefik.toml.template:/traefik.toml.template 9 | #- ./traefik/traefik.dev.toml:/traefik.dev.toml # Uncomment for development 10 | - ./traefik/acme.json:/acme.json 11 | - ./cca-operator/templater.sh:/templater.sh 12 | - ./docker-compose/traefik-secrets.sh:/traefik-secrets.sh 13 | - ./traefik/entrypoint.sh:/entrypoint.sh 14 | networks: 15 | - ckan-multi 16 | entrypoint: ["/bin/sh", "-c", "/entrypoint.sh"] 17 | 18 | varnish: 19 | image: million12/varnish 20 | depends_on: 21 | - ckan 22 | volumes: 23 | - ./varnish:/etc/varnish 24 | expose: 25 | - "80" 26 | networks: 27 | - ckan-multi 28 | 29 | redis: 30 | image: redis:alpine 31 | restart: always 32 | expose: 33 | - "6379" 34 | networks: 35 | - ckan-multi 36 | 37 | nginx: 38 | depends_on: 39 | - ckan 40 | image: viderum/ckan-cloud-docker:nginx-latest 41 | build: 42 | context: nginx 43 | restart: always 44 | expose: 45 | - "8080" 46 | networks: 47 | - ckan-multi 48 | 49 | adminer: 50 | image: adminer 51 | restart: always 52 | expose: 53 | - "8080" 54 | networks: 55 | - ckan-multi 56 | 57 | jobs: 58 | depends_on: 59 | - ckan 60 | - nginx 61 | image: viderum/ckan-cloud-docker:ckan-latest 62 | command: [/ckan-entrypoint.sh, ckan, -c, /etc/ckan/ckan.ini, jobs, worker] 63 | restart: always 64 | volumes: 65 | - ./docker-compose/ckan-secrets.sh:/etc/ckan-conf/secrets/secrets.sh 66 | - ./docker-compose/ckan-conf-templates:/etc/ckan-conf/templates 67 | - ckan-data:/var/lib/ckan 68 | environment: 69 | - CKAN_STORAGE_PATH=/var/lib/ckan/data 70 | - CKAN_K8S_SECRETS=/etc/ckan-conf/secrets/secrets.sh 71 | - CKAN_K8S_TEMPLATES=/etc/ckan-conf/templates 72 | networks: 73 | - ckan-multi 74 | 75 | solr: 76 | image: ckan/ckan-solr:${SOLR_IMAGE:-2.10-solr9} 77 | restart: always 78 | expose: 79 | - "8983" 80 | volumes: 81 | - solr:/var/solr 82 | networks: 83 | - ckan-multi 84 | 85 | datapusher: 86 | build: 87 | context: . 
88 | dockerfile: ${DATAPUSHER_DIRECTORY:-datapusher}/Dockerfile 89 | args: 90 | PIP_INDEX_URL: ${PIP_INDEX_URL:-https://pypi.org/simple/} 91 | expose: 92 | - "8800" 93 | networks: 94 | - ckan-multi 95 | environment: 96 | - WRITE_ENGINE_URL=postgresql://postgres:123456@datastore-db/datastore 97 | - SQLALCHEMY_DATABASE_URI=postgresql://postgres:123456@datastore-db/datapusher_jobs 98 | 99 | ckan: 100 | depends_on: 101 | - redis 102 | - solr 103 | image: viderum/ckan-cloud-docker:ckan-latest 104 | build: 105 | context: ckan 106 | args: 107 | CKAN_BRANCH: ${CKAN_BRANCH:-ckan-2.10.4} 108 | CKAN_REPO: ${CKAN_REPO:-ckan/ckan} 109 | PIP_INDEX_URL: ${PIP_INDEX_URL:-https://pypi.org/simple/} 110 | restart: always 111 | volumes: 112 | - ./docker-compose/ckan-secrets.sh:/etc/ckan-conf/secrets/secrets.sh 113 | - ./docker-compose/ckan-conf-templates:/etc/ckan-conf/templates 114 | - ckan-data:/var/lib/ckan 115 | - ./migrate_databases.sh:/usr/lib/ckan/migrate_databases.sh 116 | - ./migrate_filestorage.sh:/usr/lib/ckan/migrate_filestorage.sh 117 | environment: 118 | - CKAN_STORAGE_PATH=/var/lib/ckan/data 119 | - CKAN_K8S_SECRETS=/etc/ckan-conf/secrets/secrets.sh 120 | - CKAN_K8S_TEMPLATES=/etc/ckan-conf/templates 121 | - GUNICORN_WORKERS=2 122 | expose: 123 | - "5000" 124 | networks: 125 | - ckan-multi 126 | 127 | jenkins: 128 | image: viderum/ckan-cloud-docker:jenkins-latest 129 | build: 130 | context: jenkins 131 | restart: always 132 | volumes: 133 | - ./jenkins/jobs:/var/jenkins_home/jobs 134 | - .:/etc/ckan-cloud/ckan-cloud-docker 135 | - /var/run/docker.sock:/var/run/docker.sock 136 | - ./jenkins/scripts/docker_compose_cca_operator.sh:/etc/ckan-cloud/cca_operator.sh 137 | ports: 138 | - "8089:8080" 139 | networks: 140 | - cloud-management 141 | 142 | cca-operator: 143 | image: viderum/ckan-cloud-docker:cca-operator-latest 144 | build: 145 | context: cca-operator 146 | command: ./server.sh 147 | restart: always 148 | volumes: 149 | - /etc/ckan-cloud:/etc/ckan-cloud 150 | ports: 151 | - "8022:22" 152 | networks: 153 | - cloud-management 154 | - ckan-multi 155 | 156 | provisioning-api-db: 157 | image: postgres 158 | restart: always 159 | ports: 160 | - "5439:5432" 161 | env_file: 162 | - docker-compose/provisioning-api-db-secrets.sh 163 | volumes: 164 | - provisioning-api-db:/var/lib/postgresql/data 165 | networks: 166 | - cloud-management 167 | 168 | provisioning-api: 169 | depends_on: 170 | - provisioning-api-db 171 | - cca-operator 172 | image: viderum/ckan-cloud-provisioning-api:latest 173 | restart: always 174 | env_file: 175 | - docker-compose/provisioning-api-secrets.sh 176 | environment: 177 | - INSTANCE_MANAGER=root@cca-operator 178 | - PRIVATE_SSH_KEY 179 | - PRIVATE_KEY 180 | - PUBLIC_KEY 181 | - GITHUB_KEY 182 | - GITHUB_SECRET 183 | - EXTERNAL_ADDRESS=http://localhost:8092 184 | ports: 185 | - "8092:8000" 186 | networks: 187 | - cloud-management 188 | 189 | volumes: 190 | ckan-data: 191 | solr: 192 | provisioning-api-db: 193 | 194 | networks: 195 | ckan-multi: 196 | cloud-management: 197 | -------------------------------------------------------------------------------- /docker-compose/cca-operator/id_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAtnKgELeel3rgaPMbYUrt9hdQxJIMTHKmWo1eezaKyqNDjZAy 3 | cHKn+eXmRv/Tdb4SW6fx9OkHwcx/NZ13tDuyUeKqkqsmaaQR+srE59YRrJisObAw 4 | h9fhpVqDgFTOkav6HAw3ApLsBncoUJo7EbRq9KJ83nGmRYRUAdf6wjeKpFKUkBkB 5 | X2jf+GBD8Hz4IjIqPFyF2W1ZidobWALUScole4o1DkjTdm+/WFmIU7zG467zattc 
6 | UEuB/wUCn6zlnFdEENafFPIg/PVTFq1GPl737fbGPXM0+LoDEa8Q1sP2kgwi2gNG 7 | P1wjDERg+jzpZF1eJI6MP/EvYoTnL4lT2GnhAwIDAQABAoIBAAL52ayMIjUcVv6/ 8 | QGoU287Q0It0SmkQ0a/WcH0YBamKgjaT7I6zABRucJQl5iAcipYeJi81gd/iYwzP 9 | 1b0F4EG/rcKYsha1C2oI1q9laYJkNyL1wcTle5PD5zM3rurnYDwG4vrSxLDSTsEJ 10 | 0v7V74yv/dNvDSRDJU+mYzu5xjQp8Z2ljyhX2TMrqu4YfoFFCT0lHWUE5VKUysG5 11 | o7xGCYzmhm9koBf0VEaREWeuS/aFuUCx+ZeBi2ht9ya8jLqKWstDtDIATiAU0yhL 12 | DfnuVU50MttTtVNs3GT8nsSlHQb3xNt1+WA94EE42H2OdKpfCCobOw/Q2ymCemoz 13 | XnTRYLECgYEA4iceDHuRUJVM5dh53sD5zWaAeIgxfpx5uP0R/q2hRzmZ00Uitec0 14 | 3IVDvBOHoCuecehr5ZqWdIRVWhncNTESWuZOXO0Zg7SoqOaFPAdL5ZQkrTfThqkI 15 | +WHG/72PhDLXpcxCuL8kjxgiqGg93ay0rEmbCkyfGisMlIEyd63+WHsCgYEAzobf 16 | drWfJzXYq/rvFCr4D65C5cWMt17cWhbVqc6BCZ4ehT9vhKKGvrBHYE6lylMUoX+/ 17 | I/mvkLfnE+EgkBFyF3nrSpuh+8huX67J3HqIW655aswDoCZ0+PXUfQnoIeNx1hV0 18 | rYn5rJigtlGXD1nbVd0DCIZg74gFJyxbA2UhpxkCgYB36uQdHFy2jSRkcEBXJJ6R 19 | ErwmrZkJGPHWbKLhF7KAMQPnKi64o+u8zxFWpkhxXw+6ONMVwb3r9MvJZhMFRYsF 20 | FkAROEwiMWaJgQq/BONyzfYQv5xzOCihC/7YVuzF3avJp6Dtk7VQBy1BfVzosY5w 21 | GOuUL4Lh/lfCWyummGVZBwKBgGCRiCPFwOlgsDOaXfgcL2mFnho6SRXk8TveuYFw 22 | 4b1RhgvgIZDvPfokCvvpSnRUbK0nQoTb10/f7djJx6QVxDdd9rsoEpHcol9zJ0hE 23 | s0XLS1K4mmlkEgXnTLg6zaQq6auk1K3gejJOG7ekZvHynjCD60stKl5WR7KNdvEb 24 | dKThAoGAHfR8wR/IDVG344jQP/1hcZcZpBrKjCd9w3HnqppHKXJaoKm1Ujfv45bq 25 | hrsQiPrTnnebbcKBEpSsryMzq++YnB2pUaNC61TGJu4D/K6S/yFi2HGVHvurJmPm 26 | PdobbZRX9KkyHv2sIIlEHoSZxwKphL+eTr07CnuafWjTCYE7Suc= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /docker-compose/cca-operator/id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC2cqAQt56XeuBo8xthSu32F1DEkgxMcqZajV57NorKo0ONkDJwcqf55eZG/9N1vhJbp/H06QfBzH81nXe0O7JR4qqSqyZppBH6ysTn1hGsmKw5sDCH1+GlWoOAVM6Rq/ocDDcCkuwGdyhQmjsRtGr0onzecaZFhFQB1/rCN4qkUpSQGQFfaN/4YEPwfPgiMio8XIXZbVmJ2htYAtRJyiV7ijUOSNN2b79YWYhTvMbjrvNq21xQS4H/BQKfrOWcV0QQ1p8U8iD89VMWrUY+Xvft9sY9czT4ugMRrxDWw/aSDCLaA0Y/XCMMRGD6POlkXV4kjow/8S9ihOcviVPYaeED cca-operator@localhost 2 | -------------------------------------------------------------------------------- /docker-compose/ckan-conf-templates/ckan_init.sh.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo Waiting for ckan infrastructure... 4 | 5 | while sleep 2; do 6 | psql -d {{SQLALCHEMY_URL}} -c "select 1" &&\ 7 | pg_isready -h datastore-db &&\ 8 | wget -qO - {{SOLR_URL}}/schema/version &&\ 9 | redis-cli -h redis ping &&\ 10 | exit 0 11 | echo . 
12 | done 13 | 14 | exit 1 15 | -------------------------------------------------------------------------------- /docker-compose/ckan-conf-templates/who.ini: -------------------------------------------------------------------------------- 1 | [plugin:auth_tkt] 2 | use = ckan.lib.auth_tkt:make_plugin 3 | 4 | [plugin:friendlyform] 5 | use = repoze.who.plugins.friendlyform:FriendlyFormPlugin 6 | login_form_url= /user/login 7 | login_handler_path = /login_generic 8 | logout_handler_path = /user/logout 9 | rememberer_name = auth_tkt 10 | post_login_url = /user/logged_in 11 | post_logout_url = /user/logged_out 12 | charset = utf-8 13 | 14 | [general] 15 | request_classifier = repoze.who.classifiers:default_request_classifier 16 | challenge_decider = repoze.who.classifiers:default_challenge_decider 17 | 18 | [identifiers] 19 | plugins = 20 | friendlyform;browser 21 | auth_tkt 22 | 23 | [authenticators] 24 | plugins = 25 | auth_tkt 26 | ckan.lib.authenticator:UsernamePasswordAuthenticator 27 | 28 | [challengers] 29 | plugins = 30 | friendlyform;browser 31 | -------------------------------------------------------------------------------- /docker-compose/ckan-secrets.dat: -------------------------------------------------------------------------------- 1 | db required POSTGRES_PASSWORD 123456 Enter password for CKAN database 2 | datastore-db required DATASTORE_PASSWORD 123456 Enter password for datastore database 3 | datastore-db required DATASTORE_RO_PASSWORD 123456 Enter password for datastore read only user 4 | datastore-db required DATASTORE_RO_USER readonly Enter datastore read only username 5 | datastore-db required DATASTORE_PUBLIC_RO_PASSWORD 123456 Enter password for datastore public read only user 6 | provisioning-api required DATABASE_URL postgresql://postgres:123456@provisioning-api-db:5432/postgres Provisioning Database URI [Skip if not relevant] 7 | provisioning-api-db required POSTGRES_PASSWORD 123456 Provisioning Database password [Skip if not relevant] 8 | ckan required BEAKER_SESSION_SECRET owbf4obj34fb3jk4bo5b45kjb45pjbg5ojgbv54jgb54jg Enter Beaker session secret string 9 | ckan required APP_INSTANCE_UUID 1b05eb54-743a-40a7-8e31-f2c5ff69c0cb Enter Application UUID 10 | ckan required SOLR_URL http://solr:8983/solr/ckan Enter SOLR connection string 11 | ckan required CKAN_REDIS_URL redis://redis:6379/1 Enter Redis URL 12 | ckan required CKAN_DATAPUSHER_URL http://datapusher:8800/ Enter Datapusher URL 13 | ckan required CKAN_DATAPUSHER_API_TOKEN xxxxxxxx Enter Datapusher API token 14 | ckan required SMTP_SERVER mail.example.com Enter SMTP server address 15 | ckan required SMTP_USER info Enter SMTP server username 16 | ckan optional SMTP_PASSWORD empty Enter SMTP server password 17 | ckan optional SMTP_MAIL_FROM empty Enter SMTP mail from 18 | ckan required CKAN_SITE_URL http://ckan:5000 Enter Website URL (including https:// or http://) 19 | ckan optional AWS_ACCESS_KEY_ID empty Enter AWS secret key [Skip if not using AWS] 20 | ckan optional AWS_SECRET_ACCESS_KEY empty AWS secret access key [Skip if not using AWS] 21 | ckan optional SENTRY_DSN https://@sentry.io/ Enter Sentry DSN URL with token and ID [Skip if not using Sentry] 22 | ckan optional GTM_ID empty Enter Google Tag Manager ID [Skip if not using the new Google Analytics (GTM) or not using Google Analytics at all] 23 | ckan optional GA_ID empty Enter Google Analytics ID [Skip if using GTM or not using Google Analytics at all] 24 | ckan optional GA_ACCOUNT empty Enter Google Analytics account name [Skip if using GTM or not 
using Google Analytics at all] 25 | ckan optional GA_PASSWORD empty Enter Google Analytics password [Skip if using GTM or not using Google Analytics at all] 26 | ckan optional GA_USERNAME empty Enter Google Analytics username [Skip if using GTM or not using Google Analytics at all] 27 | datapusher optional TIMEZONE UTC Enter Datapusher timezone 28 | traefik optional CERTIFICATE_EMAIL Enter email address for Let's Encrypt certificate [Skip if using self-signed certificate] 29 | traefik optional CERTIFICATE_DOMAIN Enter domain for Let's Encrypt certificate [Skip if using self-signed certificate] 30 | -------------------------------------------------------------------------------- /docker-compose/provisioning-api/README.md: -------------------------------------------------------------------------------- 1 | The *.pem files were created using [this script](https://github.com/datahq/auth/blob/master/tools/generate_key_pair.sh) 2 | -------------------------------------------------------------------------------- /docker-compose/provisioning-api/private.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAwmd8SaXae5+s/ZIrz9d2mhv1HYy3eCvTEejGLKeaKUuuXLCS 3 | 5fajL7ddAD8I/mY3JELdKt8JjVvyXrFP6Cp83Lp9n0rL4BOo2gUGnHLcIvMpT5t/ 4 | YiSJh+s4dwPXIBre8JuI46QwHQMBTLOYSCyimj3JQdk6gwGVbgn0/TerhpwYFoo0 5 | ABABh/mhSQHGjooNlkyLP+SVELk52dn8Rp0+jbrakMQO9PSV1j+CizViixtdQD3y 6 | 2gD470fM14s0lauiM5eu8jkB1U1FAGlpuOKpxv3Kh+8T74trJeDVpa3gi4XUoVJI 7 | XhfdM1/hZNg6Cf6s/4wnnY6df307BbUmeoM1cwIDAQABAoIBAFz3ulNKEnLeSI83 8 | EOajRp+qUQ0hlzfL1XOSGvuN50ZqX+cJtiZtTVrrWHleq8FlRmd75mKzKNFGMGEi 9 | cHE5qF6yfJLp5ReyryQcFS5KK4Y3PQFJ/qADkxXB4k3gM0eyMf1klBExGqU1I7D3 10 | jhKt7dDPTWjFJpsx7bEi59sUfV2wfyGXLHHfqBQUlsyZdjS1ZSagnyb9RGtdCzqS 11 | EuMaC3CwC3OqdSCSvrFuemu/jTKinwIV96Svm5H6YWdgohehxx3WkeEUnfRCEfjV 12 | gAHu+6L5ILGdQlqF/GM/ymMdQL+vRMRtBq2hLDk+rm7qtUyX9QMGtEIPu98AV3E2 13 | UZ57hukCgYEA/YrVj6/fLUZej3HJ3lUg0zDdMcjc2L210+eFgraj4oX14iD+ho5l 14 | VJqah+CEbhP3rgJsvvwOE3sUeBIfmCbXhGeOPSRGxXI+CdDf3i8vVmPRUHG5YVaV 15 | zAKIr1BhIWPKcMO/11qNvy8ztZDUXKN+HoLy/1oH6duk46Zi38PmP50CgYEAxEnm 16 | Zv57tzhtaQ0aGlfQ/41fB/tmn1ixzyw1HQPdgF93srDKlRwwZSncYJA5+BDoUiR+ 17 | 9YWXWEk6Wklk1fWhMqMkJhRvEgcRoXoeDXuDK2E9Y0iAeEDzzQRaSNxr3EzGUtqG 18 | 6DcjrDacrsz05AfQa6IRdNe69manvrZKUl75pE8CgYBV+VEHozyLbrQ78frg8H7r 19 | /kLTUehJerMAgx6Lv4D9HC3Pmx6T5A9KTA6giqp9ZTn0gI/9hx0C0BaVzJd4EZu9 20 | IxxhcT6qT13oGDMBdYCYy8o7fAS/lAvBnQ5aXGhDWUJZUJdp4CP2s3vWUqEgNLRc 21 | hkz997p9O2TDaTVX6WbXJQKBgAfmqZ2PtM8lOFI4MsrIqiL/P8oEdpcHbPDX6LNo 22 | Smw2a40HzL+/yLrpunMwmwS/OXrE+U9Mt1DYcb7coXBzWK+/HV9jBy2El1MWpUsJ 23 | o7gFTAFJE9LDWozwFepE5RqTBJClNzT1szvlXt0yZtEe92a9uVIotN/iNfJZSBiU 24 | thvFAoGAJ7iK+hl5T937H5D4iQL5rZW/d9jl+qSjgWyCcqWxLQPsfiNRVJEy8bLD 25 | 2BqAcHCxC+ozbu2e4tY+sprwN4LrgEvTDj6CtRyWrqBt31GDgw1QSxsidv6fJt2o 26 | lAAh/EEc8JLRgigVS9B4urNgJiaziwWqhugc7BfXXKFm/8RZyTM= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /docker-compose/provisioning-api/public.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwmd8SaXae5+s/ZIrz9d2 3 | mhv1HYy3eCvTEejGLKeaKUuuXLCS5fajL7ddAD8I/mY3JELdKt8JjVvyXrFP6Cp8 4 | 3Lp9n0rL4BOo2gUGnHLcIvMpT5t/YiSJh+s4dwPXIBre8JuI46QwHQMBTLOYSCyi 5 | mj3JQdk6gwGVbgn0/TerhpwYFoo0ABABh/mhSQHGjooNlkyLP+SVELk52dn8Rp0+ 6 | jbrakMQO9PSV1j+CizViixtdQD3y2gD470fM14s0lauiM5eu8jkB1U1FAGlpuOKp 7 | 
xv3Kh+8T74trJeDVpa3gi4XUoVJIXhfdM1/hZNg6Cf6s/4wnnY6df307BbUmeoM1 8 | cwIDAQAB 9 | -----END PUBLIC KEY----- 10 | -------------------------------------------------------------------------------- /docs/imgs/airflow-ready.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/docs/imgs/airflow-ready.png -------------------------------------------------------------------------------- /docs/imgs/ckan-ready.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/docs/imgs/ckan-ready.png -------------------------------------------------------------------------------- /docs/imgs/dags_ready.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/docs/imgs/dags_ready.png -------------------------------------------------------------------------------- /docs/imgs/harvest-sources-empty.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/docs/imgs/harvest-sources-empty.png -------------------------------------------------------------------------------- /docs/imgs/new-harvest-source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/docs/imgs/new-harvest-source.png -------------------------------------------------------------------------------- /jenkins/.dockerignore: -------------------------------------------------------------------------------- 1 | jobs 2 | scripts 3 | -------------------------------------------------------------------------------- /jenkins/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jenkins/jenkins:2.478 2 | 3 | ARG PIP_INDEX_URL 4 | ENV PIP_INDEX_URL=$PIP_INDEX_URL 5 | 6 | RUN export JENKINS_VERSION=$(unzip -p /usr/share/jenkins/jenkins.war META-INF/MANIFEST.MF | grep 'Jenkins-Version' | cut -d' ' -f2) && \ 7 | echo "Jenkins version: $JENKINS_VERSION" && \ 8 | jenkins-plugin-cli --jenkins-version $JENKINS_VERSION --plugins \ 9 | build-timeout envfile copyartifact extensible-choice-parameter file-operations \ 10 | fstrigger generic-webhook-trigger git-parameter github-branch-source \ 11 | global-variable-string-parameter http_request jobgenerator join managed-scripts matrix-combinations-parameter \ 12 | persistent-parameter workflow-aggregator pipeline-github-lib python ssh-slaves timestamper urltrigger \ 13 | ws-cleanup 14 | 15 | USER root 16 | RUN curl -L "https://github.com/docker/compose/releases/download/1.23.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 17 | RUN apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg2 software-properties-common 18 | RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && apt-key fingerprint 0EBFCD88 &&\ 19 | add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" &&\ 20 | apt-get update && apt-get install -y docker-ce 21 | RUN chmod +x /usr/local/bin/docker-compose && echo "jenkins ALL=NOPASSWD: ALL" >> 
/etc/sudoers &&\ 22 | echo "export CKAN_CLOUD_DOCKER_JENKINS=1" > /etc/profile.d/ckan_cloud_docker_jenkins &&\ 23 | chmod +x /etc/profile.d/ckan_cloud_docker_jenkins 24 | RUN apt update && apt install -y python3-pip 25 | RUN python3 -m pip install --break-system-packages --index-url ${PIP_INDEX_URL:-https://pypi.org/simple/} pyyaml 26 | 27 | USER jenkins 28 | -------------------------------------------------------------------------------- /jenkins/jobs/.gitignore: -------------------------------------------------------------------------------- 1 | builds 2 | lastStable 3 | lastSuccessful 4 | workspace 5 | nextBuildNumber 6 | -------------------------------------------------------------------------------- /jenkins/jobs/CKAN builds/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | All 16 | false 17 | false 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | false 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /jenkins/jobs/CKAN builds/jobs/custom CKAN build (local)/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Build a custom CKAN (from local directory) 5 | false 6 | 7 | 8 | 9 | 10 | DOCKER_IMAGE 11 | 12 | ckan-cloud-custom-ckan-${BUILD_NUMBER} 13 | true 14 | 15 | 16 | CKAN_BRANCH 17 | 18 | ckan-2.8.1 19 | true 20 | 21 | 22 | CKAN_REPO 23 | 24 | ckan/ckan 25 | true 26 | 27 | 28 | POST_INSTALL 29 | 30 | 31 | false 32 | 33 | 34 | POST_DOCKER_BUILD 35 | 36 | 37 | false 38 | 39 | 40 | DOCKER_PUSH_IMAGE 41 | 42 | 43 | false 44 | 45 | 46 | 47 | 48 | 49 | true 50 | false 51 | false 52 | false 53 | 54 | false 55 | 56 | 57 | cd /etc/ckan-cloud/ckan-cloud-docker &&\ 58 | jenkins/scripts/build_ckan_custom.sh 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /jenkins/jobs/CKAN builds/jobs/custom CKAN build/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Build a custom CKAN 5 | false 6 | 7 | 8 | 9 | 10 | GIT_BRANCH_SPECIFIER 11 | 12 | d0e36dbb-d5d2-4b16-905f-8ea0235c5784 13 | PT_BRANCH_TAG 14 | 15 | * 16 | .* 17 | NONE 18 | origin/master 19 | DEFAULT 20 | false 21 | 5 22 | 23 | 24 | DOCKER_IMAGE 25 | 26 | ckan-cloud-custom-ckan-${BUILD_NUMBER} 27 | true 28 | 29 | 30 | CKAN_BRANCH 31 | 32 | ckan-2.8.1 33 | true 34 | 35 | 36 | CKAN_REPO 37 | 38 | ckan/ckan 39 | true 40 | 41 | 42 | POST_INSTALL 43 | 44 | 45 | false 46 | 47 | 48 | POST_DOCKER_BUILD 49 | 50 | 51 | false 52 | 53 | 54 | DOCKER_PUSH_IMAGE 55 | Make sure to use each pushed image from a single Jenkins installation to prevent BUILD_NUMBER collisions. 
56 | 57 | 58 | viderum/ckan-cloud-docker:ckan-custom-${BUILD_NUMBER} 59 | orihoch/ckan-cloud-docker:ckan-custom-${BUILD_NUMBER} 60 | 61 | false 62 | 63 | 64 | 65 | 66 | 67 | 2 68 | 69 | 70 | https://github.com/ViderumGlobal/ckan-cloud-docker.git 71 | 72 | 73 | 74 | 75 | ${GIT_BRANCH_SPECIFIER} 76 | 77 | 78 | false 79 | 80 | 81 | 82 | true 83 | false 84 | false 85 | false 86 | 87 | false 88 | 89 | 90 | cd /etc/ckan-cloud/ckan-cloud-docker &&\ 91 | jenkins/scripts/build_ckan_custom.sh 92 | 93 | 94 | 95 | 96 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | All 21 | false 22 | false 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | false 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/create-instance/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | false 9 | false 10 | 11 | 12 | 13 | 14 | INSTANCE_ID 15 | 16 | jenkins${BUILD_NUMBER} 17 | true 18 | 19 | 20 | VALUES 21 | 22 | # 23 | # Copy and modify the values from the relevant template: 24 | # 25 | # * production deployment: 26 | # * https://github.com/ViderumGlobal/ckan-cloud-helm/blob/master/aws-values.yaml 27 | # 28 | # * development on minikube: 29 | # * https://github.com/ViderumGlobal/ckan-cloud-helm/blob/master/minikube-values.yaml 30 | # 31 | false 32 | 33 | 34 | 35 | 36 | 37 | true 38 | false 39 | false 40 | false 41 | 42 | false 43 | 44 | 45 | cd /etc/ckan-cloud/ckan-cloud-docker && jenkins/scripts/create_instance.sh 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/debug/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | false 9 | false 10 | 11 | 12 | 13 | 14 | INSTANCE_ID 15 | 16 | 17 | false 18 | 19 | 20 | APPS 21 | 22 | ckan db jobs datastore-db solr redis jobs-db 23 | false 24 | 25 | 26 | DEBUG_SCRIPT 27 | 28 | $KUBECTL get pod ${POD_NAME} &&\ 29 | $KUBECTL describe pod ${POD_NAME} &&\ 30 | $KUBECTL logs --tail 15 ${LOG_ARGS} ${POD_NAME} 31 | false 32 | 33 | 34 | KUBECTL 35 | 36 | jenkins/scripts/kubectl.sh -n ${INSTANCE_ID} 37 | false 38 | 39 | 40 | GET_POD_NAME 41 | 42 | POD_NAME=$($KUBECTL get pods -l "app=${APP}" -o "jsonpath={.items[0].metadata.name}") &&\ 43 | echo ${APP}: ${POD_NAME} 44 | false 45 | 46 | 47 | 48 | 49 | 50 | true 51 | false 52 | false 53 | false 54 | 55 | false 56 | 57 | 58 | #!/usr/bin/env bash 59 | 60 | export QUIET=1 61 | 62 | cd /etc/ckan-cloud/ckan-cloud-docker 63 | ERRORS="" 64 | for APP in $APPS; do 65 | if eval "${GET_POD_NAME}"; then 66 | if [ "${APP}" == "jobs" ]; then 67 | LOG_ARGS="-c ckan-jobs" 68 | else 69 | LOG_ARGS="" 70 | fi 71 | ! eval "${DEBUG_SCRIPT}" && echo ERROR! Debug script failed for app ${APP} && ERRORS="${ERRORS}${APP}: Debug script failed\n" 72 | else 73 | echo ERROR! Failed to get pod name for app ${APP} && ERRORS="${ERRORS}${APP}: Failed to get pod name\n" 74 | fi 75 | done 76 | if [ -z "${ERRORS}" ]; then 77 | echo Great Success! 
78 | exit 0 79 | else 80 | echo "${ERRORS}" 81 | exit 1 82 | fi 83 | 84 | 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/delete-instance/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | 9 | 10 | INSTANCE_ID 11 | 12 | 13 | false 14 | 15 | 16 | 17 | 18 | 19 | true 20 | false 21 | false 22 | false 23 | 24 | false 25 | 26 | 27 | cd /etc/ckan-cloud/ckan-cloud-docker && jenkins/scripts/delete_instance.sh 28 | 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/get instance values/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | false 9 | false 10 | 11 | 12 | 13 | 14 | INSTANCE_ID 15 | 16 | 17 | false 18 | 19 | 20 | 21 | 22 | 23 | true 24 | false 25 | false 26 | false 27 | 28 | false 29 | 30 | 31 | cd /etc/ckan-cloud/ckan-cloud-docker 32 | export QUIET=1 33 | /etc/ckan-cloud/cca_operator.sh ./get-instance-values.sh "${INSTANCE_ID}" \ 34 | | python3 -c " 35 | import yaml, sys; 36 | print(yaml.dump(yaml.load(sys.stdin), default_flow_style=False, indent=2, width=99999, allow_unicode=True)) 37 | " 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/instance-connection-info/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | false 9 | false 10 | 11 | 12 | 13 | 14 | INSTANCE_ID 15 | 16 | 17 | false 18 | 19 | 20 | 21 | 22 | 23 | true 24 | false 25 | false 26 | false 27 | 28 | false 29 | 30 | 31 | cd /etc/ckan-cloud/ckan-cloud-docker &&\ 32 | /etc/ckan-cloud/cca_operator.sh ./instance-connection-info.sh "${INSTANCE_ID}" 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/list-instances/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | true 9 | false 10 | false 11 | false 12 | 13 | false 14 | 15 | 16 | cd /etc/ckan-cloud/ckan-cloud-docker && jenkins/scripts/list_instances.sh 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/logs/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | false 9 | false 10 | 11 | 12 | 13 | 14 | INSTANCE_ID 15 | 16 | 17 | false 18 | 19 | 20 | APP 21 | 22 | 23 | false 24 | 25 | 26 | KUBECTL 27 | 28 | jenkins/scripts/kubectl.sh -n ${INSTANCE_ID} 29 | false 30 | 31 | 32 | GET_POD_NAME 33 | 34 | POD_NAME=$($KUBECTL get pods -l "app=${APP}" -o "jsonpath={.items[0].metadata.name}") &&\ 35 | echo ${APP}: ${POD_NAME} 36 | false 37 | 38 | 39 | LOG_SCRIPT 40 | 41 | $KUBECTL logs $POD_NAME 42 | false 43 | 44 | 45 | 46 | 47 | 48 | true 49 | false 50 | false 51 | false 52 | 53 | false 54 | 55 | 56 | #!/usr/bin/env bash 57 | 58 | export QUIET=1 59 | 60 | cd /etc/ckan-cloud/ckan-cloud-docker 61 | if eval "${GET_POD_NAME}"; then 62 | if [ "${APP}" == "jobs" ]; then 63 | LOG_ARGS="-c ckan-jobs" 64 | else 65 | LOG_ARGS="" 66 | fi 67 | ! eval "${LOG_SCRIPT}" && echo ERROR! Log script failed && exit 1 68 | else 69 | echo ERROR! 
Failed to get pod name && exit 1 70 | fi 71 | 72 | exit 0 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/recreate-instance/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | 9 | 10 | UPDATE_VALUES 11 | 12 | siteTitle: "My Custom Title" 13 | false 14 | 15 | 16 | INSTANCE_ID 17 | 18 | 19 | false 20 | 21 | 22 | 23 | 24 | 25 | true 26 | false 27 | false 28 | false 29 | 30 | false 31 | 32 | 33 | cd /etc/ckan-cloud/ckan-cloud-docker && jenkins/scripts/recreate_instance.sh 34 | 35 | 36 | 37 | 38 | -------------------------------------------------------------------------------- /jenkins/jobs/cluster administration/jobs/update-instance/config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | false 6 | 7 | 8 | false 9 | false 10 | 11 | 12 | 13 | 14 | UPDATE_VALUES 15 | 16 | siteTitle: "My Custom Title" 17 | false 18 | 19 | 20 | INSTANCE_ID 21 | 22 | 23 | false 24 | 25 | 26 | 27 | 28 | 29 | true 30 | false 31 | false 32 | false 33 | 34 | false 35 | 36 | 37 | cd /etc/ckan-cloud/ckan-cloud-docker && jenkins/scripts/update_instance.sh 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /jenkins/scripts/build_ckan_custom.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | sudo docker-compose -f docker-compose.yaml -f .docker-compose-cache-from.yaml build ckan &&\ 4 | sudo docker tag viderum/ckan-cloud-docker:ckan-latest ${DOCKER_IMAGE} &&\ 5 | if ! [ -z "${DOCKER_PUSH_IMAGE}" ]; then 6 | sudo docker tag ${DOCKER_IMAGE} ${DOCKER_PUSH_IMAGE} &&\ 7 | sudo docker push ${DOCKER_PUSH_IMAGE} 8 | fi &&\ 9 | echo " 10 | 11 | ${DOCKER_PUSH_IMAGE:-$DOCKER_IMAGE} 12 | 13 | " 14 | -------------------------------------------------------------------------------- /jenkins/scripts/create_instance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if ! [ -z "${VALUES}" ]; then 4 | ! 
echo "${VALUES}" | tee /dev/stderr | /etc/ckan-cloud/cca_operator.sh ./set-instance-values.sh ${INSTANCE_ID} && exit 1 5 | fi 6 | 7 | /etc/ckan-cloud/cca_operator.sh ./create-instance.sh ${INSTANCE_ID} 8 | -------------------------------------------------------------------------------- /jenkins/scripts/delete_instance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | /etc/ckan-cloud/cca_operator.sh ./delete-instance.sh "${INSTANCE_ID}" -------------------------------------------------------------------------------- /jenkins/scripts/docker_compose_cca_operator.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ "${QUIET}" == "1" ]; then 4 | sudo docker-compose build cca-operator >/dev/null 2>&1 &&\ 5 | sudo docker-compose run --rm cca-operator 2>/dev/null "$@" 6 | else 7 | sudo docker-compose build cca-operator &&\ 8 | sudo docker-compose run --rm cca-operator "$@" 9 | fi 10 | 11 | -------------------------------------------------------------------------------- /jenkins/scripts/get_instance_values.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export QUIET=1 4 | /etc/ckan-cloud/cca_operator.sh ./get-instance-values.sh "${INSTANCE_ID}" \ 5 | | python3 -c " 6 | import yaml, sys; 7 | print(yaml.dump(yaml.load(sys.stdin), default_flow_style=False, indent=2, width=99999, allow_unicode=True)) 8 | " 9 | -------------------------------------------------------------------------------- /jenkins/scripts/kubectl.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | exec /etc/ckan-cloud/cca_operator.sh ./kubectl.sh "$@" -------------------------------------------------------------------------------- /jenkins/scripts/list_instances.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | /etc/ckan-cloud/cca_operator.sh ./list-instances.sh -------------------------------------------------------------------------------- /jenkins/scripts/recreate_instance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export UPDATE_INSTANCE_COMMAND=./recreate-instance.sh 4 | exec jenkins/scripts/update_instance.sh "$@" 5 | -------------------------------------------------------------------------------- /jenkins/scripts/update_instance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | VALUES_TEMPFILE=`mktemp` 4 | export QUIET=1 5 | ! 
/etc/ckan-cloud/cca_operator.sh ./get-instance-values.sh "${INSTANCE_ID}" > $VALUES_TEMPFILE \ 6 | && echo failed to get instance values && exit 1 7 | export QUIET=0 8 | 9 | TEMPFILE=`mktemp` &&\ 10 | echo "${UPDATE_VALUES}" \ 11 | | python3 -c ' 12 | import yaml,sys; 13 | values = yaml.load(open("'${VALUES_TEMPFILE}'")) 14 | values.update(**yaml.load(sys.stdin)) 15 | print(yaml.dump(values, default_flow_style=False, allow_unicode=True)) 16 | ' > $TEMPFILE &&\ 17 | cat $TEMPFILE | tee /dev/stderr | /etc/ckan-cloud/cca_operator.sh ./set-instance-values.sh ${INSTANCE_ID} &&\ 18 | /etc/ckan-cloud/cca_operator.sh ${UPDATE_INSTANCE_COMMAND:-./update-instance.sh} ${INSTANCE_ID} 19 | -------------------------------------------------------------------------------- /migrate_databases.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source $CKAN_K8S_SECRETS 4 | 5 | DB_BACKUP=${1} 6 | DATASTORE_DB_BACKUP=${2} 7 | # Switch to postgres user 8 | DB="${SQLALCHEMY_URL/:\/\/[^:]*:/:\/\/postgres:}" 9 | # Remove db name from URI to delete 10 | DB="${DB/\/ckan/}" 11 | DATASTORE_DB="${CKAN_DATASTORE_WRITE_URLB/\/datastore/}" 12 | 13 | echo $DB 14 | 15 | # recover DB 16 | if [ "$DB_BACKUP" != "" ]; then 17 | wget -O db_backup.gz $DB_BACKUP && gunzip db_backup.gz 18 | psql $DB -c "SELECT pg_terminate_backend (pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'ckan'" 19 | psql $DB -c "DROP DATABASE ckan;" 20 | psql $DB -c "CREATE DATABASE ckan;" 21 | psql $SQLALCHEMY_URL -f db_backup ckan 22 | psql $DB/ckan -c "CREATE EXTENSION postgis;" 23 | psql $DB/ckan -c "CREATE EXTENSION postgis_topology;" 24 | fi 25 | 26 | # recover datastore DB 27 | if [ "$DATASTORE_DB_BACKUP" != "" ]; then 28 | psql $DB -c "SELECT pg_terminate_backend (pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'datastore'" 29 | psql $DB -c "DROP DATABASE datastore;" 30 | psql $DB -c "CREATE DATABASE datastore;" 31 | wget -O db_datastore_backup.gz $DATASTORE_DB_BACKUP && gunzip db_datastore_backup.gz 32 | psql $CKAN_DATASTORE_WRITE_URL -f db_datastore_backup ckan 33 | fi 34 | -------------------------------------------------------------------------------- /migrate_dbs_generic.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | DB_BACKUP=${1} 4 | DATASTORE_DB_BACKUP=${2} 5 | ROOT_DB=${3} 6 | INSTANCE_ID=${4} 7 | 8 | # recover DB 9 | if [ "$DB_BACKUP" != "" ]; then 10 | wget -O db_backup.gz $DB_BACKUP && gunzip db_backup.gz 11 | psql $ROOT_DB -c "SELECT pg_terminate_backend (pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$INSTANCE_ID'" 12 | psql $ROOT_DB -c "DROP DATABASE $INSTANCE_ID;" 13 | psql $ROOT_DB -c "CREATE DATABASE $INSTANCE_ID;" 14 | psql $CKAN_SQLALCHEMY_URL -f db_backup $INSTANCE_ID 15 | psql $ROOT_DB/$INSTANCE_ID -c "CREATE EXTENSION postgis;" 16 | psql $ROOT_DB/$INSTANCE_ID -c "CREATE EXTENSION postgis_topology;" 17 | fi 18 | 19 | # recover datastore DB 20 | if [ "$DATASTORE_DB_BACKUP" != "" ]; then 21 | wget -O db_datastore_backup.gz $DATASTORE_DB_BACKUP && gunzip db_datastore_backup.gz 22 | psql $ROOT_DB -c "SELECT pg_terminate_backend (pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$INSTANCE_ID-datastore'" 23 | psql $ROOT_DB -c "DROP DATABASE $INSTANCE_ID-datastore;" 24 | psql $ROOT_DB -c "CREATE DATABASE $INSTANCE_ID-datastore;" 25 | psql $CKAN__DATASTORE__WRITE_URL -f 
db_datastore_backup $INSTANCE_ID-datastore 26 | fi 27 | -------------------------------------------------------------------------------- /migrate_filestorage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Migrate the data from S3 FileStorage to local fyle system 4 | 5 | # FileStorage Server Eg: https://cc-p-minio.ckan.io or https://s3.amazonaws.com 6 | HOST=${1} 7 | ACCESS_KEY=${2} 8 | SECRET_KEY=${3} 9 | BUCKET=${4} 10 | STORAGE_PREFIX=${5}/ 11 | TMP_DATA_RECEIVED='/tmp/data_received' 12 | TMP_DATA='/tmp/data' 13 | 14 | # Download minio client 15 | wget https://dl.min.io/client/mc/release/linux-amd64/mc && chmod +x mc 16 | # Add host 17 | ./mc config host add filestorage $HOST $ACCESS_KEY $SECRET_KEY 18 | echo downloading from storage 19 | ./mc cp --recursive filestorage/$BUCKET/$STORAGE_PREFIX $TMP_DATA_RECEIVED 20 | 21 | echo updating paths 22 | for dir in $(ls $TMP_DATA_RECEIVED/resources); do 23 | FIRST_DIR=${dir:0:3} 24 | SECOND_DIR=${dir:3:3} 25 | REST=${dir:6} 26 | RESOURCE_DIR=$TMP_DATA/resources/$FIRST_DIR/$SECOND_DIR 27 | mkdir -p $RESOURCE_DIR 28 | for file in $TMP_DATA_RECEIVED/resources/$dir/*; do 29 | echo $file 30 | cp $file $RESOURCE_DIR/$REST 31 | done 32 | done 33 | cp -r $TMP_DATA_RECEIVED/storage $TMP_DATA/storage 34 | 35 | echo mounting data into the persistent volumes 36 | docker cp $TMP_DATA ckan-cloud-docker_ckan_1:/var/lib/ckan 37 | docker-compose -f docker-compose.yaml -f .docker-compose-db.yaml -f .docker-compose.${5}-theme.yaml exec -u root ckan chown -R ckan:ckan /var/lib/ckan 38 | -------------------------------------------------------------------------------- /nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:alpine 2 | 3 | COPY default.conf /etc/nginx/conf.d/default.conf 4 | -------------------------------------------------------------------------------- /nginx/default.conf: -------------------------------------------------------------------------------- 1 | proxy_cache_path /tmp/nginx_cache levels=1:2 keys_zone=cache:30m max_size=250m; 2 | proxy_temp_path /tmp/nginx_proxy 1 2; 3 | server { 4 | listen 8080; 5 | server_name _; 6 | client_max_body_size 1000M; 7 | 8 | location /airflow/ { 9 | # extra NGINX for harvester (not always present) 10 | # Nginx will not check if the host is reachable on startup if we set a proxy_pass with a variable 11 | set $harvester http://harvester:8082; 12 | proxy_pass $harvester; 13 | 14 | proxy_set_header X-Forwarded-For $remote_addr; 15 | proxy_set_header Host $host; 16 | proxy_redirect off; 17 | proxy_http_version 1.1; 18 | proxy_set_header Upgrade $http_upgrade; 19 | proxy_set_header Connection "upgrade"; 20 | } 21 | 22 | location / { 23 | proxy_pass http://ckan:5000; 24 | proxy_set_header X-Forwarded-For $remote_addr; 25 | proxy_set_header Host $http_host; 26 | proxy_cache cache; 27 | proxy_cache_bypass $cookie_auth_tkt; 28 | proxy_no_cache $cookie_auth_tkt; 29 | proxy_cache_valid 30m; 30 | proxy_cache_key $http_host$scheme$proxy_host$request_uri; 31 | # In emergency comment out line to force caching 32 | # proxy_ignore_headers X-Accel-Expires Expires Cache-Control; 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /solr/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM solr:6.6.6 2 | 3 | # Enviroment 4 | ENV SOLR_CORE ckan 5 | 6 | # Create Directories 7 | RUN mkdir -p 
/opt/solr/server/solr/$SOLR_CORE/conf 8 | RUN mkdir -p /opt/solr/server/solr/$SOLR_CORE/data 9 | 10 | # Adding Files 11 | COPY solrconfig.xml /opt/solr/server/solr/$SOLR_CORE/conf/ 12 | COPY basic-config/ /opt/solr/server/solr/$SOLR_CORE/conf/ 13 | RUN ls /opt/solr/server/solr/$SOLR_CORE/conf/ 14 | 15 | ARG SCHEMA_XML=schemas/schema28.xml 16 | COPY $SCHEMA_XML /opt/solr/server/solr/$SOLR_CORE/conf/schema.xml 17 | 18 | # Create Core.properties 19 | RUN echo name=$SOLR_CORE > /opt/solr/server/solr/$SOLR_CORE/core.properties 20 | 21 | # Giving ownership to Solr 22 | 23 | USER root 24 | RUN chown -R $SOLR_USER:$SOLR_USER /opt/solr/server/solr/$SOLR_CORE 25 | 26 | # User 27 | USER $SOLR_USER:$SOLR_USER 28 | -------------------------------------------------------------------------------- /solr/basic-config/currency.xml: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /solr/basic-config/elevate.xml: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 26 | 27 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /solr/basic-config/protwords.txt: -------------------------------------------------------------------------------- 1 | # The ASF licenses this file to You under the Apache License, Version 2.0 2 | # (the "License"); you may not use this file except in compliance with 3 | # the License. You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | 13 | #----------------------------------------------------------------------- 14 | # Use a protected word file to protect against the stemmer reducing two 15 | # unrelated words to the same base word. 16 | 17 | # Some non-words that normally won't be encountered, 18 | # just to test that they won't be stemmed. 19 | dontstems 20 | zwhacky 21 | 22 | -------------------------------------------------------------------------------- /solr/basic-config/stopwords.txt: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | -------------------------------------------------------------------------------- /solr/basic-config/synonyms.txt: -------------------------------------------------------------------------------- 1 | # The ASF licenses this file to You under the Apache License, Version 2.0 2 | # (the "License"); you may not use this file except in compliance with 3 | # the License. You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | 13 | #----------------------------------------------------------------------- 14 | #some test synonym mappings unlikely to appear in real input text 15 | aaafoo => aaabar 16 | bbbfoo => bbbfoo bbbbar 17 | cccfoo => cccbar cccbaz 18 | fooaaa,baraaa,bazaaa 19 | 20 | # Some synonym groups specific to this example 21 | GB,gib,gigabyte,gigabytes 22 | MB,mib,megabyte,megabytes 23 | Television, Televisions, TV, TVs 24 | #notice we use "gib" instead of "GiB" so any WordDelimiterGraphFilter coming 25 | #after us won't split it into two words. 26 | 27 | # Synonym mappings can be used for spelling correction too 28 | pixima => pixma 29 | 30 | -------------------------------------------------------------------------------- /solr/solr.xml: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 28 | 29 | 30 | 31 | 32 | 33 | ${host:} 34 | ${jetty.port:8983} 35 | ${hostContext:solr} 36 | 37 | ${genericCoreNodeNames:true} 38 | 39 | ${zkClientTimeout:30000} 40 | ${distribUpdateSoTimeout:600000} 41 | ${distribUpdateConnTimeout:60000} 42 | ${zkCredentialsProvider:org.apache.solr.common.cloud.DefaultZkCredentialsProvider} 43 | ${zkACLProvider:org.apache.solr.common.cloud.DefaultZkACLProvider} 44 | 45 | 46 | 47 | 49 | ${socketTimeout:600000} 50 | ${connTimeout:60000} 51 | 52 | 53 | 54 | -------------------------------------------------------------------------------- /solr/solrcloud-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | mkdir -p /opt/solr/server/solr 4 | 5 | ! [ -e /opt/solr/server/solr/zoo.cfg ] && cp ckan_cloud/zoo.cfg /opt/solr/server/solr/ 6 | ! 
[ -e /opt/solr/server/solr/solr.xml ] && cp ckan_cloud/solr.xml /opt/solr/server/solr/ 7 | 8 | chown -R $SOLR_USER:$SOLR_USER /opt/solr 9 | 10 | echo '#!/usr/bin/env bash' > /_solrcloud_entrypoint.sh  # generate a wrapper script that runs the default entrypoint as $SOLR_USER 11 | echo export PATH='"'"${PATH}"'"' >> /_solrcloud_entrypoint.sh 12 | echo docker-entrypoint.sh "$@" >> /_solrcloud_entrypoint.sh 13 | chmod +x /_solrcloud_entrypoint.sh 14 | chown $SOLR_USER:$SOLR_USER /_solrcloud_entrypoint.sh 15 | exec sudo -HEu $SOLR_USER /_solrcloud_entrypoint.sh 16 | -------------------------------------------------------------------------------- /solr/solrcloud.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM solr:6.6.2 2 | 3 | USER root 4 | 5 | RUN apt-get update && apt-get install -y sudo 6 | 7 | # add default ckan configset 8 | ADD solrconfig.xml \ 9 | https://raw.githubusercontent.com/apache/lucene-solr/releases/lucene-solr/6.6.2/solr/server/solr/configsets/basic_configs/conf/currency.xml \ 10 | https://raw.githubusercontent.com/apache/lucene-solr/releases/lucene-solr/6.6.2/solr/server/solr/configsets/basic_configs/conf/synonyms.txt \ 11 | https://raw.githubusercontent.com/apache/lucene-solr/releases/lucene-solr/6.6.2/solr/server/solr/configsets/basic_configs/conf/stopwords.txt \ 12 | https://raw.githubusercontent.com/apache/lucene-solr/releases/lucene-solr/6.6.2/solr/server/solr/configsets/basic_configs/conf/protwords.txt \ 13 | https://raw.githubusercontent.com/apache/lucene-solr/releases/lucene-solr/6.6.2/solr/server/solr/configsets/data_driven_schema_configs/conf/elevate.xml \ 14 | ckan_default/conf/ 15 | 16 | ARG SCHEMA_XML=schemas/schema28.xml 17 | COPY $SCHEMA_XML ckan_default/conf/schema.xml 18 | 19 | COPY zoo.cfg ckan_cloud/zoo.cfg 20 | COPY solr.xml ckan_cloud/solr.xml 21 | COPY solrcloud-entrypoint.sh /opt/docker-solr/scripts/ 22 | 23 | ENTRYPOINT ["/opt/docker-solr/scripts/solrcloud-entrypoint.sh"] 24 | -------------------------------------------------------------------------------- /solr/zoo.cfg: -------------------------------------------------------------------------------- 1 | # The number of milliseconds of each tick 2 | tickTime=2000 3 | # The number of ticks that the initial 4 | # synchronization phase can take 5 | initLimit=10 6 | # The number of ticks that can pass between 7 | # sending a request and getting an acknowledgement 8 | syncLimit=5 9 | 10 | # the directory where the snapshot is stored. 11 | # dataDir=/opt/zookeeper/data 12 | # NOTE: Solr defaults the dataDir to /zoo_data 13 | 14 | # the port at which the clients will connect 15 | # clientPort=2181 16 | # NOTE: Solr sets this based on zkRun / zkHost params 17 | 18 | # the maximum number of client connections. 19 | # increase this if you need to handle more clients 20 | #maxClientCnxns=60 21 | # 22 | # Be sure to read the maintenance section of the 23 | # administrator guide before turning on autopurge. 24 | # 25 | # http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance 26 | # 27 | # The number of snapshots to retain in dataDir 28 | #autopurge.snapRetainCount=3 29 | # Purge task interval in hours 30 | # Set to "0" to disable auto purge feature 31 | #autopurge.purgeInterval=1 32 | -------------------------------------------------------------------------------- /start-harvester.md: -------------------------------------------------------------------------------- 1 | # Running CKAN NG harvester 2 | 3 | ## Purpose of this document 4 | 5 | This document details the procedure to run the CKAN Next Generation harvesters. 
6 | This includes: 7 | - a CKAN instance to harvest to. 8 | - an Airflow service to schedule and run harvest jobs periodically. 9 | 10 | ## Related links 11 | 12 | - [CKAN NG harvester](https://gitlab.com/datopian/ckan-ng-harvest) 13 | - [Core CKAN harvester](https://pypi.org/project/ckan-harvester/) 14 | - [Airflow](https://airflow.apache.org/) 15 | - [ViderumGlobal/ckan-cloud-docker GitHub repository](https://github.com/ViderumGlobal/ckan-cloud-docker) 16 | 17 | ## Bring up the CKAN Harvester NG instance 18 | 19 | ### About the Harvester NG image 20 | 21 | The Docker image for the harvester + Airflow is defined in this [GitLab repo](https://gitlab.com/datopian/ckan-ng-harvest). 22 | 23 | ### Running the full Docker environment 24 | 25 | In this repository's folder, run the following and follow all of its steps: 26 | 27 | ``` 28 | ./create_secrets.py 29 | ``` 30 | 31 | Start the Docker Compose environment with all its components: 32 | 33 | ``` 34 | docker-compose \ 35 | -f docker-compose.yaml \ 36 | -f .docker-compose-db.yaml \ 37 | -f .docker-compose.datagov-theme.yaml \ 38 | -f .docker-compose-harvester_ng.yaml \ 39 | up -d --build nginx harvester 40 | ``` 41 | 42 | Add hosts entries mapping the domains `nginx` and `ckan` to `127.0.0.1`: 43 | 44 | ``` 45 | 127.0.0.1 nginx 46 | 127.0.0.1 ckan 47 | ``` 48 | 49 | Create a CKAN admin user: 50 | 51 | ``` 52 | docker-compose \ 53 | exec ckan ckan-paster \ 54 | --plugin=ckan \ 55 | sysadmin add \ 56 | -c /etc/ckan/production.ini \ 57 | admin password=12345678 \ 58 | email=admin@localhost 59 | ``` 60 | 61 | Now you are able to log in to CKAN at http://nginx:8080 with username `admin` and password `12345678`. 62 | 63 | ![ckan](docs/imgs/ckan-ready.png) 64 | 65 | Your harvest source list will be empty: 66 | 67 | ![harvest empty](docs/imgs/harvest-sources-empty.png) 68 | 69 | ## Harvesting 70 | 71 | After it starts, Airflow will [read all the harvest sources](https://gitlab.com/datopian/ckan-ng-harvest/blob/develop/automate-tasks/airflow/dags/harvest_with_airflow.py) (at the moment just _data.json_ and _CSW_ sources) from the CKAN instance. 72 | 73 | The first time this list will be empty, since you don't have any harvest sources defined in this clean CKAN instance. 74 | You can check the Airflow status at http://nginx:8080/airflow/. 75 | 76 | ![ckan](docs/imgs/airflow-ready.png) 77 | 78 | To fill the CKAN instance with harvest sources you can add them manually at http://nginx:8080/harvest/new. 79 | ![ckan](docs/imgs/new-harvest-source.png) 80 | 81 | Or you can import all the harvest sources from another CKAN instance with the Harvester NG. 82 | You will need to clone [this repo](https://gitlab.com/datopian/ckan-ng-harvest/blob/develop/automate-tasks/airflow/dags/harvest_with_airflow.py), install the _requirements_, and run the import script. 83 | 84 | To define the destination CKAN instance (at http://nginx:8080) you will need to copy the _settings.py_ file as _local_settings.py_ and define the API key and other required values there.
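A rough sketch of that preparation step, assuming the harvester repository keeps a `requirements.txt` and a top-level `settings.py` (the exact file names and setting keys are assumptions; check the repository's own README):

```
git clone https://gitlab.com/datopian/ckan-ng-harvest.git
cd ckan-ng-harvest
pip3 install -r requirements.txt   # requirements file name assumed; see the repo docs
cp settings.py local_settings.py
# Edit local_settings.py and set at least:
#  - the destination CKAN URL (http://nginx:8080)
#  - the API key of the admin user created above (visible on that user's CKAN profile page)
```

With _local_settings.py_ in place, run the import script, for example: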
85 | 86 | ``` 87 | # Data.json type 88 | python3 import_harvest_sources.py --import_from_url https://catalog.data.gov --source_type datajson --method GET 89 | # CSW type 90 | python3 import_harvest_sources.py --import_from_url https://catalog.data.gov --source_type csw --method GET 91 | 92 | ``` 93 | 94 | Once the harvest sources are filled in, Airflow will read them and create _DAGs_: 95 | 96 | ![dags ready](docs/imgs/dags_ready.png) 97 | 98 | ### Other tools 99 | 100 | Clean all the data by removing the containers and volumes: 101 | 102 | ``` 103 | docker-compose \ 104 | -f docker-compose.yaml \ 105 | -f .docker-compose-db.yaml \ 106 | -f .docker-compose.datagov-theme.yaml \ 107 | -f .docker-compose-harvester_ng.yaml \ 108 | down -v 109 | ``` 110 | 111 | Check the logs: 112 | 113 | ``` 114 | $ docker-compose \ 115 | -f docker-compose.yaml \ 116 | -f .docker-compose-db.yaml \ 117 | -f .docker-compose.datagov-theme.yaml \ 118 | logs -f 119 | ``` -------------------------------------------------------------------------------- /traefik/acme.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/traefik/acme.json -------------------------------------------------------------------------------- /traefik/certs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/datopian/ckan-cloud-docker/9cd58625b6c571a4a420b1b21592611548d5a8d9/traefik/certs/.gitkeep -------------------------------------------------------------------------------- /traefik/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | # Update the Traefik configuration file with secrets if not in development mode 5 | if [ -f /traefik.dev.toml ]; then 6 | echo "Using development configuration" 7 | cp /traefik.dev.toml /traefik.toml 8 | else 9 | if [ ! -f /traefik.toml.template ]; then 10 | echo "Traefik template file does not exist, exiting" 11 | exit 1 12 | fi 13 | if [ ! -f /traefik-secrets.sh ]; then 14 | echo "Traefik secrets file does not exist. Please run 'make secret' to generate it before starting the container" 15 | exit 1 16 | fi 17 | if [ ! 
-f /templater.sh ]; then 18 | echo "Templater script does not exist, exiting" 19 | exit 1 20 | fi 21 | 22 | echo "Traefik configuration file does not exist, templating" 23 | 24 | chmod +x /templater.sh 25 | ./templater.sh /traefik.toml.template -f /traefik-secrets.sh > traefik.toml 26 | fi 27 | 28 | # Fix acme.json file permissions: set to 600 29 | chmod 600 /acme.json 30 | 31 | # first arg is `-f` or `--some-option` 32 | if [ "${1#-}" != "$1" ]; then 33 | set -- traefik "$@" 34 | fi 35 | 36 | # if our command is a valid Traefik subcommand, let's invoke it through Traefik instead 37 | # (this allows for "docker run traefik version", etc) 38 | if traefik "$1" --help | grep -s -q "help"; then 39 | set -- traefik "$@" 40 | fi 41 | 42 | exec "$@" 43 | -------------------------------------------------------------------------------- /traefik/traefik.dev.toml: -------------------------------------------------------------------------------- 1 | debug = false 2 | defaultEntryPoints = ["http"] 3 | 4 | [entryPoints] 5 | [entryPoints.http] 6 | address = ":80" 7 | 8 | [entryPoints.api] 9 | address = ":8081" 10 | 11 | [api] 12 | entryPoint = "api" 13 | 14 | [ping] 15 | entryPoint = "http" 16 | 17 | [accessLog] 18 | 19 | [file] 20 | watch = true 21 | 22 | [backends] 23 | [backends.ckan] 24 | [backends.ckan.servers.server1] 25 | url = "http://nginx:8080" 26 | 27 | [frontends] 28 | [frontends.ckan] 29 | backend="ckan" 30 | passHostHeader = true 31 | [frontends.ckan.routes.route1] 32 | rule = "Host:localhost" 33 | -------------------------------------------------------------------------------- /traefik/traefik.toml: -------------------------------------------------------------------------------- 1 | debug = false 2 | defaultEntryPoints = ["http", "https"] 3 | 4 | [entryPoints] 5 | [entryPoints.http] 6 | address = ":80" 7 | 8 | [entryPoints.https] 9 | address = ":443" 10 | [entryPoints.https.tls] 11 | [entryPoints.api] 12 | address = ":8081" 13 | 14 | [api] 15 | entryPoint = "api" 16 | 17 | [ping] 18 | entryPoint = "http" 19 | 20 | [acme] 21 | # TODO: replace email address 22 | email = "admin@example.com" 23 | storage = "/acme.json" 24 | entryPoint = "https" 25 | onHostRule = true 26 | 27 | [[acme.domains]] 28 | # TODO: replace with domain 29 | main = "example.com" 30 | 31 | [acme.httpChallenge] 32 | entryPoint = "http" 33 | 34 | [accessLog] 35 | 36 | [file] 37 | watch = true 38 | 39 | [backends] 40 | [backends.ckan] 41 | [backends.ckan.servers.server1] 42 | url = "http://nginx:8080" 43 | 44 | [frontends] 45 | [frontends.ckan] 46 | backend="ckan" 47 | passHostHeader = true 48 | [frontends.ckan.headers] 49 | SSLRedirect = true 50 | [frontends.ckan.routes.route1] 51 | # TODO: replace with Host:domain 52 | rule = "Host:example.com" 53 | -------------------------------------------------------------------------------- /traefik/traefik.toml.template: -------------------------------------------------------------------------------- 1 | debug = false 2 | defaultEntryPoints = ["http", "https"] 3 | 4 | [entryPoints] 5 | [entryPoints.http] 6 | address = ":80" 7 | 8 | [entryPoints.https] 9 | address = ":443" 10 | [entryPoints.https.tls] 11 | [entryPoints.api] 12 | address = ":8081" 13 | 14 | [api] 15 | entryPoint = "api" 16 | 17 | [ping] 18 | entryPoint = "http" 19 | 20 | [acme] 21 | email = "{{CERTIFICATE_EMAIL}}" 22 | storage = "/acme.json" 23 | entryPoint = "https" 24 | onHostRule = true 25 | 26 | [[acme.domains]] 27 | main = "{{CERTIFICATE_DOMAIN}}" 28 | 29 | [acme.httpChallenge] 30 | entryPoint = "http" 31 | 32 | 
[accessLog] 33 | 34 | [file] 35 | watch = true 36 | 37 | [backends] 38 | [backends.ckan] 39 | [backends.ckan.servers.server1] 40 | url = "http://nginx:8080" 41 | 42 | [frontends] 43 | [frontends.ckan] 44 | backend="ckan" 45 | passHostHeader = true 46 | [frontends.ckan.headers] 47 | SSLRedirect = true 48 | [frontends.ckan.routes.route1] 49 | rule = "Host:{{CERTIFICATE_DOMAIN}}" 50 | -------------------------------------------------------------------------------- /traefik/traefik_custom_ssl.toml: -------------------------------------------------------------------------------- 1 | debug = false 2 | defaultEntryPoints = ["http", "https"] 3 | 4 | [entryPoints] 5 | [entryPoints.http] 6 | address = ":80" 7 | 8 | [entryPoints.https] 9 | address = ":443" 10 | [entryPoints.https.tls] 11 | [[entryPoints.https.tls.certificates]] 12 | certFile = "certs/domain.cert" 13 | keyFile = "certs/domain.key" 14 | [entryPoints.api] 15 | address = ":8081" 16 | 17 | [api] 18 | entryPoint = "api" 19 | 20 | [ping] 21 | entryPoint = "http" 22 | 23 | [accessLog] 24 | 25 | [file] 26 | watch = true 27 | 28 | [backends] 29 | [backends.ckan] 30 | [backends.ckan.servers.server1] 31 | url = "http://nginx:8080" 32 | 33 | [frontends] 34 | [frontends.ckan] 35 | backend="ckan" 36 | passHostHeader = true 37 | [frontends.ckan.headers] 38 | SSLRedirect = true 39 | [frontends.ckan.routes.route1] 40 | # TODO: replace with Host:domain 41 | rule = "Host:example.com" 42 | -------------------------------------------------------------------------------- /travis_ci_operator.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | GITHUB_WORKSPACE=${2} 4 | 5 | _install_travis_ci_operator() { 6 | ls -alh "${GITHUB_WORKSPACE}/bin/travis_ci_operator.sh" 7 | chmod +x "${GITHUB_WORKSPACE}/bin/travis_ci_operator.sh" 8 | } 9 | 10 | _install_script() { 11 | if [ -e "${1}" ]; then cp "${1}" "${GITHUB_WORKSPACE}/bin/${1}" 12 | else curl -L "https://raw.githubusercontent.com/OriHoch/travis-ci-operator/master/${1}" > "${GITHUB_WORKSPACE}/bin/${1}" 13 | fi && chmod +x "${GITHUB_WORKSPACE}/bin/${1}" 14 | ls -alh "${GITHUB_WORKSPACE}/bin/${1}" 15 | } 16 | 17 | if [ "${1}" == "init" ]; then 18 | GITHUB_WORKSPACE=${2} 19 | _install_travis_ci_operator &&\ 20 | _install_script read_yaml.py &&\ 21 | _install_script update_yaml.py &&\ 22 | if [ -e .travis.banner ]; then cat .travis.banner; else curl -L https://raw.githubusercontent.com/OriHoch/travis-ci-operator/master/.travis.banner; fi &&\ 23 | echo Successfully initialized travis-ci-operator && exit 0 24 | echo Failed to initialize travis-ci-operator && exit 1 25 | 26 | elif [ "${1}" == "docker-login" ]; then 27 | docker login -u "${DOCKER_USER}" -p "${DOCKER_PASSWORD}" &&\ 28 | echo Logged in to Docker && exit 0 29 | echo failed to login to Docker && exit 1 30 | 31 | elif [ "${1}" == "github-update" ]; then 32 | DEPLOY_KEY_NAME="${2}" 33 | GIT_BRANCH="${3}" 34 | UPDATE_SCRIPT="${4}" 35 | COMMIT_MSG="${5}" 36 | GITHUB_WORKSPACE="${6}" 37 | if [ "${DEPLOY_KEY_NAME}" == "self" ]; then 38 | GITHUB_REPO_SLUG="${TRAVIS_REPO_SLUG}" 39 | else 40 | GITHUB_REPO_SLUG="${6}" 41 | fi 42 | [ -z "${DEPLOY_KEY_NAME}" ] || [ -z "${GIT_BRANCH}" ] || [ -z "${UPDATE_SCRIPT}" ] || [ -z "${COMMIT_MSG}" ] \ 43 | && echo missing required arguments && exit 1 44 | [ "${DEPLOY_KEY_NAME}" == "self" ] && [ "${COMMIT_MSG}" == "${TRAVIS_COMMIT_MESSAGE}" ] && [ "${GIT_BRANCH}" == "${TRAVIS_BRANCH}" ] \ 45 | && echo skipping update of self with same commit msg and branch && exit 0 
46 | [ -z "${GITHUB_REPO_SLUG}" ] && echo missing GITHUB_REPO_SLUG && exit 1 47 | ls -alh /home/runner/bin/read_yaml.py 48 | ! $(eval echo `python ${GITHUB_WORKSPACE}/bin/read_yaml.py ${GITHUB_WORKSPACE}/bin/.travis-ci-operator.yaml ${DEPLOY_KEY_NAME}DeployKeyDecryptCmd`) \ 49 | && echo Failed to get deploy key && exit 1 50 | GITHUB_DEPLOY_KEY_FILE=".travis_ci_operator_${DEPLOY_KEY_NAME}_github_deploy_key.id_rsa" 51 | if [ -e "${GITHUB_DEPLOY_KEY_FILE}" ]; then 52 | cp -f "${GITHUB_DEPLOY_KEY_FILE}" ~/.ssh/id_rsa && chmod 400 ~/.ssh/id_rsa 53 | [ "$?" != "0" ] && echo failed to setup deploy key for pushing to GitHub && exit 1 54 | else 55 | echo WARNING: deploy key file not found 56 | fi 57 | GIT_REPO="git@github.com:${GITHUB_REPO_SLUG}.git" 58 | TEMPDIR=`mktemp -d` 59 | echo Cloning git repo ${GIT_REPO} branch ${GIT_BRANCH} 60 | ! git clone --branch ${GIT_BRANCH} ${GIT_REPO} ${TEMPDIR} && echo failed to clone repo && exit 1 61 | pushd $TEMPDIR 62 | eval "${UPDATE_SCRIPT}" 63 | echo Committing and pushing to GitHub repo ${GIT_REPO} branch ${GIT_BRANCH} 64 | git commit -m "${COMMIT_MSG}" && ! git push ${GIT_REPO} ${GIT_BRANCH} \ 65 | && echo failed to push change to GitHub && exit 1 66 | popd 67 | rm -rf $TEMPDIR 68 | echo GitHub update completed successfully 69 | exit 0 70 | 71 | elif [ "${1}" == "github-yaml-update" ]; then 72 | DEPLOY_KEY_NAME="${2}" 73 | GIT_BRANCH="${3}" 74 | YAML_FILE="${4}" 75 | UPDATE_VALUES="${5}" 76 | COMMIT_MSG="${6}" 77 | if [ "${DEPLOY_KEY_NAME}" == "self" ]; then 78 | GITHUB_REPO_SLUG="${TRAVIS_REPO_SLUG}" 79 | else 80 | GITHUB_REPO_SLUG="${7}" 81 | fi 82 | [ -z "${DEPLOY_KEY_NAME}" ] || [ -z "${GIT_BRANCH}" ] || [ -z "${YAML_FILE}" ] || [ -z "${UPDATE_VALUES}" ] || [ -z "${COMMIT_MSG}" ] \ 83 | && echo missing required arguments && exit 1 84 | [ "${DEPLOY_KEY_NAME}" == "self" ] && [ "${COMMIT_MSG}" == "${TRAVIS_COMMIT_MESSAGE}" ] && [ "${GIT_BRANCH}" == "${TRAVIS_BRANCH}" ] \ 85 | && echo skipping update of self with same commit msg and branch && exit 0 86 | [ -z "${GITHUB_REPO_SLUG}" ] && echo missing GITHUB_REPO_SLUG && exit 1 87 | ! $(eval echo `read_yaml.py .travis-ci-operator.yaml ${DEPLOY_KEY_NAME}DeployKeyDecryptCmd`) \ 88 | && echo Failed to get deploy key && exit 1 89 | GITHUB_DEPLOY_KEY_FILE=".travis_ci_operator_${DEPLOY_KEY_NAME}_github_deploy_key.id_rsa" 90 | if [ -e "${GITHUB_DEPLOY_KEY_FILE}" ]; then 91 | cp -f "${GITHUB_DEPLOY_KEY_FILE}" ~/.ssh/id_rsa && chmod 400 ~/.ssh/id_rsa 92 | [ "$?" != "0" ] && echo failed to setup deploy key for pushing to GitHub && exit 1 93 | else 94 | echo WARNING: deploy key file not found 95 | fi 96 | GIT_REPO="git@github.com:${GITHUB_REPO_SLUG}.git" 97 | TEMPDIR=`mktemp -d` 98 | echo Cloning git repo ${GIT_REPO} branch ${GIT_BRANCH} 99 | ! git clone --branch ${GIT_BRANCH} ${GIT_REPO} ${TEMPDIR} && echo failed to clone repo && exit 1 100 | pushd $TEMPDIR 101 | ! update_yaml.py "${UPDATE_VALUES}" "${YAML_FILE}" \ 102 | && echo failed to update yaml file && exit 1 103 | echo Committing and pushing to GitHub repo ${GIT_REPO} branch ${GIT_BRANCH} 104 | git add "${YAML_FILE}" 105 | git commit -m "${COMMIT_MSG}" && ! 
git push ${GIT_REPO} ${GIT_BRANCH} \ 106 | && echo failed to push change to GitHub && exit 1 107 | popd 108 | rm -rf $TEMPDIR 109 | echo GitHub yaml update completed successfully 110 | exit 0 111 | 112 | else 113 | echo unknown command 114 | exit 1 115 | 116 | fi 117 | -------------------------------------------------------------------------------- /varnish/default.vcl: -------------------------------------------------------------------------------- 1 | # Marker to tell the VCL compiler that this VCL has been adapted to the 2 | # new 4.0 format. 3 | vcl 4.0; 4 | 5 | # Default backend definition. Set this to point to your content server. 6 | backend default { 7 | .host = "ckan"; 8 | .port = "5000"; 9 | } 10 | 11 | sub vcl_backend_response { 12 | set beresp.grace = 1h; 13 | unset beresp.http.Server; 14 | # These status codes should always pass through and never cache. 15 | if ( beresp.status >= 500 ) { 16 | set beresp.ttl = 0s; 17 | } 18 | if (beresp.http.content-type ~ "(text|javascript|json|xml|html)") { 19 | set beresp.do_gzip = true; 20 | } 21 | # CKAN cache headers are used by the Varnish cache, but should not be propagated to 22 | # the Internet. Tell browsers and proxies not to cache. This means Varnish always 23 | # gets the responsibility to serve the right content at all times. 24 | if (beresp.http.Cache-Control ~ "max-age") { 25 | unset beresp.http.set-cookie; 26 | set beresp.http.Cache-Control = "no-cache"; 27 | } 28 | 29 | # Encourage assets to be cached by proxies and browsers 30 | # JS and CSS may be gzipped depending on headers 31 | # see https://developers.google.com/speed/docs/best-practices/caching 32 | if (bereq.url ~ "\.(css|js)") { 33 | set beresp.http.Vary = "Accept-Encoding"; 34 | } 35 | 36 | # Encourage assets to be cached by proxies and browsers for 1 day 37 | if (bereq.url ~ "\.(png|gif|jpg|swf|css|js)") { 38 | unset beresp.http.set-cookie; 39 | set beresp.http.Cache-Control = "public, max-age=86400"; 40 | set beresp.ttl = 1d; 41 | } 42 | 43 | # Encourage CKAN vendor assets (which are versioned) to be cached 44 | # by proxies and browsers for 1 year 45 | if (bereq.url ~ "^/scripts/vendor/") { 46 | unset beresp.http.set-cookie; 47 | set beresp.http.Cache-Control = "public, max-age=31536000"; 48 | set beresp.ttl = 12m; 49 | } 50 | # # Never cache API requests 51 | # if (bereq.url ~ "^/api/") { 52 | # set beresp.ttl = 0s; 53 | # } 54 | } 55 | sub vcl_recv { 56 | if (req.http.user-agent ~ "Ezooms" || req.http.user-agent ~ "Ahrefs") { 57 | return (synth(403)); 58 | } 59 | if (req.url ~ "^/_tracking") { 60 | // exclude web spiders from statistics 61 | if (req.http.user-agent ~ "Googlebot" || req.http.user-agent ~ "baidu" || req.http.user-agent ~ "bing") { 62 | return (synth(200)); 63 | } else { 64 | return (pass); 65 | } 66 | } 67 | if (req.url ~ "\.(png|gif|jpg|jpeg|swf|css|js|woff|eot)$") { 68 | // Tell Varnish to deliver content from cache even if the request otherwise indicates that it should be passed 69 | return(hash); 70 | } 71 | 72 | // Remove has_js, Google Analytics and ShareThis cookies. 73 | set req.http.Cookie = regsuball(req.http.Cookie, "(^|;\s*)(__[a-z]+|has_js|cookie-agreed-en|_csoot|_csuid|_chartbeat2)=[^;]*", ""); 74 | 75 | // Remove a ";" prefix, if present. 76 | set req.http.Cookie = regsub(req.http.Cookie, "^;\s*", ""); 77 | // Remove empty cookies. 
78 | if (req.http.Cookie ~ "^\s*$") { 79 | unset req.http.Cookie; 80 | } 81 | 82 | unset req.http.X-Forwarded-For; 83 | set req.http.X-Forwarded-For = req.http.X-Real-IP; 84 | } 85 | 86 | sub vcl_hash { 87 | # http://serverfault.com/questions/112531/ignoring-get-parameters-in-varnish-vcl 88 | hash_data(req.url); 89 | if (req.http.host) { 90 | hash_data(req.http.host); 91 | } else { 92 | hash_data(server.ip); 93 | } 94 | if (req.http.Cookie) { 95 | hash_data(req.http.Cookie); 96 | } 97 | if (req.http.Origin) { 98 | hash_data(req.http.Origin); 99 | } 100 | } 101 | 102 | sub vcl_deliver { 103 | if (!resp.http.Vary) { 104 | set resp.http.Vary = "Accept-Encoding"; 105 | } else if (resp.http.Vary !~ "(?i)Accept-Encoding") { 106 | set resp.http.Vary = resp.http.Vary + ",Accept-Encoding"; 107 | } 108 | unset resp.http.X-Varnish; 109 | unset resp.http.Via; 110 | unset resp.http.Age; 111 | unset resp.http.X-Powered-By; 112 | } 113 | 114 | sub vcl_backend_error { 115 | unset beresp.http.Server; 116 | if (beresp.status == 751) { 117 | set beresp.http.Location = beresp.http.response; 118 | set beresp.status = 301; 119 | return (deliver); 120 | } 121 | if (beresp.status == 753) { 122 | set beresp.http.Location = beresp.http.response; 123 | set beresp.status = 301; 124 | return (deliver); 125 | } 126 | } 127 | --------------------------------------------------------------------------------