├── tests
├── __init__.py
├── test_template.py
├── test_run.py
├── utils.py
└── test_config.py
├── helpers
├── __init__.py
├── utils.py
├── singleton.py
├── updater.py
├── aws_validation.py
├── cli.py
├── setup.py
├── network.py
├── upgrading.py
├── template.py
└── command.py
├── .github
├── FUNDING.yml
├── workflows
│ └── pytest.yml
└── ISSUE_TEMPLATE.md
├── .gitignore
├── tox.ini
├── requirements_tests.txt
├── templates
├── kobo-env
│ ├── envfiles
│ │ ├── external_services.txt.tpl
│ │ ├── smtp.txt.tpl
│ │ ├── domains.txt.tpl
│ │ ├── django.txt.tpl
│ │ ├── aws.txt.tpl
│ │ └── databases.txt.tpl
│ ├── postgres
│ │ └── conf
│ │ │ └── postgres.conf.tpl
│ └── enketo_express
│ │ └── config.json.tpl
├── kobo-docker
│ ├── docker-compose.maintenance.override.yml.tpl
│ ├── docker-compose.backend.override.yml.tpl
│ └── docker-compose.frontend.override.yml.tpl
└── nginx-certbot
│ ├── docker-compose.yml.tpl
│ ├── data
│ └── nginx
│ │ └── app.conf.tpl
│ └── init-letsencrypt.sh.tpl
├── setup.py
├── conftest.py
├── run.py
└── readme.md
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/helpers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | custom: ['https://kobotoolbox.org/donate']
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .run.conf
3 | .run.conf.*
4 | *.pyc
5 | __pycache__
6 | .pytest_cache/
7 | .tox
8 | *.egg-info
9 |
--------------------------------------------------------------------------------
/helpers/utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 |
def run_docker_compose(config_dict: dict, command: list[str]) -> list[str]:
    """
    Assemble the Docker Compose CLI invocation for *command*.

    `config_dict` is part of the shared call signature used across the
    project but is not consulted by this implementation.

    Args:
        config_dict: Project configuration (currently unused here).
        command: Sub-command and arguments, e.g. `['up', '-d']`.

    Returns:
        The full argument vector, starting with `docker compose`.
    """
    return ['docker', 'compose', *command]
7 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # content of: tox.ini, kept in the same directory as setup.py
2 | [tox]
3 | skipsdist=True
4 | envlist = py38,py310,py312
5 |
6 | [testenv]
7 | deps = -rrequirements_tests.txt
8 | commands =
9 | pytest -vv {posargs} --disable-pytest-warnings
10 |
--------------------------------------------------------------------------------
/requirements_tests.txt:
--------------------------------------------------------------------------------
1 | atomicwrites==1.4.0
2 | attrs==21.4.0
3 | iniconfig==1.1.1
4 | more-itertools==8.12.0
5 | netifaces==0.11.0
6 | packaging==21.3
7 | pathlib2==2.3.2
8 | pluggy==1.0.0
9 | py==1.11.0
10 | pyparsing==3.0.8
11 | pytest==7.1.1
12 | six==1.16.0
13 | tomli==2.0.1
14 |
--------------------------------------------------------------------------------
/helpers/singleton.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
class Singleton(type):
    """
    Metaclass that caches a single instance per class.

    The first instantiation builds the object; every later call returns
    the same cached instance, regardless of arguments.
    """
    # One shared cache on the metaclass, keyed by the class object.
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
10 |
--------------------------------------------------------------------------------
/templates/kobo-env/envfiles/external_services.txt.tpl:
--------------------------------------------------------------------------------
1 | ############################################################################
2 | # GOOGLE_ANALYTICS_TOKEN must be changed in enketo_express/config.json too #
3 | ############################################################################
4 | GOOGLE_ANALYTICS_TOKEN=${GOOGLE_UA}
5 |
6 | SENTRY_DSN=${KPI_RAVEN_DSN}
7 | SENTRY_JS_DSN=${KPI_RAVEN_JS_DSN}
8 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# `distutils` was deprecated by PEP 632 and removed in Python 3.12 (which
# this project supports per tox.ini), so use setuptools' `setup` instead.
from setuptools import find_packages, setup

from helpers.config import Config

setup(
    name='kobo-install',
    version=Config.KOBO_INSTALL_VERSION,
    # Include all the python modules except `tests`,
    packages=find_packages(exclude=['tests']),
    url='https://github.com/kobotoolbox/kobo-install/',
    license='',
    author='KoboToolbox',
    author_email='',
    description='Installer for KoboToolbox'
)
16 |
--------------------------------------------------------------------------------
/templates/kobo-env/postgres/conf/postgres.conf.tpl:
--------------------------------------------------------------------------------
1 | #------------------------------------------------------------------------------------
2 | # TUNING
3 | #------------------------------------------------------------------------------------
4 | # These settings are based on server configuration
5 | # https://www.pgconfig.org/#/tuning
6 | # DB Version: 14
7 | # OS Type: linux
8 | # App profile: ${POSTGRES_APP_PROFILE}
9 | # Hard-drive: SSD
10 | # Total Memory (RAM): ${POSTGRES_RAM}GB
11 |
12 | ${POSTGRES_SETTINGS}
13 |
--------------------------------------------------------------------------------
/templates/kobo-docker/docker-compose.maintenance.override.yml.tpl:
--------------------------------------------------------------------------------
1 | # For public, HTTPS servers.
2 |
3 | services:
4 |
5 | maintenance:
6 | environment:
7 | - ETA=${MAINTENANCE_ETA}
8 | - DATE_STR=${MAINTENANCE_DATE_STR}
9 | - DATE_ISO=${MAINTENANCE_DATE_ISO}
10 | - EMAIL=${MAINTENANCE_EMAIL}
11 | ${USE_LETSENSCRYPT}ports:
12 | ${USE_LETSENSCRYPT} - ${NGINX_EXPOSED_PORT}:80
13 | networks:
14 | kobo-fe-network:
15 | aliases:
16 | - nginx
17 | - nginx.internal
18 |
19 | networks:
20 | kobo-fe-network:
21 | name: ${DOCKER_NETWORK_FRONTEND_PREFIX}_kobo-fe-network
22 |
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | import os
3 | import pytest
4 |
5 |
def clean_up():
    """
    Delete the temporary artifacts that test runs leave in /tmp,
    silently ignoring any that do not exist.
    """
    for name in ('.uniqid', 'upsert_db_users'):
        target = os.path.join('/tmp', name)
        try:
            os.remove(target)
        except FileNotFoundError:
            # Nothing to remove; that is the desired end state anyway.
            pass
17 |
18 |
19 | @pytest.fixture(scope="session", autouse=True)
20 | def setup(request):
21 | # Clean up before tests begin in case of orphan files.
22 | clean_up()
23 | request.addfinalizer(_tear_down)
24 |
25 |
def _tear_down():
    """Session finalizer: remove any files the test run created."""
    # The trailing `pass` in the previous version was dead code.
    clean_up()
29 |
--------------------------------------------------------------------------------
/templates/kobo-env/envfiles/smtp.txt.tpl:
--------------------------------------------------------------------------------
1 | ##################################
2 | # For sending e-mail using SMTP. #
3 | ##################################
4 |
5 | # NOTE: To send from GMail, the sending account must enable "Allowing less secure apps to access your account" (https://support.google.com/accounts/answer/6010255).
6 | # NOTE: To send from AWS EC2 instances, SNS must be used instead of SMTP. These and the SNS e-mail settings from `envfiles/aws.txt` are mutually exclusive; do not use both.
7 | # See https://docs.djangoproject.com/en/1.8/topics/email/#smtp-backend.
8 |
9 | EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend
10 | EMAIL_HOST=${SMTP_HOST}
11 | EMAIL_PORT=${SMTP_PORT}
12 | EMAIL_HOST_USER=${SMTP_USER}
13 | EMAIL_HOST_PASSWORD=${SMTP_PASSWORD}
14 | EMAIL_USE_TLS=${SMTP_USE_TLS}
15 | DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL}
--------------------------------------------------------------------------------
/.github/workflows/pytest.yml:
--------------------------------------------------------------------------------
1 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
2 |
3 | name: pytest
4 |
5 | on:
6 | push:
7 | branches: [ master ]
8 | pull_request:
9 | branches: [ master ]
10 |
11 | jobs:
12 | build:
13 |
14 | runs-on: ubuntu-24.04
15 | strategy:
16 | matrix:
17 | python-version: ['3.10']
18 |
19 | steps:
20 | - uses: actions/checkout@v2
21 | - name: Set up Python ${{ matrix.python-version }}
22 | uses: actions/setup-python@v2
23 | with:
24 | python-version: ${{ matrix.python-version }}
25 | - name: Upgrade pip
26 | run: python -m pip install --upgrade pip
27 | - name: Install Python dependencies
28 | run: pip install -r requirements_tests.txt
29 | - name: Run pytest
30 | run: pytest -vv -rf
31 |
--------------------------------------------------------------------------------
/templates/kobo-env/envfiles/domains.txt.tpl:
--------------------------------------------------------------------------------
1 | # Choose between http or https
2 | PUBLIC_REQUEST_SCHEME=${PUBLIC_REQUEST_SCHEME}
3 | # The publicly-accessible domain where your KoBo Toolbox instance will be reached (e.g. example.com).
4 | PUBLIC_DOMAIN_NAME=${PUBLIC_DOMAIN_NAME}
5 | # The private domain used in docker network. Useful for communication between containers without passing through
6 | # a load balancer. No need to be resolved by a public DNS.
7 | INTERNAL_DOMAIN_NAME=${INTERNAL_DOMAIN_NAME}
8 | # The publicly-accessible subdomain for the KoBoForm form building and management interface (e.g. koboform).
9 | KOBOFORM_PUBLIC_SUBDOMAIN=${KOBOFORM_SUBDOMAIN}
10 | # The publicly-accessible subdomain for the KoBoCAT data collection and project management interface (e.g. kobocat).
11 | KOBOCAT_PUBLIC_SUBDOMAIN=${KOBOCAT_SUBDOMAIN}
12 | # The publicly-accessible subdomain for the Enketo Express web forms (e.g. enketo).
13 | ENKETO_EXPRESS_PUBLIC_SUBDOMAIN=${ENKETO_SUBDOMAIN}
14 |
--------------------------------------------------------------------------------
/templates/nginx-certbot/docker-compose.yml.tpl:
--------------------------------------------------------------------------------
1 | services:
2 | nginx_ssl_proxy:
3 | image: nginx:1.26-alpine
4 | restart: unless-stopped
5 | volumes:
6 | - ./data/nginx:/etc/nginx/conf.d
7 | - ./data/certbot/conf:/etc/letsencrypt
8 | - ./data/certbot/www:/var/www/certbot
9 | ports:
10 | - "80:80"
11 | - "443:443"
12 | command: "/bin/sh -c 'while :; do sleep 6h & wait $$$${!}; nginx -s reload; done & nginx -g \"daemon off;\"'"
13 | networks:
14 | kobo-fe-network:
15 | aliases:
16 | - nginx_ssl_proxy
17 | certbot:
18 | image: certbot/certbot
19 | restart: unless-stopped
20 | volumes:
21 | - ./data/certbot/conf:/etc/letsencrypt
22 | - ./data/certbot/www:/var/www/certbot
23 | entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $$$${!}; done;'"
24 |
25 | networks:
26 | kobo-fe-network:
27 | name: ${DOCKER_NETWORK_FRONTEND_PREFIX}_kobo-fe-network
28 | external: true
29 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
4 |
5 |
6 | **Description**
7 |
8 |
9 | **Steps to Reproduce**
10 |
11 |
12 | **Expected behavior**
13 |
14 |
15 | **Desktop**
16 |
17 |
18 | - OS:
19 | - Python Version:
20 | - Docker Version:
21 | - Docker Compose Version:
22 |
23 | **Additional context**
24 |
25 |
--------------------------------------------------------------------------------
/templates/nginx-certbot/data/nginx/app.conf.tpl:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME} ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME} ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME};
4 | server_tokens off;
5 |
6 | location /.well-known/acme-challenge/ {
7 | root /var/www/certbot;
8 | }
9 |
10 | location / {
11 | return 301 https://$$host$$request_uri;
12 | }
13 | }
14 |
15 | server {
16 | listen 443 ssl;
17 | server_name ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME} ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME} ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME};
18 | server_tokens off;
19 |
20 | ssl_certificate /etc/letsencrypt/live/${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}/fullchain.pem;
21 | ssl_certificate_key /etc/letsencrypt/live/${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}/privkey.pem;
22 | include /etc/letsencrypt/options-ssl-nginx.conf;
23 | ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
24 |
25 | # Allow 100M upload
26 | client_max_body_size 100M;
27 |
28 | location / {
29 | proxy_pass http://nginx;
30 | proxy_set_header Host $$http_host;
31 | proxy_set_header X-Real-IP $$remote_addr;
32 | proxy_set_header X-Forwarded-For $$proxy_add_x_forwarded_for;
33 | proxy_set_header X-Forwarded-Proto https;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/templates/kobo-docker/docker-compose.backend.override.yml.tpl:
--------------------------------------------------------------------------------
1 | # Override for primary back-end server
2 |
3 | services:
4 |
5 | postgres:
6 | volumes:
7 | - ../kobo-env/postgres/conf/postgres.conf:/kobo-docker-scripts/conf/postgres.conf
8 | ${EXPOSE_BACKEND_PORTS}ports:
9 | ${EXPOSE_BACKEND_PORTS} - ${POSTGRES_PORT}:5432
10 | ${USE_BACKEND_NETWORK}networks:
11 | ${USE_BACKEND_NETWORK} kobo-be-network:
12 | ${USE_BACKEND_NETWORK} aliases:
13 | ${USE_BACKEND_NETWORK} - postgres.${PRIVATE_DOMAIN_NAME}
14 |
15 | mongo:
16 | ${EXPOSE_BACKEND_PORTS}ports:
17 | ${EXPOSE_BACKEND_PORTS} - ${MONGO_PORT}:27017
18 | ${USE_BACKEND_NETWORK}networks:
19 | ${USE_BACKEND_NETWORK} kobo-be-network:
20 | ${USE_BACKEND_NETWORK} aliases:
21 | ${USE_BACKEND_NETWORK} - mongo.${PRIVATE_DOMAIN_NAME}
22 |
23 | redis_main:
24 | ${EXPOSE_BACKEND_PORTS}ports:
25 | ${EXPOSE_BACKEND_PORTS} - ${REDIS_MAIN_PORT}:6379
26 | ${USE_BACKEND_NETWORK}networks:
27 | ${USE_BACKEND_NETWORK} kobo-be-network:
28 | ${USE_BACKEND_NETWORK} aliases:
29 | ${USE_BACKEND_NETWORK} - redis-main.${PRIVATE_DOMAIN_NAME}
30 |
31 | redis_cache:
32 | ${EXPOSE_BACKEND_PORTS}ports:
33 | ${EXPOSE_BACKEND_PORTS} - ${REDIS_CACHE_PORT}:6380
34 | ${USE_BACKEND_NETWORK}networks:
35 | ${USE_BACKEND_NETWORK} kobo-be-network:
36 | ${USE_BACKEND_NETWORK} aliases:
37 | ${USE_BACKEND_NETWORK} - redis-cache.${PRIVATE_DOMAIN_NAME}
38 |
39 | ${USE_BACKEND_NETWORK}networks:
40 | ${USE_BACKEND_NETWORK} kobo-be-network:
41 | ${USE_BACKEND_NETWORK} driver: bridge
42 |
--------------------------------------------------------------------------------
/helpers/updater.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | import sys
4 |
5 | from helpers.cli import CLI
6 | from helpers.setup import Setup
7 |
8 |
class Updater:
    """
    Updates kobo-install (this utility), restarts this script, and updates
    kobo-docker
    """
    # CLI flag appended on re-exec so the restarted process skips the
    # self-update step and does not loop forever.
    NO_UPDATE_SELF_OPTION = '--no-update-self'

    @classmethod
    def run(cls, version=None, cron=False, update_self=True):
        """
        Update kobo-install and/or kobo-docker to `version`.

        Args:
            version (str): Git reference to update to; defaults to the
                currently checked-out branch of this repository.
            cron (bool): Forwarded to `Setup.post_update()` — presumably
                adjusts behavior for unattended runs; confirm against
                `Setup`.
            update_self (bool): When True, kobo-install itself is updated
                first and the current process is replaced via `os.execl()`,
                so nothing after that call executes in this process.
        """
        # Validate kobo-docker already exists and is valid
        Setup.validate_already_run()

        if version is None:
            # Fall back to the name of the current branch (HEAD).
            git_commit_version_command = [
                'git',
                'rev-parse',
                '--abbrev-ref',
                'HEAD',
            ]
            version = CLI.run_command(git_commit_version_command).strip()

        if update_self:
            # Update kobo-install first
            Setup.update_koboinstall(version)
            CLI.colored_print('kobo-install has been updated',
                              CLI.COLOR_SUCCESS)

            # Reload this script to use `version`.
            # NB:`argv[0]` does not automatically get set to the executable
            # path as it usually would, so we have to do it manually--hence the
            # double `sys.executable`
            sys.argv.append(cls.NO_UPDATE_SELF_OPTION)
            # `os.execl` replaces this process; the lines below only run in
            # the restarted process (where `update_self` is False).
            os.execl(sys.executable, sys.executable, *sys.argv)

        # Update kobo-docker
        Setup.update_kobodocker()
        CLI.colored_print('kobo-docker has been updated', CLI.COLOR_SUCCESS)
        Setup.post_update(cron)
47 |
--------------------------------------------------------------------------------
/templates/kobo-env/envfiles/django.txt.tpl:
--------------------------------------------------------------------------------
1 | DJANGO_DEBUG=${DEBUG}
2 | TEMPLATE_DEBUG=${DEBUG}
3 | ${USE_X_FORWARDED_HOST}USE_X_FORWARDED_HOST=True
4 |
5 | DJANGO_SECRET_KEY=${DJANGO_SECRET_KEY}
6 | DJANGO_SESSION_COOKIE_AGE=${DJANGO_SESSION_COOKIE_AGE}
7 | DJANGO_ALLOWED_HOSTS=.${PUBLIC_DOMAIN_NAME} .${INTERNAL_DOMAIN_NAME}
8 | KPI_PREFIX=/
9 |
10 | SESSION_COOKIE_DOMAIN=".${PUBLIC_DOMAIN_NAME}"
11 |
12 | CELERY_BROKER_URL=redis://{% if REDIS_PASSWORD %}:${REDIS_PASSWORD}@{% endif REDIS_PASSWORD %}redis-main.${PRIVATE_DOMAIN_NAME}:${REDIS_MAIN_PORT}/1
13 | CELERY_AUTOSCALE_MIN=2
14 | CELERY_AUTOSCALE_MAX=6
15 |
16 | # See "api key" here: https://github.com/kobotoolbox/enketo-express/tree/master/config#linked-form-and-data-server.
17 | ENKETO_API_KEY=${ENKETO_API_KEY}
18 |
19 | # The initial superuser's username.
20 | KOBO_SUPERUSER_USERNAME=${KOBO_SUPERUSER_USERNAME}
21 | # The initial superuser's password.
22 | KOBO_SUPERUSER_PASSWORD=${KOBO_SUPERUSER_PASSWORD}
23 | # The e-mail address where your users can contact you.
24 | KOBO_SUPPORT_EMAIL=${DEFAULT_FROM_EMAIL}
25 |
26 | KOBOFORM_URL=${PUBLIC_REQUEST_SCHEME}://${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}${NGINX_PUBLIC_PORT}
27 | KOBOFORM_INTERNAL_URL=http://${KOBOFORM_SUBDOMAIN}.${INTERNAL_DOMAIN_NAME} # Always use HTTP internally.
28 | ENKETO_URL=${PUBLIC_REQUEST_SCHEME}://${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}${NGINX_PUBLIC_PORT}
29 | ENKETO_INTERNAL_URL=http://${ENKETO_SUBDOMAIN}.${INTERNAL_DOMAIN_NAME} # Always use HTTP internally.
30 | KOBOCAT_URL=${PUBLIC_REQUEST_SCHEME}://${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}${NGINX_PUBLIC_PORT}
31 | KOBOCAT_INTERNAL_URL=http://${KOBOCAT_SUBDOMAIN}.${INTERNAL_DOMAIN_NAME} # Always use HTTP internally.
32 |
--------------------------------------------------------------------------------
/templates/kobo-env/envfiles/aws.txt.tpl:
--------------------------------------------------------------------------------
1 | ####################
2 | # Account settings #
3 | ####################
4 |
5 | ${USE_AWS}AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
6 | ${USE_AWS}AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
7 |
8 | ####################
9 | # Storage settings #
10 | ####################
11 |
12 | # To use S3, the specified buckets must already exist and the owner of your `AWS_ACCESS_KEY_ID` must have the appropriate S3 permissions.
13 |
14 | ${USE_AWS}KOBOCAT_DEFAULT_FILE_STORAGE=storages.backends.s3boto3.S3Boto3Storage
15 | ${USE_AWS}KOBOCAT_AWS_STORAGE_BUCKET_NAME=${AWS_BUCKET_NAME}
16 |
17 | ${USE_AWS}KPI_DEFAULT_FILE_STORAGE=storages.backends.s3boto3.S3Boto3Storage
18 | ${USE_AWS}KPI_AWS_STORAGE_BUCKET_NAME=${AWS_BUCKET_NAME}
19 |
20 | ${USE_AWS}AWS_S3_REGION_NAME=${AWS_S3_REGION_NAME}
21 |
22 | ###################
23 | # Backup settings #
24 | ###################
25 |
26 | ${USE_AWS_BACKUP}BACKUP_AWS_STORAGE_BUCKET_NAME=${AWS_BACKUP_BUCKET_NAME}
27 | #Backups files deletion is handled by bucket rules when True
28 | ${USE_AWS_BACKUP}AWS_BACKUP_BUCKET_DELETION_RULE_ENABLED=${AWS_BACKUP_BUCKET_DELETION_RULE_ENABLED}
29 | ${USE_AWS_BACKUP}AWS_BACKUP_YEARLY_RETENTION=${AWS_BACKUP_YEARLY_RETENTION}
30 | ${USE_AWS_BACKUP}AWS_BACKUP_MONTHLY_RETENTION=${AWS_BACKUP_MONTHLY_RETENTION}
31 | ${USE_AWS_BACKUP}AWS_BACKUP_WEEKLY_RETENTION=${AWS_BACKUP_WEEKLY_RETENTION}
32 | ${USE_AWS_BACKUP}AWS_BACKUP_DAILY_RETENTION=${AWS_BACKUP_DAILY_RETENTION}
33 |
34 | # In MB
35 | ${USE_AWS_BACKUP}AWS_MONGO_BACKUP_MINIMUM_SIZE=${AWS_MONGO_BACKUP_MINIMUM_SIZE}
36 | ${USE_AWS_BACKUP}AWS_POSTGRES_BACKUP_MINIMUM_SIZE=${AWS_POSTGRES_BACKUP_MINIMUM_SIZE}
37 | ${USE_AWS_BACKUP}AWS_REDIS_BACKUP_MINIMUM_SIZE=${AWS_REDIS_BACKUP_MINIMUM_SIZE}
38 | ${USE_AWS_BACKUP}AWS_BACKUP_UPLOAD_CHUNK_SIZE=${AWS_BACKUP_UPLOAD_CHUNK_SIZE}
39 |
40 |
--------------------------------------------------------------------------------
/tests/test_template.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | from unittest.mock import patch, MagicMock
4 |
5 | from helpers.template import Template
6 | from .utils import mock_read_config as read_config
7 |
8 |
9 | WORK_DIR = '/tmp/kobo-install-tests'
10 |
# Patch the name-mangled private unique-id helpers so no real unique-id
# file is read from or written to disk during the test.
@patch(
    'helpers.template.Template._Template__read_unique_id',
    MagicMock(return_value='123456789')
)
@patch(
    'helpers.template.Template._Template__write_unique_id',
    MagicMock(return_value='123456789')
)
# Redirect every path the renderer touches into the isolated WORK_DIR.
@patch(
    'helpers.template.Template._get_templates_path_parent',
    MagicMock(return_value=f'{WORK_DIR}/templates/')
)
@patch(
    'helpers.config.Config.get_env_files_path',
    MagicMock(return_value=f'{WORK_DIR}/kobo-env/')
)
@patch(
    'helpers.config.Config.get_letsencrypt_repo_path',
    MagicMock(return_value=f'{WORK_DIR}/nginx-certbot/')
)
def test_render_templates():
    """
    End-to-end check that `Template.render()` turns the `.tpl` templates
    into rendered files inside an isolated work directory.
    """
    config = read_config()
    # Reach into Config's name-mangled private dict to aim the renderer
    # at the throwaway work directory.
    config._Config__dict['unique_id'] = '123456789'
    config._Config__dict['kobodocker_path'] = f'{WORK_DIR}/kobo-docker/'
    try:
        _copy_templates()
        # Rendered outputs must not pre-exist...
        assert not os.path.exists(
            f'{WORK_DIR}/kobo-docker/docker-compose.frontend.override.yml'
        )
        assert not os.path.exists(
            f'{WORK_DIR}/kobo-docker/docker-compose.backend.override.yml'
        )
        assert not os.path.exists(f'{WORK_DIR}/kobo-env/envfiles/django.txt')
        Template.render(config)
        # ...and must exist after rendering.
        assert os.path.exists(
            f'{WORK_DIR}/kobo-docker/docker-compose.frontend.override.yml'
        )
        assert os.path.exists(
            f'{WORK_DIR}/kobo-docker/docker-compose.backend.override.yml'
        )
        assert os.path.exists(f'{WORK_DIR}/kobo-env/envfiles/django.txt')
    finally:
        # Always remove the work directory, even when assertions fail.
        shutil.rmtree(WORK_DIR)
54 |
55 |
56 | def _copy_templates(src: str = None, dst: str = None):
57 | if not src:
58 | src = os.path.dirname(os.path.realpath(__file__)) + '/../templates/'
59 | if not dst:
60 | dst = f'{WORK_DIR}/templates/'
61 |
62 | # Create the destination directory if needed
63 | os.makedirs(dst, exist_ok=True)
64 |
65 | for entry in os.listdir(src):
66 | src_path = os.path.join(src, entry)
67 | dst_path = os.path.join(dst, entry)
68 |
69 | if os.path.isdir(src_path):
70 | # Recursively copy subdirectories
71 | _copy_templates(src_path, dst_path)
72 | else:
73 | # Copy files (overwrite if exists)
74 | shutil.copy2(src_path, dst_path)
75 |
--------------------------------------------------------------------------------
/templates/nginx-certbot/init-letsencrypt.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function join_by { local d=$$1; shift; echo -n "$$1"; shift; printf "%s" "$${@/#/$$d}"; }
4 |
5 | DOMAINS=(${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME} ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME} ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME})
6 | DOMAINS_CSV=$$(join_by , "$${DOMAINS[@]}")
7 | RSA_KEY_SIZE=4096
8 | DATA_PATH="./data/certbot"
9 | EMAIL="" # Adding a valid address is strongly recommended
10 | STAGING=0 # Set to 1 if you're testing your setup to avoid hitting request limits
11 | MKDIR_CMD=$$(which mkdir)
12 | DOCKER_COMPOSE_CMD="$$(which ${DOCKER_COMPOSE_CMD})"
13 | CURL_CMD=$$(which curl)
14 |
15 |
16 | if [ -d "$$DATA_PATH/conf/live/$$DOMAINS" ]; then
17 | read -p "Existing data found for $$DOMAINS_CSV. Continue and replace existing certificate? (y/N) " decision
18 | if [ "$$decision" != "Y" ] && [ "$$decision" != "y" ]; then
19 | exit
20 | fi
21 | fi
22 |
23 | if [ ! -e "$$DATA_PATH/conf/options-ssl-nginx.conf" ] || [ ! -e "$$DATA_PATH/conf/ssl-dhparams.pem" ]; then
24 | echo "### Downloading recommended TLS parameters ..."
25 | $$MKDIR_CMD -p "$$DATA_PATH/conf"
26 | $$CURL_CMD -s https://raw.githubusercontent.com/kobotoolbox/nginx-certbot/master/certbot/options-ssl-nginx.conf > "$$DATA_PATH/conf/options-ssl-nginx.conf"
27 | $$CURL_CMD -s https://raw.githubusercontent.com/kobotoolbox/nginx-certbot/master/certbot/ssl-dhparams.pem > "$$DATA_PATH/conf/ssl-dhparams.pem"
28 | echo
29 | fi
30 |
31 | echo "### Creating dummy certificate for $${DOMAINS_CSV} ..."
32 | DOMAINS_PATH="/etc/letsencrypt/live/$$DOMAINS"
33 | $$MKDIR_CMD -p "$$DATA_PATH/conf/live/$$DOMAINS"
34 | $$DOCKER_COMPOSE_CMD ${DOCKER_COMPOSE_SUFFIX} run --rm --entrypoint "\
35 | openssl req -x509 -nodes -newkey rsa:2048 -days 1\
36 | -keyout '$$DOMAINS_PATH/privkey.pem' \
37 | -out '$$DOMAINS_PATH/fullchain.pem' \
38 | -subj '/CN=localhost'" certbot
39 | echo
40 |
41 |
42 | echo "### Starting nginx ..."
43 | $$DOCKER_COMPOSE_CMD ${DOCKER_COMPOSE_SUFFIX} up --force-recreate -d nginx_ssl_proxy
44 | echo
45 |
46 | echo "### Deleting dummy certificate for $${DOMAINS_CSV} ..."
47 | $$DOCKER_COMPOSE_CMD ${DOCKER_COMPOSE_SUFFIX} run --rm --entrypoint "\
48 | rm -Rf /etc/letsencrypt/live/$$DOMAINS && \
49 | rm -Rf /etc/letsencrypt/archive/$$DOMAINS && \
50 | rm -Rf /etc/letsencrypt/renewal/$$DOMAINS.conf" certbot
51 | echo
52 |
53 |
54 | echo "### Requesting Let's Encrypt certificate for $${DOMAINS_CSV} ..."
55 | #Join $$DOMAINS to -d args
56 | DOMAIN_ARGS=""
57 | for DOMAIN in "$${DOMAINS[@]}"; do
58 | DOMAIN_ARGS="$$DOMAIN_ARGS -d $$DOMAIN"
59 | done
60 |
61 | # Select appropriate EMAIL arg
62 | case "$$EMAIL" in
63 | "") EMAIL_ARG="--register-unsafely-without-email" ;;
64 | *) EMAIL_ARG="--email $$EMAIL" ;;
65 | esac
66 |
67 | # Enable staging mode if needed
68 | if [ $$STAGING != "0" ]; then STAGING_ARG="--staging"; fi
69 |
70 | $$DOCKER_COMPOSE_CMD ${DOCKER_COMPOSE_SUFFIX} run --rm --entrypoint "\
71 | certbot certonly --webroot -w /var/www/certbot \
72 | $$STAGING_ARG \
73 | $$EMAIL_ARG \
74 | $$DOMAIN_ARGS \
75 | --rsa-key-size $$RSA_KEY_SIZE \
76 | --agree-tos \
77 | --force-renewal" certbot
78 | echo
79 |
80 | echo "### Reloading nginx ..."
81 | $$DOCKER_COMPOSE_CMD ${DOCKER_COMPOSE_SUFFIX} exec nginx_ssl_proxy nginx -s reload
82 |
--------------------------------------------------------------------------------
/templates/kobo-env/envfiles/databases.txt.tpl:
--------------------------------------------------------------------------------
1 | #--------------------------------------------------------------------------------
2 | # MONGO
3 | #--------------------------------------------------------------------------------
4 | # These `KOBO_MONGO_` settings only affect the mongo container itself and the
5 | # `wait_for_mongo.bash` init script that runs within the kpi and kobocat.
6 | # Please see kobocat.txt to set container variables
7 | KOBO_MONGO_PORT=${MONGO_PORT}
8 | KOBO_MONGO_HOST=mongo.${PRIVATE_DOMAIN_NAME}
9 | MONGO_INITDB_ROOT_USERNAME=${MONGO_ROOT_USERNAME}
10 | MONGO_INITDB_ROOT_PASSWORD=${MONGO_ROOT_PASSWORD}
11 | MONGO_INITDB_DATABASE=formhub
12 | KOBO_MONGO_USERNAME=${MONGO_USER_USERNAME}
13 | KOBO_MONGO_PASSWORD=${MONGO_USER_PASSWORD}
14 | MONGO_DB_NAME=formhub
15 | MONGO_DB_URL=mongodb://${MONGO_USER_USERNAME}:${MONGO_USER_PASSWORD}@mongo.${PRIVATE_DOMAIN_NAME}:${MONGO_PORT}/formhub
16 |
17 | # Default MongoDB backup schedule is weekly at 01:00 AM UTC on Sunday.
18 | ${USE_BACKUP}MONGO_BACKUP_SCHEDULE=${MONGO_BACKUP_SCHEDULE}
19 |
20 | #--------------------------------------------------------------------------------
21 | # POSTGRES
22 | #--------------------------------------------------------------------------------
23 |
24 | # These `KOBO_POSTGRES_` settings only affect the postgres container itself and the
25 | # `wait_for_postgres.bash` init script that runs within the kpi and kobocat
26 | # containers. To control Django database connections, please see the
27 | # `DATABASE_URL` environment variable.
28 | POSTGRES_PORT=${POSTGRES_PORT}
29 | POSTGRES_HOST=postgres.${PRIVATE_DOMAIN_NAME}
30 | POSTGRES_USER=${POSTGRES_USER}
31 | POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
32 | KC_POSTGRES_DB=${KC_POSTGRES_DB}
33 | KPI_POSTGRES_DB=${KPI_POSTGRES_DB}
34 |
35 | # Postgres database used by kpi and kobocat Django apps
36 | KC_DATABASE_URL=postgis://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres.${PRIVATE_DOMAIN_NAME}:${POSTGRES_PORT}/${KC_POSTGRES_DB}
37 | KPI_DATABASE_URL=postgis://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres.${PRIVATE_DOMAIN_NAME}:${POSTGRES_PORT}/${KPI_POSTGRES_DB}
38 | DATABASE_URL=postgis://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres.${PRIVATE_DOMAIN_NAME}:${POSTGRES_PORT}/${KPI_POSTGRES_DB}
39 |
40 | # Default Postgres backup schedule is weekly at 02:00 AM UTC on Sunday.
41 | ${USE_BACKUP}POSTGRES_BACKUP_SCHEDULE=${POSTGRES_BACKUP_SCHEDULE}
42 |
43 | #--------------------------------------------------------------------------------
44 | # REDIS
45 | #--------------------------------------------------------------------------------
46 |
47 | # Default Redis backup schedule is weekly at 03:00 AM UTC on Sunday.
48 | ${USE_BACKUP}REDIS_BACKUP_SCHEDULE=${REDIS_BACKUP_SCHEDULE}
49 |
50 | REDIS_SESSION_URL=redis://{% if REDIS_PASSWORD %}:${REDIS_PASSWORD}@{% endif REDIS_PASSWORD %}redis-cache.${PRIVATE_DOMAIN_NAME}:${REDIS_CACHE_PORT}/2
51 | REDIS_PASSWORD=${REDIS_PASSWORD}
52 | CACHE_URL=redis://{% if REDIS_PASSWORD %}:${REDIS_PASSWORD}@{% endif REDIS_PASSWORD %}redis-cache.${PRIVATE_DOMAIN_NAME}:${REDIS_CACHE_PORT}/5
53 | REDIS_CACHE_MAX_MEMORY=${REDIS_CACHE_MAX_MEMORY}
54 | SERVICE_ACCOUNT_BACKEND_URL=redis://{% if REDIS_PASSWORD %}:${REDIS_PASSWORD}@{% endif REDIS_PASSWORD %}redis-cache.${PRIVATE_DOMAIN_NAME}:${REDIS_CACHE_PORT}/6
55 | ENKETO_REDIS_MAIN_URL=redis://{% if REDIS_PASSWORD %}:${REDIS_PASSWORD}@{% endif REDIS_PASSWORD %}redis-main.${PRIVATE_DOMAIN_NAME}:${REDIS_MAIN_PORT}/0
56 |
--------------------------------------------------------------------------------
/helpers/aws_validation.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import datetime
3 | import hashlib
4 | import hmac
5 | from urllib.error import HTTPError
6 | from urllib.request import Request, urlopen
7 |
8 |
class AWSValidation:
    """
    A class to validate AWS credentials without using boto3 as a dependency.

    The structure and methods have been adapted from the AWS documentation:
    http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
    """

    # Constants describing the STS `GetCallerIdentity` request that is
    # signed and sent to verify the credentials.
    METHOD = 'POST'
    SERVICE = 'sts'
    REGION = 'us-east-1'
    HOST = 'sts.amazonaws.com'
    ENDPOINT = 'https://sts.amazonaws.com'
    REQUEST_PARAMETERS = 'Action=GetCallerIdentity&Version=2011-06-15'
    CANONICAL_URI = '/'
    SIGNED_HEADERS = 'host;x-amz-date'
    # SHA-256 of the empty request body.
    PAYLOAD_HASH = hashlib.sha256(''.encode()).hexdigest()
    ALGORITHM = 'AWS4-HMAC-SHA256'

    def __init__(self, aws_access_key_id, aws_secret_access_key):
        self.access_key = aws_access_key_id
        self.secret_key = aws_secret_access_key

    @staticmethod
    def _sign(key, msg):
        """Return the HMAC-SHA256 digest of `msg` keyed with `key` (bytes)."""
        return hmac.new(key, msg.encode(), hashlib.sha256).digest()

    @classmethod
    def _get_signature_key(cls, key, date_stamp, region_name, service_name):
        """Derive the SigV4 signing key via the chained HMAC steps."""
        k_date = cls._sign(('AWS4' + key).encode(), date_stamp)
        k_region = cls._sign(k_date, region_name)
        k_service = cls._sign(k_region, service_name)
        return cls._sign(k_service, 'aws4_request')

    def _get_request_url_and_headers(self):
        """
        Build the signed request URL and headers for an STS
        `GetCallerIdentity` call.

        Returns:
            tuple: `(request_url, headers)` suitable for `urllib.request.Request`.
        """
        # SigV4 requires a UTC timestamp; `datetime.utcnow()` is deprecated
        # since Python 3.12 (which this project supports), so use an aware
        # datetime instead — the formatted output is identical.
        t = datetime.datetime.now(datetime.timezone.utc)
        amzdate = t.strftime('%Y%m%dT%H%M%SZ')
        datestamp = t.strftime('%Y%m%d')

        canonical_querystring = self.REQUEST_PARAMETERS

        canonical_headers = '\n'.join(
            [
                'host:{host}'.format(host=self.HOST),
                'x-amz-date:{amzdate}'.format(amzdate=amzdate),
                '',
            ]
        )

        canonical_request = '\n'.join(
            [
                self.METHOD,
                self.CANONICAL_URI,
                canonical_querystring,
                canonical_headers,
                self.SIGNED_HEADERS,
                self.PAYLOAD_HASH,
            ]
        )

        credential_scope = '/'.join(
            [datestamp, self.REGION, self.SERVICE, 'aws4_request']
        )

        string_to_sign = '\n'.join(
            [
                self.ALGORITHM,
                amzdate,
                credential_scope,
                hashlib.sha256(canonical_request.encode()).hexdigest(),
            ]
        )

        signing_key = self._get_signature_key(
            self.secret_key, datestamp, self.REGION, self.SERVICE
        )

        signature = hmac.new(
            signing_key, string_to_sign.encode(), hashlib.sha256
        ).hexdigest()

        authorization_header = (
            '{} Credential={}/{}, SignedHeaders={}, Signature={}'.format(
                self.ALGORITHM,
                self.access_key,
                credential_scope,
                self.SIGNED_HEADERS,
                signature,
            )
        )

        headers = {'x-amz-date': amzdate, 'Authorization': authorization_header}
        request_url = '?'.join([self.ENDPOINT, canonical_querystring])

        return request_url, headers

    def validate_credentials(self):
        """
        Return True if the stored credentials are accepted by AWS STS,
        False otherwise (including any HTTP error response).
        """
        request_url, headers = self._get_request_url_and_headers()
        req = Request(request_url, headers=headers, method=self.METHOD)

        try:
            with urlopen(req) as res:
                # Any non-200 status means the credentials were rejected.
                return res.status == 200
        except HTTPError:
            return False
118 |
--------------------------------------------------------------------------------
/run.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | import platform
4 | import sys
5 |
# Bail out early on interpreters that are past end-of-life. Classes are
# deliberately not imported yet: they may contain syntax that older Python
# versions cannot parse (i.e. avoid SyntaxError on imports).
if sys.version_info < (3, 6):
    message = (
        '╔══════════════════════════════════════════════════════╗\n'
        '║ ║\n'
        '║ Your Python version has reached the end of its life. ║\n'
        '║ Please upgrade it as it is not maintained anymore. ║\n'
        '║ ║\n'
        '╚══════════════════════════════════════════════════════╝'
    )
    print('\033[1;31m' + message + '\033[0;0m')
    sys.exit(1)
22 |
23 | from helpers.cli import CLI
24 | from helpers.command import Command
25 | from helpers.config import Config
26 | from helpers.setup import Setup
27 | from helpers.template import Template
28 | from helpers.updater import Updater
29 |
30 |
def run(force_setup=False):
    """
    Load (or build) the configuration, render templates when required and
    start the containers.

    Args:
        force_setup (bool): When True, rebuild the configuration from
            scratch. Also forced on the very first run.
    """
    # Guard clause instead of a large `else` branch
    if platform.system() not in ['Linux', 'Darwin']:
        CLI.colored_print('Not compatible with this OS', CLI.COLOR_ERROR)
        return

    config = Config()
    dict_ = config.get_dict()
    force_setup = force_setup or config.first_time

    if force_setup:
        dict_ = config.build()
        Setup.clone_kobodocker(config)
        Template.render(config)
        Setup.update_hosts(dict_)
    elif config.auto_detect_network():
        # Network changed since last run: regenerate files accordingly
        Template.render(config)
        Setup.update_hosts(dict_)

    config.validate_passwords()
    Command.start(force_setup=force_setup)
53 |
54 |
if __name__ == '__main__':
    try:
        # Avoid infinite self-updating loops: remember whether the flag was
        # present, then strip every occurrence of it from the arguments.
        update_self = Updater.NO_UPDATE_SELF_OPTION not in sys.argv
        while Updater.NO_UPDATE_SELF_OPTION in sys.argv:
            sys.argv.remove(Updater.NO_UPDATE_SELF_OPTION)

        arg_count = len(sys.argv)
        option = sys.argv[1] if arg_count > 1 else None

        if arg_count > 2:
            if option in ('-cf', '--compose-frontend'):
                Command.compose_frontend(sys.argv[2:])
            elif option in ('-cb', '--compose-backend'):
                Command.compose_backend(sys.argv[2:])
            elif option in ('-u', '--update', '--upgrade'):
                Updater.run(sys.argv[2], update_self=update_self)
            elif option == '--auto-update':
                Updater.run(sys.argv[2], cron=True, update_self=update_self)
            else:
                CLI.colored_print("Bad syntax. Try 'run.py --help'",
                                  CLI.COLOR_ERROR)
        elif arg_count == 2:
            if option in ('-h', '--help'):
                Command.help()
            elif option in ('-u', '--update', '--upgrade'):
                # 'update' was called 'upgrade' in a previous release; accept
                # either 'update' or 'upgrade' here to ease the transition
                Updater.run(update_self=update_self)
            elif option == '--auto-update':
                Updater.run(cron=True, update_self=update_self)
            elif option in ('-i', '--info'):
                Command.info(0)
            elif option in ('-s', '--setup'):
                run(force_setup=True)
            elif option in ('-S', '--stop'):
                Command.stop()
            elif option in ('-l', '--logs'):
                Command.logs()
            elif option in ('-b', '--build'):
                Command.build()
            elif option in ('-v', '--version'):
                Command.version()
            elif option in ('-m', '--maintenance'):
                Command.configure_maintenance()
            elif option in ('-sm', '--stop-maintenance'):
                Command.stop_maintenance()
            else:
                CLI.colored_print("Bad syntax. Try 'run.py --help'",
                                  CLI.COLOR_ERROR)
        else:
            run()

    except KeyboardInterrupt:
        CLI.colored_print('\nUser interrupted execution', CLI.COLOR_INFO)
115 |
--------------------------------------------------------------------------------
/tests/test_run.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from unittest.mock import patch, MagicMock
3 |
4 | from helpers.command import Command
5 | from .utils import (
6 | mock_read_config as read_config,
7 | MockCommand,
8 | MockDocker,
9 | MockUpgrading,
10 | )
11 |
12 |
@patch('helpers.network.Network.is_port_open',
       MagicMock(return_value=False))
@patch('helpers.command.Upgrading.migrate_single_to_two_databases',
       new=MockUpgrading.migrate_single_to_two_databases)
@patch('helpers.command.Command.info',
       MagicMock(return_value=True))
@patch('helpers.cli.CLI.run_command',
       new=MockCommand.run_command)
def test_toggle_trivial():
    """Start/stop with default config brings the whole stack up then down."""
    # Reading the config initializes the `Config` singleton used by `Command`
    read_config()
    Command.start()
    docker = MockDocker()
    running = (
        MockDocker.FRONTEND_CONTAINERS
        + MockDocker.BACKEND_CONTAINERS
        + MockDocker.LETSENCRYPT
    )
    assert sorted(docker.ps()) == sorted(running)

    Command.stop()
    assert docker.ps() == []
    del docker
33 |
34 |
@patch('helpers.network.Network.is_port_open',
       MagicMock(return_value=False))
@patch('helpers.command.Upgrading.migrate_single_to_two_databases',
       new=MockUpgrading.migrate_single_to_two_databases)
@patch('helpers.command.Command.info',
       MagicMock(return_value=True))
@patch('helpers.cli.CLI.run_command',
       new=MockCommand.run_command)
def test_toggle_no_letsencrypt():
    """With Let's Encrypt disabled, its containers must never come up."""
    cfg = read_config()
    cfg._Config__dict['use_letsencrypt'] = False
    Command.start()
    docker = MockDocker()
    running = (
        MockDocker.FRONTEND_CONTAINERS + MockDocker.BACKEND_CONTAINERS
    )
    assert sorted(docker.ps()) == sorted(running)

    Command.stop()
    assert docker.ps() == []
    del docker
56 |
57 |
@patch('helpers.network.Network.is_port_open',
       MagicMock(return_value=False))
@patch('helpers.command.Upgrading.migrate_single_to_two_databases',
       new=MockUpgrading.migrate_single_to_two_databases)
@patch('helpers.command.Command.info',
       MagicMock(return_value=True))
@patch('helpers.cli.CLI.run_command',
       new=MockCommand.run_command)
def test_toggle_frontend():
    """`frontend_only` must start the front end (and Let's Encrypt) only."""
    read_config()
    Command.start(frontend_only=True)
    docker = MockDocker()
    running = (
        MockDocker.FRONTEND_CONTAINERS + MockDocker.LETSENCRYPT
    )
    assert sorted(docker.ps()) == sorted(running)

    Command.stop()
    assert docker.ps() == []
    del docker
77 |
78 |
@patch('helpers.network.Network.is_port_open',
       MagicMock(return_value=False))
@patch('helpers.command.Upgrading.migrate_single_to_two_databases',
       new=MockUpgrading.migrate_single_to_two_databases)
@patch('helpers.command.Command.info',
       MagicMock(return_value=True))
@patch('helpers.cli.CLI.run_command',
       new=MockCommand.run_command)
def test_toggle_backend():
    """A multi-server back-end role must start back-end containers only."""
    cfg = read_config()
    cfg._Config__dict['server_role'] = 'backend'
    cfg._Config__dict['multi'] = True

    Command.start()
    docker = MockDocker()
    assert sorted(docker.ps()) == sorted(MockDocker.BACKEND_CONTAINERS)

    Command.stop()
    assert docker.ps() == []
    del docker
100 |
101 |
@patch('helpers.network.Network.is_port_open',
       MagicMock(return_value=False))
@patch('helpers.command.Upgrading.migrate_single_to_two_databases',
       new=MockUpgrading.migrate_single_to_two_databases)
@patch('helpers.command.Command.info',
       MagicMock(return_value=True))
@patch('helpers.cli.CLI.run_command',
       new=MockCommand.run_command)
def test_toggle_maintenance():
    """Toggling maintenance swaps front-end and maintenance containers."""
    cfg = read_config()
    docker = MockDocker()
    Command.start()
    running = (
        MockDocker.FRONTEND_CONTAINERS
        + MockDocker.BACKEND_CONTAINERS
        + MockDocker.LETSENCRYPT
    )
    assert sorted(docker.ps()) == sorted(running)

    # Entering maintenance replaces the front end with maintenance containers
    cfg._Config__dict['maintenance_enabled'] = True
    Command.start()
    maintenance = (
        MockDocker.BACKEND_CONTAINERS
        + MockDocker.MAINTENANCE_CONTAINERS
        + MockDocker.LETSENCRYPT
    )
    assert sorted(docker.ps()) == sorted(maintenance)

    # Leaving maintenance restores the normal stack
    cfg._Config__dict['maintenance_enabled'] = False
    Command.start()
    assert sorted(docker.ps()) == sorted(running)
    Command.stop()
    assert docker.ps() == []
    del docker
135 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import json
3 | from unittest.mock import patch, mock_open
4 |
5 | from helpers.config import Config
6 | from helpers.singleton import Singleton
7 |
8 |
def mock_read_config(overrides=None):
    """
    Build a `Config` instance backed by the default template (with
    `kobodocker_path` forced to `/tmp`), mocking out every file read.

    Args:
        overrides (dict): Optional values merged over the template.
    Returns:
        helpers.config.Config
    """
    config_dict = dict(Config.get_template())
    config_dict['kobodocker_path'] = '/tmp'
    if overrides is not None:
        config_dict.update(overrides)

    str_config = json.dumps(config_dict)

    def _mocked_file_handles():
        # `Config()`/`read_config()` call `open()` twice:
        # - once to read the kobo-install config file (i.e. `.run.conf`)
        # - once to read the value of `unique_id` (i.e. `/tmp/.uniqid`)
        return iter([
            mock_open(read_data=str_config).return_value,
            mock_open(read_data='').return_value,
        ])

    with patch('builtins.open', spec=open) as mock_file:
        mock_file.side_effect = _mocked_file_handles()
        config = Config()

    # Call `read_config()` one more time to be sure the config is reset
    # before each test. Thanks to `mock_open`, `Config.get_dict()` always
    # returns `config_dict`.
    with patch('builtins.open', spec=open) as mock_file:
        mock_file.side_effect = _mocked_file_handles()
        config.read_config()

    assert config.get_dict()['kobodocker_path'] == config_dict['kobodocker_path']

    return config
42 |
def mock_reset_config(config):
    """
    Reset `config`'s internal dictionary to the default template (with
    `kobodocker_path` forced to `/tmp`).

    Args:
        config (helpers.config.Config)
    """
    dict_ = dict(Config.get_template())
    dict_['kobodocker_path'] = '/tmp'
    # Bug fix: `config.__dict` is only name-mangled *inside* a class body.
    # Here, at module level, it created a brand-new `__dict` attribute and
    # left the real private `Config.__dict` untouched. Use the mangled name
    # explicitly (tests elsewhere mutate `config._Config__dict` the same way).
    config._Config__dict = dict_
48 |
49 |
def mock_write_trigger_upsert_db_users(*args):
    """
    Write the second positional argument to `/tmp/upsert_db_users` instead of
    the real destination, so tests can inspect what would have been written.
    """
    with open('/tmp/upsert_db_users', 'w') as file_:
        file_.write(args[1])
55 |
56 |
class MockCommand:
    """
    Stand-in for `CLI.run_command` that only understands
    `docker compose ...` invocations and delegates them to `MockDocker`;
    anything else raises.
    """
    @classmethod
    def run_command(cls, command, cwd=None, polling=False):
        is_docker_compose = (
            len(command) > 1
            and command[0] == 'docker'
            and command[1] == 'compose'
        )
        if not is_docker_compose:
            message = f'Command: `{command[0]}` is not implemented!'
            raise Exception(message)

        return MockDocker().compose(command, cwd)
72 |
class MockDocker(metaclass=Singleton):
    """
    In-memory stand-in for the Docker daemon: keeps track of which containers
    are "up" so tests can assert on the effect of `docker compose up/down`.
    Declared as a Singleton so every `MockCommand.run_command` call shares the
    same container state within a test.
    """

    BACKEND_CONTAINERS = [
        'primary_postgres',
        'mongo',
        'redis_main',
        'redis_cache',
    ]
    FRONTEND_CONTAINERS = ['nginx', 'kpi', 'enketo_express']
    MAINTENANCE_CONTAINERS = ['maintenance', 'kpi', 'enketo_express']
    LETSENCRYPT = ['letsencrypt_nginx', 'certbot']

    def __init__(self):
        # Names of the containers currently considered "running"
        self.__containers = []

    def ps(self):
        """Return the list of currently 'running' container names."""
        return self.__containers

    def compose(self, command, cwd):
        """
        Emulate a `docker compose` invocation.

        NOTE(review): this relies on fixed argv positions — the subcommand is
        expected at `command[-2]` (for `config`/`up`) or `command[-1]` (for
        `down`), and the compose file name at `command[3]`; presumably the
        callers always build commands with that shape — verify against
        `Command.start`/`Command.stop`.
        """
        config_object = Config()
        # A cwd equal to the Let's Encrypt repo means the nginx-certbot stack
        letsencrypt = cwd == config_object.get_letsencrypt_repo_path()

        if command[-2] == 'config':
            # `config` output is used to list front-end services minus nginx
            return '\n'.join([c
                for c in self.FRONTEND_CONTAINERS
                if c != 'nginx'])
        if command[-2] == 'up':
            if letsencrypt:
                self.__containers += self.LETSENCRYPT
            elif 'backend' in command[3]:
                self.__containers += self.BACKEND_CONTAINERS
            elif 'maintenance' in command[3]:
                self.__containers += self.MAINTENANCE_CONTAINERS
            elif 'frontend' in command[3]:
                self.__containers += self.FRONTEND_CONTAINERS
        elif command[-1] == 'down':
            try:
                if letsencrypt:
                    for container in self.LETSENCRYPT:
                        self.__containers.remove(container)
                elif 'backend' in command[3]:
                    for container in self.BACKEND_CONTAINERS:
                        self.__containers.remove(container)
                elif 'maintenance' in command[3]:
                    for container in self.MAINTENANCE_CONTAINERS:
                        self.__containers.remove(container)
                elif 'frontend' in command[3]:
                    for container in self.FRONTEND_CONTAINERS:
                        self.__containers.remove(container)
            except ValueError:
                # Try to take a container down but was not up before.
                pass

        return True
127 |
128 |
class MockUpgrading:
    """No-op replacement for `Upgrading` so tests skip database migrations."""

    @staticmethod
    def migrate_single_to_two_databases(config):
        # Intentionally does nothing: tests never migrate real databases
        return None
134 |
135 |
class MockAWSValidation:
    """
    Replacement for `AWSValidation` whose credential check succeeds only for
    one fixed pair of test credentials, without contacting AWS.
    """

    def validate_credentials(self):
        # Same contract as the real class: True only for the known test pair
        return (
            self.access_key == 'test_access_key'
            and self.secret_key == 'test_secret_key'
        )
146 |
--------------------------------------------------------------------------------
/helpers/cli.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import subprocess
3 | import sys
4 | import re
5 | import textwrap
6 |
7 |
class CLI:
    """
    Console helpers: colored output, framed messages, validated prompts and
    subprocess execution.
    """

    NO_COLOR = '\033[0;0m'
    COLOR_ERROR = '\033[0;31m'  # dark red
    COLOR_SUCCESS = '\033[0;32m'  # dark green
    COLOR_INFO = '\033[1;34m'  # blue
    COLOR_WARNING = '\033[1;31m'  # red
    COLOR_QUESTION = '\033[1;33m'  # dark yellow
    COLOR_DEFAULT = '\033[1;37m'  # white

    # Character the user types to erase a previously entered value
    EMPTY_CHARACTER = '-'

    DEFAULT_CHOICES = {
        '1': True,
        '2': False,
    }
    # We need an inverted dict version of `DEFAULT_CHOICES` to be able to
    # retrieve keys from the values
    DEFAULT_RESPONSES = dict(zip(DEFAULT_CHOICES.values(),
                                 DEFAULT_CHOICES.keys()))

    @classmethod
    def colored_input(cls, message, color=NO_COLOR, default=None):
        """
        Prompt the user with a colored message; return their input, or
        `default` when they enter nothing. Entering `-` clears the previously
        stored value.
        """
        text = cls.get_message_with_default(message, default)
        input_ = input(cls.colorize(text, color))

        # User wants to delete value previously entered.
        if input_ == '-':
            default = ''
            input_ = ''

        return input_ if input_ is not None and input_ != '' else default

    @classmethod
    def colored_print(cls, message, color=NO_COLOR):
        """Print `message` wrapped in the given ANSI color code."""
        print(cls.colorize(message, color))

    @classmethod
    def colorize(cls, message, color=NO_COLOR):
        """Return `message` wrapped in `color` and reset to `NO_COLOR`."""
        return f'{color}{message}{cls.NO_COLOR}'

    @classmethod
    def framed_print(cls, message, color=COLOR_WARNING, columns=70):
        """
        Print `message` (str or list of str) inside a box drawn with
        box-drawing characters, word-wrapped to `columns`.
        """
        border = '═' * (columns - 2)
        blank_line = ' ' * (columns - 2)
        framed_message = [
            f'╔{border}╗',
            f'║{blank_line}║',
        ]

        if not isinstance(message, list):
            paragraphs = message.split('\n')
        else:
            paragraphs = ''.join(message).split('\n')

        for paragraph in paragraphs:
            if paragraph == '':
                # Preserve intentional blank lines inside the frame
                framed_message.append(
                    f'║{blank_line}║'
                )
                continue

            for line in textwrap.wrap(paragraph, columns - 4):
                message_length = len(line)
                spacer = ' ' * (columns - 4 - message_length)
                framed_message.append(
                    f'║ {line}{spacer} ║'
                )

        framed_message.append(f'║{blank_line}║')
        framed_message.append(f'╚{border}╝')
        cls.colored_print('\n'.join(framed_message), color=color)

    @classmethod
    def get_response(cls, validators=None, default='', to_lower=True,
                     error_msg="Sorry, I didn't understand that!"):
        """
        Prompt until the user's answer matches one of `validators`
        (case-insensitively), or the regex given as a `~`-prefixed string.

        When `validators` is None, a Yes/No menu is assumed ("1" → True,
        "2" → False) and the matching boolean is returned.
        """
        use_default = False
        # If not validators are provided, let's use default validation
        # "Yes/No", where "Yes" equals 1, and "No" equals 2
        # Example:
        #   Are you sure?
        #       1) Yes
        #       2) No
        if validators is None:
            use_default = True
            default = cls.DEFAULT_RESPONSES[default]
            validators = cls.DEFAULT_CHOICES.keys()

        while True:
            try:
                response = cls.colored_input('', cls.COLOR_QUESTION, default)

                if (
                    response.lower() in map(lambda x: x.lower(), validators)
                    or validators is None
                    or (
                        isinstance(validators, str)
                        and validators.startswith('~')
                        and re.match(validators[1:], response)
                    )
                ):
                    break
                else:
                    cls.colored_print(error_msg,
                                      cls.COLOR_ERROR)
            except ValueError:
                cls.colored_print("Sorry, I didn't understand that.",
                                  cls.COLOR_ERROR)

        if use_default:
            return cls.DEFAULT_CHOICES[response]

        return response.lower() if to_lower else response

    @classmethod
    def get_message_with_default(cls, message, default):
        """
        Build a prompt string combining `message` and a highlighted
        `[default]` suffix (either part may be empty).
        """
        message = f'{message} ' if message else ''

        if default is None:
            default = ''
        else:
            default = '{white}[{off}{default}{white}]{off}: '.format(
                white=cls.COLOR_DEFAULT,
                off=cls.NO_COLOR,
                default=default
            )

        if message:
            message = f'{message.strip()}: ' if not default else message

        return f'{message}{default}'

    @classmethod
    def run_command(cls, command, cwd=None, polling=False):
        """
        Run `command` (a list, as expected by `subprocess`).

        Args:
            command (list): Program and its arguments.
            cwd (str): Working directory for the child process.
            polling (bool): When True, stream the child's stdout line by line
                and return its exit code. Otherwise return the captured
                stdout, exiting the program on failure.
        """
        if polling:
            process = subprocess.Popen(command, stdout=subprocess.PIPE, cwd=cwd)
            while True:
                output = process.stdout.readline()
                # Bug fix: `readline()` returns *bytes* here (no text mode),
                # so the previous `output == ''` (str) comparison could never
                # be True and the loop never ended once the process exited.
                if not output and process.poll() is not None:
                    break
                if output:
                    print(output.decode().strip())
            return process.poll()
        else:
            try:
                stdout = subprocess.check_output(command,
                                                 universal_newlines=True,
                                                 cwd=cwd)
            except subprocess.CalledProcessError as cpe:
                # `check_output` does not display the failing output itself;
                # write it explicitly. See
                # https://docs.python.org/3/library/subprocess.html#subprocess.check_output
                sys.stderr.write(cpe.output)
                cls.colored_print('An error has occurred', cls.COLOR_ERROR)
                sys.exit(1)
            return stdout

    @classmethod
    def yes_no_question(cls, question, default=True,
                        labels=('Yes', 'No')):
        """
        Print `question` with a numbered menu built from `labels` and return
        the user's boolean answer. (`labels` default is a tuple to avoid a
        mutable default argument.)
        """
        cls.colored_print(question, color=cls.COLOR_QUESTION)
        for index, label in enumerate(labels):
            choice_number = index + 1
            cls.colored_print(f'\t{choice_number}) {label}')
        return cls.get_response(default=default)
174 |
--------------------------------------------------------------------------------
/helpers/setup.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | import shutil
4 | import sys
5 | import tempfile
6 |
7 | from helpers.cli import CLI
8 | from helpers.command import Command
9 | from helpers.config import Config
10 | from helpers.template import Template
11 |
12 |
class Setup:
    """
    Setup/maintenance helpers: cloning and updating the `kobo-docker`
    repository, regenerating configuration after kobo-install updates, and
    keeping `/etc/hosts` in sync for local installations.
    """

    @classmethod
    def clone_kobodocker(cls, config):
        """
        Clone the `kobo-docker` repository into `kobodocker_path` if it is
        not already a git checkout, then update it when needed.

        Args:
            config (helpers.config.Config)
        """
        dict_ = config.get_dict()
        do_update = config.first_time

        if not os.path.isdir(os.path.join(dict_['kobodocker_path'], '.git')):
            # Move unique id file to /tmp in order to clone without errors
            # (e.g. not empty directory)
            tmp_dirpath = tempfile.mkdtemp()
            shutil.move(os.path.join(dict_['kobodocker_path'],
                                     Config.UNIQUE_ID_FILE),
                        os.path.join(tmp_dirpath, Config.UNIQUE_ID_FILE))

            # clone project
            git_command = [
                'git', 'clone', 'https://github.com/kobotoolbox/kobo-docker',
                dict_['kobodocker_path']
            ]
            CLI.run_command(git_command, cwd=os.path.dirname(
                dict_['kobodocker_path']))

            # Restore the unique id file inside the freshly cloned repository
            shutil.move(os.path.join(tmp_dirpath, Config.UNIQUE_ID_FILE),
                        os.path.join(dict_['kobodocker_path'],
                                     Config.UNIQUE_ID_FILE))
            shutil.rmtree(tmp_dirpath)
            do_update = True  # Force update

        if do_update:
            cls.update_kobodocker(dict_)

    @classmethod
    def post_update(cls, cron):
        """
        Regenerate configuration after a kobo-install update.

        When `cron` is True, files are regenerated silently and the process
        exits; otherwise the user is prompted interactively.
        """
        config = Config()

        # When `cron` is True, we want to bypass question and just recreate
        # YML and environment files from new templates
        if cron is True:
            current_dict = config.get_upgraded_dict()
            config.set_config(current_dict)
            config.write_config()
            Template.render(config, force=True)
            sys.exit(0)

        message = (
            'After an update, it is strongly recommended to run\n'
            '`python3 run.py --setup` to regenerate environment files.'
        )
        CLI.framed_print(message, color=CLI.COLOR_INFO)
        response = CLI.yes_no_question('Do you want to proceed?')
        if response is True:
            current_dict = config.build()
            Template.render(config)
            Setup.update_hosts(current_dict)
            question = 'Do you want to (re)start containers?'
            response = CLI.yes_no_question(question)
            if response is True:
                Command.start(force_setup=True)

    @staticmethod
    def update_kobodocker(dict_=None):
        """
        Fetch, checkout and pull the pinned `kobo-docker` branch.

        Args:
            dict_ (dict): Dictionary provided by `Config.get_dict()`
        """
        if not dict_:
            config = Config()
            dict_ = config.get_dict()

        # fetch new tags and prune
        git_command = ['git', 'fetch', '-p']
        CLI.run_command(git_command, cwd=dict_['kobodocker_path'])

        # checkout branch
        git_command = ['git', 'checkout', '--force', Config.KOBO_DOCKER_BRANCH]
        CLI.run_command(git_command, cwd=dict_['kobodocker_path'])

        # update code
        git_command = ['git', 'pull', 'origin', Config.KOBO_DOCKER_BRANCH]
        CLI.run_command(git_command, cwd=dict_['kobodocker_path'])

    @staticmethod
    def update_koboinstall(version):
        """Fetch, checkout and pull `version` of kobo-install itself (cwd)."""
        # fetch new tags and prune
        git_fetch_prune_command = ['git', 'fetch', '-p']
        CLI.run_command(git_fetch_prune_command)

        # checkout branch
        git_command = ['git', 'checkout', '--force', version]
        CLI.run_command(git_command)

        # update code
        git_command = ['git', 'pull', 'origin', version]
        CLI.run_command(git_command)

    @classmethod
    def update_hosts(cls, dict_):
        """
        For local installations, (re)write the KoboToolbox block in
        `/etc/hosts` so the configured subdomains resolve to the local
        interface IP. Requires sudo to overwrite the file.

        Args:
            dict_ (dict): Dictionary provided by `Config.get_dict()`
        """
        if dict_['local_installation']:
            # Markers delimiting the block this tool owns in /etc/hosts
            start_sentence = '### (BEGIN) KoboToolbox local routes'
            end_sentence = '### (END) KoboToolbox local routes'

            _, tmp_file_path = tempfile.mkstemp()

            with open('/etc/hosts', 'r') as f:
                tmp_host = f.read()

            start_position = tmp_host.lower().find(start_sentence.lower())
            end_position = tmp_host.lower().find(end_sentence.lower())

            # Strip any previously written KoboToolbox block before appending
            # the fresh one
            if start_position > -1:
                tmp_host = tmp_host[0: start_position] \
                    + tmp_host[end_position + len(end_sentence) + 1:]

            public_domain_name = dict_['public_domain_name']
            routes = (
                f"{dict_['local_interface_ip']} "
                f"{dict_['kpi_subdomain']}.{public_domain_name} "
                f"{dict_['kc_subdomain']}.{public_domain_name} "
                f"{dict_['ee_subdomain']}.{public_domain_name}"
            )

            bof = tmp_host.strip()
            tmp_host = (
                f'{bof}'
                f'\n{start_sentence}'
                f'\n{routes}'
                f'\n{end_sentence}'
            )

            # Write the candidate file first; only copy it over /etc/hosts
            # (with sudo) after the user had a chance to review it
            with open(tmp_file_path, 'w') as f:
                f.write(tmp_host)

            message = (
                'Privileges escalation is required to update '
                'your `/etc/hosts`.'
            )
            CLI.framed_print(message, color=CLI.COLOR_INFO)
            dict_['review_host'] = CLI.yes_no_question(
                'Do you want to review your /etc/hosts file '
                'before overwriting it?',
                default=dict_['review_host']
            )
            if dict_['review_host']:
                print(tmp_host)
                CLI.colored_input('Press any keys when ready')

            # Save 'review_host'
            config = Config()
            config.write_config()

            # Keep a backup of the previous file before overwriting it
            cmd = (
                'sudo cp /etc/hosts /etc/hosts.old '
                '&& sudo cp {tmp_file_path} /etc/hosts'
            ).format(tmp_file_path=tmp_file_path)

            return_value = os.system(cmd)

            os.unlink(tmp_file_path)

            if return_value != 0:
                sys.exit(1)

    @staticmethod
    def validate_already_run():
        """
        Validates that Setup has been run at least once and kobo-docker has been
        pulled and checked out before going further.
        """

        config = Config()
        dict_ = config.get_dict()

        def display_error_message(message):
            # Print the error in a frame and abort
            message += '\nPlease run `python3 run.py --setup` first.'
            CLI.framed_print(message, color=CLI.COLOR_ERROR)
            sys.exit(1)

        try:
            dict_['kobodocker_path']
        except KeyError:
            display_error_message('No configuration file found.')

        if not os.path.isdir(os.path.join(dict_['kobodocker_path'], '.git')):
            display_error_message('`kobo-docker` repository is missing!')
208 |
--------------------------------------------------------------------------------
/helpers/network.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import array
3 | import fcntl
4 | import platform
5 | import socket
6 | import struct
7 | import sys
8 | from http import client as httplib
9 | from urllib.request import urlopen
10 |
11 | from helpers.cli import CLI
12 |
13 |
class Network:
    """Helpers to inspect local network interfaces and probe remote hosts."""

    STATUS_OK_200 = 200

    @staticmethod
    def get_local_interfaces(all_=False):
        """
        Returns a dictionary of name:ip key value pairs.
        Linux Only!
        Source: https://gist.github.com/bubthegreat/24c0c43ad159d8dfed1a5d3f6ca99f9b

        Args:
            all_ (bool): If False, filter virtual interfaces such VMWare,
                Docker etc...
        Returns:
            dict
        """
        ip_dict = {}
        excluded_interfaces = ('lo', 'docker', 'br-', 'veth', 'vmnet')

        if platform.system() == 'Linux':
            # Max possible bytes for interface result.
            # Will truncate if more than 4096 characters to describe interfaces.
            MAX_BYTES = 4096

            # We're going to make a blank byte array to operate on.
            # This is our fill char.
            FILL_CHAR = b'\0'

            # Command defined in ioctl.h for the system operation for get iface
            # list.
            # Defined at https://code.woboq.org/qt5/include/bits/ioctls.h.html
            # under /* Socket configuration controls. */ section.
            SIOCGIFCONF = 0x8912

            # Make a dgram socket to use as our file descriptor that we'll
            # operate on.
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                # Make a byte array with our fill character.
                names = array.array('B', MAX_BYTES * FILL_CHAR)

                # Get the address of our names byte array for use in our struct.
                names_address, names_length = names.buffer_info()

                # Create a mutable byte buffer to store the data in
                mutable_byte_buffer = struct.pack('iL', MAX_BYTES, names_address)

                # mutate our mutable_byte_buffer with the results of
                # get_iface_list. fcntl.ioctl() mutates the buffer in place
                # because mutate_flag=True by default.
                mutated_byte_buffer = fcntl.ioctl(sock.fileno(),
                                                  SIOCGIFCONF,
                                                  mutable_byte_buffer)
            finally:
                # Close the socket used only for the ioctl call (the previous
                # implementation leaked it).
                sock.close()

            # Get our max_bytes of our mutated byte buffer
            # that points to the names variable address space.
            max_bytes_out, names_address_out = struct.unpack(
                'iL',
                mutated_byte_buffer)

            # `names` has been mutated by the ioctl call; its bytes now hold
            # the interface list. (A dead no-op slice statement and an unused
            # `bytes_out` variable were removed here.)
            namestr = names.tobytes()

            # Each entry is 40 bytes long. The first 16 bytes are the
            # name string. The 20-24th bytes are IP address octet strings in
            # byte form - one for each byte.
            # Don't know what 17-19 are, or bytes 25:40.
            for i in range(0, max_bytes_out, 40):
                name = namestr[i: i + 16].split(FILL_CHAR, 1)[0]
                name = name.decode()
                ip_bytes = namestr[i + 20:i + 24]
                full_addr = []
                for netaddr in ip_bytes:
                    if isinstance(netaddr, int):
                        full_addr.append(str(netaddr))
                    elif isinstance(netaddr, str):
                        full_addr.append(str(ord(netaddr)))
                if not name.startswith(excluded_interfaces) or all_:
                    ip_dict[name] = '.'.join(full_addr)
        else:
            try:
                import netifaces
            except ImportError:
                # Typo fixed in the message: the package is `netifaces`
                CLI.colored_print('You must install netifaces first! Please '
                                  'type `pip install netifaces --user`',
                                  CLI.COLOR_ERROR)
                sys.exit(1)

            for interface in netifaces.interfaces():
                if not interface.startswith(excluded_interfaces) or all_:
                    ifaddresses = netifaces.ifaddresses(interface)
                    if (
                        ifaddresses.get(netifaces.AF_INET)
                        and ifaddresses.get(netifaces.AF_INET)[0].get('addr')
                    ):
                        addresses = ifaddresses.get(netifaces.AF_INET)
                        ip_dict[interface] = addresses[0].get('addr')
                        # Extra addresses on the same interface are exposed as
                        # virtual interfaces, e.g. `eth0:1`
                        for i in range(1, len(addresses)):
                            virtual_interface = '{interface}:{idx}'.format(
                                interface=interface,
                                idx=i
                            )
                            ip_dict[virtual_interface] = addresses[i]['addr']

        return ip_dict

    @staticmethod
    def get_primary_ip():
        """
        Return the primary outbound IP address of this machine, or None when
        it cannot be determined.
        https://stackoverflow.com/a/28950776/1141214
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # doesn't even have to be reachable — no packet is sent for a UDP
            # connect. …but it can't be a broadcast address, or you'll get
            # `Permission denied`. See recent comments on the same SO answer:
            # https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib/28950776#comment128390746_28950776
            s.connect(('10.255.255.254', 1))
            ip_address = s.getsockname()[0]
        except OSError:
            # Narrowed from a bare `except`: socket failures raise OSError
            ip_address = None
        finally:
            s.close()
        return ip_address

    @classmethod
    def get_primary_interface(cls):
        """
        Return the name of the interface bound to the primary IP, falling
        back to 'eth0' when no match is found.
        """
        primary_ip = cls.get_primary_ip()
        local_interfaces = cls.get_local_interfaces()

        for interface, ip_address in local_interfaces.items():
            if ip_address == primary_ip:
                return interface

        return 'eth0'

    @staticmethod
    def status_check(hostname, endpoint, port=80, secure=False):
        """
        GET `endpoint` on `hostname:port` and return the HTTP status code,
        or None when the host cannot be reached.
        """
        try:
            if secure:
                conn = httplib.HTTPSConnection(
                    f'{hostname}:{port}',
                    timeout=10)
            else:
                conn = httplib.HTTPConnection(
                    f'{hostname}:{port}',
                    timeout=10)
            conn.request('GET', endpoint)
            response = conn.getresponse()
            return response.status
        except Exception:
            # Narrowed from a bare `except`: any network error means "down"
            return None

    @staticmethod
    def is_port_open(port):
        """Return True when something accepts connections on 127.0.0.1:`port`."""
        # `with` closes the socket deterministically (it used to be leaked)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            return sock.connect_ex(('127.0.0.1', int(port))) == 0

    @staticmethod
    def curl(url):
        """
        Fetch `url` and return its decoded body, or None on any failure.
        """
        try:
            response = urlopen(url)
            data = response.read()
            # `read()` always returns bytes on Python 3, so the old `str`
            # branch (Python 2) was dead code. Fall back to UTF-8 when the
            # server does not advertise a charset (the old code passed None
            # to `decode()`, which raised and was silently swallowed).
            charset = response.headers.get_content_charset() or 'utf-8'
            return data.decode(charset)
        except Exception:
            return None
204 |
--------------------------------------------------------------------------------
/templates/kobo-env/enketo_express/config.json.tpl:
--------------------------------------------------------------------------------
1 | {
2 | "app name": "Enketo Express for KoboToolbox",
3 | "linked form and data server": {
4 | "name": "KoboToolbox",
5 | "server url": "",
6 | "api key": "${ENKETO_API_KEY}"
7 | },
8 | "ip filtering": {
9 | "allowPrivateIPAddress": ${ENKETO_ALLOW_PRIVATE_IP_ADDRESS},
10 | "allowMetaIPAddress": false,
11 | "allowIPAddressList": [],
12 | "denyAddressList": []
13 | },
14 | "encryption key": "${ENKETO_ENCRYPTION_KEY}",
15 | "less secure encryption key": "${ENKETO_LESS_SECURE_ENCRYPTION_KEY}",
16 | "support": {
17 | "email": "${DEFAULT_FROM_EMAIL}"
18 | },
19 | "widgets": [
20 | "note",
21 | "select-desktop",
22 | "select-mobile",
23 | "autocomplete",
24 | "geo",
25 | "textarea",
26 | "url",
27 | "table",
28 | "radio",
29 | "date",
30 | "time",
31 | "datetime",
32 | "select-media",
33 | "file",
34 | "draw",
35 | "rank",
36 | "likert",
37 | "range",
38 | "columns",
39 | "image-view",
40 | "comment",
41 | "image-map",
42 | "date-native",
43 | "date-native-ios",
44 | "date-mobile",
45 | "text-max",
46 | "text-print",
47 | "rating",
48 | "thousands-sep",
49 | "integer",
50 | "decimal",
51 | "../../../node_modules/enketo-image-customization-widget/image-customization",
52 | "../../../node_modules/enketo-literacy-test-widget/literacywidget"
53 | ],
54 | "redis": {
55 | "cache": {
56 | "host": "redis-cache.${PRIVATE_DOMAIN_NAME}",
57 | "port": "${REDIS_CACHE_PORT}"{% if REDIS_PASSWORD %},{% endif REDIS_PASSWORD %}
58 | {% if REDIS_PASSWORD %}
59 | "password": ${REDIS_PASSWORD_JS_ENCODED}
60 | {% endif REDIS_PASSWORD %}
61 | },
62 | "main": {
63 | "host": "redis-main.${PRIVATE_DOMAIN_NAME}",
64 | "port": "${REDIS_MAIN_PORT}"{% if REDIS_PASSWORD %},{% endif REDIS_PASSWORD %}
65 | {% if REDIS_PASSWORD %}
66 | "password": ${REDIS_PASSWORD_JS_ENCODED}
67 | {% endif REDIS_PASSWORD %}
68 | }
69 | },
70 | "google": {
71 | "api key": "${GOOGLE_API_KEY}",
72 | "analytics": {
73 | "ua": "${GOOGLE_UA}",
74 | "domain": "${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}"
75 | }
76 | },
77 | "logo": {
78 | "source": "data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz48c3ZnIGlkPSJhIiB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHg9IjBweCIgeT0iMHB4IiB3aWR0aD0iMjQzLjYyNCIgaGVpZ2h0PSI2MS44MDYiIHZpZXdCb3g9IjAgMCA0MzQuNDEgNTYuODUiPjxkZWZzPjxzdHlsZT4uYntmaWxsOiMyMDk1ZjM7fS5je2ZpbGw6IzU2NWU3Njt9PC9zdHlsZT48L2RlZnM+PHBhdGggY2xhc3M9ImMiIGQ9Ik0xMTIuODQsMTMuNzNjLTEwLjgsMC0xOS4yLDguNy0xOS4yLDE5LjJzOC43LDE5LjIsMTkuMiwxOS4yLDE5LjItOC43LDE5LjItMTkuMmMuMy0xMC44LTguNC0xOS4yLTE5LjItMTkuMlptMCwzMC45Yy02LjMsMC0xMS40LTUuMS0xMS40LTExLjRzNS4xLTExLjQsMTEuNC0xMS40LDExLjQsNS4xLDExLjQsMTEuNGMuMyw2LTUuMSwxMS40LTExLjQsMTEuNFoiLz48cGF0aCBjbGFzcz0iYyIgZD0iTTE1Ny44NCwxMy43M2MtNS40LS45LTEwLjUsLjYtMTQuNCwzLjZWNy40M2MwLTMtMi40LTUuMS01LjEtNS4xaDBjLTEuNSwwLTIuNCwxLjItMi40LDIuNFY0Ni43M2MwLDMsMi40LDUuMSw1LjEsNS4xaDBjMS41LDAsMi40LTEuMiwyLjQtMi40di0uOWMzLjMsMi40LDcuMiwzLjksMTEuNCwzLjksMTEuNCwwLDIwLjQtOS45LDE5LjItMjEuNi0uOS04LjctNy44LTE1LjYtMTYuMi0xNy4xWm0tNS40LDMwLjZjLTUuNC0xLjItOS02LjMtOS0xMS43aDBjMC00LjIsMi4xLTcuOCw1LjctOS42LDguNy00LjIsMTcuMSwyLjEsMTcuMSwxMC4yLC4zLDYuOS02LjMsMTIuNi0xMy44LDExLjFaIi8+PHBhdGggY2xhc3M9ImMiIGQ9Ik0xOTYuNTQsMTMuNzNjLTEwLjgsMC0xOS4yLDguNy0xOS4yLDE5LjJzOC43LDE5LjIsMTkuMiwxOS4yLDE5LjItOC43LDE5LjItMTkuMmMuMy0xMC44LTguNC0xOS4yLTE5LjItMTkuMlptMCwzMC45Yy02LjMsMC0xMS40LTUuMS0xMS40LTExLjRzNS4xLTExLjQsMTEuNC0xMS40LDExLjQsNS4xLDExLjQsMTEuNGMuMyw2LTUuMSwxMS40LTExLjQsMTEuNFoiLz48Zz48cGF0aCBjbGFzcz0iYiIgZD0iTTI1Ni44NCwxNi4xM2MtOS45LDAtMTgsOC4xLTE4LDE4czguMSwxOCwxOCwxOCwxOC04LjEsMTgtMTgtOC4xLTE4LTE4LTE4Wm0wLDMwLjZjLTYuOSwwLTEyLjYtNS43LTEyLjYtMTIuNnM1LjctMTIuNiwxMi42LTEyLjYsMTIuNiw1LjcsMTIuNiwxMi42LTUuNywxMi42LTEyLjYsMTIuNloiLz48cGF0aCBjbGFzcz0iYiIgZD0iTTI5NS41NCwxNi4xM2MtOS45LDAtMTgsOC4xLTE4LDE4czguMSwxOCwxOCwxOCwxOC04LjEsMTgtMTgtOC4xLTE4LTE4LTE4Wm0wLDMwLjZjLTYuOSwwLTEyLjYtNS43LTEyLjYtMTIuNnM1LjctMTIuNiwxMi42LTEyLjYsMTIuNiw1LjcsMTIuNiwxMi42LTUuNywxMi42LTEyLjYsMTIuNloiLz48cGF0aCBjbGFzcz0iYiIgZD0iTTM0Ni44NCwxNi4xM2MtNC44LDAtOS4zLDEuOC0xMi42LDUuMVY5LjUzYzAtMi4xLTEuOC
0zLjYtMy42LTMuNi0uOSwwLTEuOCwuOS0xLjgsMS44VjQ4LjUzYzAsMi4xLDEuOCwzLjYsMy42LDMuNiwuOSwwLDEuOC0uOSwxLjgtMS44di0zYzMuMywzLDcuNSw1LjEsMTIuNiw1LjEsOS45LDAsMTgtOC4xLDE4LTE4cy03LjgtMTguMy0xOC0xOC4zWm0wLDMwLjZjLTYuOSwwLTEyLjYtNS43LTEyLjYtMTIuNnM1LjctMTIuNiwxMi42LTEyLjYsMTIuNiw1LjcsMTIuNiwxMi42LTUuNywxMi42LTEyLjYsMTIuNloiLz48cGF0aCBjbGFzcz0iYiIgZD0iTTM4NS44NCwxNi4xM2MtOS45LDAtMTgsOC4xLTE4LDE4czguMSwxOCwxOCwxOCwxOC04LjEsMTgtMTgtOC4xLTE4LTE4LTE4Wm0wLDMwLjZjLTYuOSwwLTEyLjYtNS43LTEyLjYtMTIuNnM1LjctMTIuNiwxMi42LTEyLjYsMTIuNiw1LjcsMTIuNiwxMi42LTUuNywxMi42LTEyLjYsMTIuNloiLz48cGF0aCBjbGFzcz0iYiIgZD0iTTMxOS4yNCw1LjYzYy0uOSwwLTEuOCwuOS0xLjgsMS44VjQ4LjIzYzAsMi4xLDEuOCwzLjYsMy42LDMuNiwuOSwwLDEuOC0uOSwxLjgtMS44VjkuNTNjLjMtMi4xLTEuNS0zLjktMy42LTMuOVoiLz48cGF0aCBjbGFzcz0iYiIgZD0iTTQyMC45NCwzNC4xM2wxMC4yLTE1LjljLjYtLjksMC0yLjQtMS4yLTIuNGgwYy0yLjQsMC00LjgsMS4yLTYsMy4zbC02LDkuNi02LTkuNmMtMS4yLTIuMS0zLjYtMy4zLTYtMy4zLTEuMiwwLTEuOCwxLjItMS4yLDIuNGwxMC4yLDE1LjktMTAuMiwxNS45Yy0uNiwuOSwwLDIuNCwxLjIsMi40aDBjMi40LDAsNC44LTEuMiw2LTMuM2w2LTkuNiw2LDkuNmMxLjIsMi4xLDMuNiwzLjMsNiwzLjNoMGMxLjIsMCwxLjgtMS4yLDEuMi0yLjRsLTEwLjItMTUuOVoiLz48cGF0aCBjbGFzcz0iYiIgZD0iTTI0Mi43NCwxMy43M2MyLjEsMCwzLjYtMS44LDMuNi0zLjYsMC0uOS0uOS0xLjgtMS44LTEuOGgtMjcuM2MtMi4xLDAtMy42LDEuOC0zLjYsMy42LDAsLjksLjksMS44LDEuOCwxLjhoMTEuN1Y0OC41M2MwLDIuMSwxLjgsMy42LDMuNiwzLjYsLjksMCwxLjgtLjksMS44LTEuOFYxMy43M2gxMC4yWiIvPjwvZz48cGF0aCBjbGFzcz0iYyIgZD0iTTk2LjY0LDQ5LjEzbC0yMS4zLTIxLjMsMTcuNy0xNy43Yy45LS45LC4zLTIuNC0uOS0yLjRoLTMuM2MtMi43LDAtNS40LDEuMi03LjIsM2wtMTUuOSwxNS45VjEyLjgzYzAtMy0yLjQtNS4xLTUuMS01LjFoMGMtMS4yLDAtMi40LDEuMi0yLjQsMi40VjQ2LjczYzAsMywyLjQsNS4xLDUuMSw1LjFoMGMxLjIsMCwyLjQtMS4yLDIuNC0yLjR2LTcuOGMwLTIuNCwuOS00LjUsMi40LTZsMi4xLTIuMSwxNSwxNWMxLjgsMS44LDQuNSwzLDcuMiwzaDMuM2MxLjIsMCwxLjgtMS41LC45LTIuNFoiLz48Zz48cGF0aCBjbGFzcz0iYiIgZD0iTTMxLjU0LDM4LjAzdjUuMWMwLDMtMi40LDUuMS01LjEsNS4xSDE1LjM0Yy0zLDAtNS4xLTIuNC01LjEtNS4xVjE0LjMzYzAtMywyLjQtNS4xLDUuMS01LjFoMTEuMWMzLDAsNS4xLDIuNCw1LjEsNS4xdi42Yy45LS4zLDIuMS0uNiwzLS42LDEuMiwwLDIuNCwuMywzLj
YsLjZ2LS45YzAtNi42LTUuNC0xMi0xMi0xMkgxNS4wNEM4LjQ0LDIuMDMsMy4wNCw3LjQzLDMuMDQsMTQuMDN2MjguOGMwLDYuNiw1LjQsMTIsMTIsMTJoMTEuMWM2LjYsMCwxMi01LjQsMTItMTJ2LTEyLjlsLTYuNiw4LjFaIi8+PHBhdGggY2xhc3M9ImIiIGQ9Ik0yNi4xNCwzNi4yM2wxMi0xMy44Yy4zLS4zLC4zLTEuMiwwLTEuNWgwYy0yLjEtMS44LTUuNC0xLjUtNy4yLC42bC03LjUsOC43Yy0uMywuMy0uNiwuMy0uNiwwbC0yLjctMy4zYy0uNi0uNi0xLjUtLjYtMi4xLDBoMGMtMS44LDEuOC0xLjgsNC44LS4zLDYuOWwyLjQsMi43YzEuMiwxLjgsNC4yLDEuOCw2LS4zWiIvPjwvZz48L3N2Zz4K",
79 | "href": ""
80 | },
81 | "payload limit": "1mb",
82 | "text field character limit": 1000000,
83 | "maps": [
84 | {
85 | "name": "humanitarian",
86 | "tiles": [ "https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png" ],
87 | "attribution": "© OpenStreetMap & Yohan Boniface & Humanitarian OpenStreetMap Team | Terms"
88 | }, {
89 | "name": "satellite",
90 | "tiles": [ "https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}" ],
91 | "attribution": "Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community"
92 | }, {
93 | "name": "terrain",
94 | "tiles": [ "https://{s}.tile.opentopomap.org/{z}/{x}/{y}.png" ],
95 | "attribution": "© OpenStreetMap | Terms"
96 | }, {
97 | "name": "streets",
98 | "tiles": [ "https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png" ],
99 | "attribution": "© OpenStreetMap | Terms"
100 | }
101 | ]
102 | }
103 |
--------------------------------------------------------------------------------
/helpers/upgrading.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import annotations
3 |
4 | import subprocess
5 | import sys
6 | from shutil import which
7 |
8 | from helpers.cli import CLI
9 | from helpers.utils import run_docker_compose
10 |
11 |
class Upgrading:
    """
    Helpers to migrate configurations and data created by older versions
    of kobo-install to the current layout.
    """

    @staticmethod
    def migrate_single_to_two_databases(config: 'helpers.Config'):
        """
        Check the contents of the databases. If KPI's is empty or doesn't exist
        while KoboCAT's has user data, then we are migrating from a
        single-database setup and the clone script must be run.

        Args:
            config (helpers.config.Config)
        """
        dict_ = config.get_dict()

        def _kpi_db_alias_kludge(command):
            """
            Sorry, this is not very nice. See
            https://github.com/kobotoolbox/kobo-docker/issues/264.
            """
            set_env = 'DATABASE_URL="${KPI_DATABASE_URL}"'
            return ['bash', '-c', f'{set_env} {command}']

        kpi_run_command = run_docker_compose(dict_, [
            '-f', 'docker-compose.frontend.yml',
            '-f', 'docker-compose.frontend.override.yml',
            '-p', config.get_prefix('frontend'),
            'run', '--rm', 'kpi'
        ])

        # Make sure Postgres is running
        # We add this message to users because when AWS backups are activated,
        # it takes a long time to install the virtualenv in PostgreSQL
        # container, so the `wait_for_database` below sits there a while.
        # It makes us think kobo-install is frozen.
        CLI.colored_print(
            'Waiting for PostgreSQL database to be up & running...',
            CLI.COLOR_INFO)
        frontend_command = kpi_run_command + _kpi_db_alias_kludge(' '.join([
            'python', 'manage.py',
            'wait_for_database', '--retries', '45'
        ]))
        CLI.run_command(frontend_command, dict_['kobodocker_path'])
        CLI.colored_print('The PostgreSQL database is running!',
                          CLI.COLOR_SUCCESS)

        frontend_command = kpi_run_command + _kpi_db_alias_kludge(' '.join([
            'python', 'manage.py',
            'is_database_empty', 'kpi', 'kobocat'
        ]))
        output = CLI.run_command(frontend_command, dict_['kobodocker_path'])
        # TODO: read only stdout and don't consider stderr unless the exit code
        # is non-zero. Currently, `output` combines both stdout and stderr
        kpi_kc_db_empty = output.strip().split('\n')[-1]

        if kpi_kc_db_empty == 'True\tFalse':
            # KPI empty but KC is not: run the two-database upgrade script
            CLI.colored_print(
                'Upgrading from single-database setup to separate databases '
                'for KPI and KoboCAT',
                CLI.COLOR_INFO
            )
            message = (
                'Upgrading to separate databases is required to run the latest '
                'release of KoboToolbox, but it may be a slow process if you '
                'have a lot of data. Expect at least one minute of downtime '
                'for every 1,500 KPI assets. Assets are surveys and library '
                'items: questions, blocks, and templates.\n'
                '\n'
                'To postpone this process, downgrade to the last '
                'single-database release by stopping this script and executing '
                'the following commands:\n'
                '\n'
                '       python3 run.py --stop\n'
                '       git fetch\n'
                '       git checkout shared-database-obsolete\n'
                '       python3 run.py --update\n'
                '       python3 run.py --setup\n'
            )
            CLI.framed_print(message)
            message = (
                'For help, visit https://community.kobotoolbox.org/t/upgrading-'
                'to-separate-databases-for-kpi-and-kobocat/7202.'
            )
            CLI.colored_print(message, CLI.COLOR_WARNING)
            response = CLI.yes_no_question(
                'Do you want to proceed?',
                default=False
            )
            if response is False:
                sys.exit(0)

            # Fixed: these two file names had useless `f` prefixes
            # (no placeholders).
            backend_command = run_docker_compose(dict_, [
                '-f', 'docker-compose.backend.yml',
                '-f', 'docker-compose.backend.override.yml',
                '-p', config.get_prefix('backend'),
                'exec', 'postgres', 'bash',
                '/kobo-docker-scripts/scripts/clone_data_from_kc_to_kpi.sh',
                '--noinput'
            ])
            try:
                subprocess.check_call(
                    backend_command, cwd=dict_['kobodocker_path']
                )
            except subprocess.CalledProcessError:
                CLI.colored_print('An error has occurred', CLI.COLOR_ERROR)
                sys.exit(1)

        elif kpi_kc_db_empty not in [
            'True\tTrue',
            'False\tTrue',
            'False\tFalse',
        ]:
            # The output was invalid
            CLI.colored_print('An error has occurred', CLI.COLOR_ERROR)
            sys.stderr.write(kpi_kc_db_empty)
            sys.exit(1)

    @staticmethod
    def two_databases(upgraded_dict: dict, current_dict: dict) -> dict:
        """
        If the configuration came from a previous version that had a single
        Postgres database, we need to make sure the new `kc_postgres_db` is
        set to the name of that single database, *not* the default from
        `Config.get_template()`

        Args:
            upgraded_dict (dict): Configuration values to be upgraded
            current_dict (dict): Current configuration values
                                 (i.e. `Config.get_dict()`)
        Returns:
            dict
        """
        if 'postgres_db' not in current_dict:
            # Install has been made with two databases.
            return upgraded_dict

        if 'kc_postgres_db' not in current_dict:
            # Configuration does not have names of KPI and KoboCAT databases.
            # Let's copy old single database name to KoboCAT database name
            upgraded_dict['kc_postgres_db'] = current_dict['postgres_db']

            # Force this property to False. It helps to detect whether the
            # database names have changed in `Config.__questions_postgres()`
            upgraded_dict['two_databases'] = False

        return upgraded_dict

    @staticmethod
    def use_booleans(upgraded_dict: dict) -> dict:
        """
        Until version 3.x, two constants (`Config.TRUE` and `Config.FALSE`)
        were used to store "Yes/No" users' responses. It made the code more
        complex than it should have been.
        This method converts these values to boolean.
        - `Config.TRUE` -> `True`
        - `Config.FALSE` -> `False`

        Args:
            upgraded_dict (dict): Configuration values to be upgraded

        Returns:
            dict
        """
        if 'use_booleans_v4' in upgraded_dict:
            # Migration already performed; nothing to do.
            return upgraded_dict

        boolean_properties = [
            'advanced',
            'aws_backup_bucket_deletion_rule_enabled',
            'backup_from_primary',
            'block_common_http_ports',
            'custom_secret_keys',
            'customized_ports',
            'debug',
            'dev_mode',
            'expose_backend_ports',
            'https',
            'local_installation',
            'multi',
            'npm_container',
            'postgres_settings',
            'proxy',
            'raven_settings',
            'review_host',
            'smtp_use_tls',
            'staging_mode',
            'two_databases',
            'use_aws',
            'use_backup',
            'use_letsencrypt',
            'use_private_dns',
            'uwsgi_settings',
        ]
        for property_ in boolean_properties:
            try:
                value = upgraded_dict[property_]
            except KeyError:
                # Property absent from this configuration; skip it.
                continue
            if not isinstance(value, bool):
                # Legacy values were the strings '1' (True) and '2' (False).
                upgraded_dict[property_] = value == '1'

        upgraded_dict['use_booleans_v4'] = True

        return upgraded_dict
227 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | The purpose of the script is to install KoboToolbox in minutes without messing with configuration files.
2 | It prompts the user to answer some questions to create configuration files automatically and to start docker containers based on [`kobo-docker`](https://github.com/kobotoolbox/kobo-docker "").
3 |
4 | ## :warning: You _must observe_ the following when upgrading:
5 |
6 | ### …from any release older than [`2.022.44`](https://github.com/kobotoolbox/kobo-install/releases/tag/2.022.44) (November 2022)
7 |
8 | If you have already installed KoboToolbox between March 2019 and November 2022, you **must** complete [a manual upgrade process](https://github.com/kobotoolbox/kobo-docker/blob/master/doc/November-2022-Upgrade.md) before trying to upgrade. **If you do not, `kobo-install` will not be able to start.**
9 |
10 | ### …from any release older than [`2.020.18`](https://github.com/kobotoolbox/kobo-install/releases/tag/2.020.18) (May 2020)
11 |
12 | Prior to release [`2.020.18`](https://github.com/kobotoolbox/kobo-install/releases/tag/2.020.18), [KPI](https://github.com/kobotoolbox/kpi) and [KoBoCAT](https://github.com/kobotoolbox/kobocat) both shared a common Postgres database. They now each have their own. **If you are upgrading an existing single-database installation, you must follow [these instructions](https://community.kobotoolbox.org/t/upgrading-to-separate-databases-for-kpi-and-kobocat/7202)** to migrate the KPI tables to a new database and adjust your configuration appropriately.
13 |
14 | If you do not want to upgrade at this time, please use the [`shared-database-obsolete`](https://github.com/kobotoolbox/kobo-install/tree/shared-database-obsolete) branch instead.
15 |
16 | ### …installations made prior to March 2019
17 |
18 | If you have already installed KoboToolbox with `kobo-docker` prior to March 2019,
19 | you **must** complete [a manual upgrade process](https://github.com/kobotoolbox/kobo-docker/#important-notice-when-upgrading-from-commit-5c2ef02-march-4-2019-or-earlier)
20 | before using this repository. **If you do not, `kobo-install` will not be able to start.**
21 |
22 | ## Versions
23 |
24 | Release branches `release/*` (e.g. `release/2.024.36`) are the recommended branches to use with `kobo-install` on your production environment. From the `kpi` folder run `git branch -rl 'origin/release/*'` to list release branches and then switch to a release branch of your choice.
25 |
26 | Branch `main` is a pre-release of the next version. It contains new features and bug fixes.
27 |
28 | Other branches are for development purposes.
29 |
30 | ## Usage
31 |
32 | `$kobo-install> python3 run.py`
33 |
34 | The first time the command is executed, the setup will be launched.
35 | Subsequent executions will launch docker containers directly.
36 |
37 | Rebuild configuration:
38 | `$kobo-install> python3 run.py --setup`
39 |
40 | Get info:
41 | `$kobo-install> python3 run.py --info`
42 |
43 | Get docker logs:
44 | `$kobo-install> python3 run.py --logs`
45 |
46 | Update KoboToolbox:
47 | `$kobo-install> python3 run.py --update [branch or tag]`
48 |
49 | By default, this fetches the latest version of the `master` branch.
50 |
51 |
52 | Stop KoboToolbox:
53 | `$kobo-install> python3 run.py --stop`
54 |
55 | Get help:
56 | `$kobo-install> python3 run.py --help`
57 |
58 | Get version:
59 | `$kobo-install> python3 run.py --version`
60 |
61 | Build kpi and kobocat (dev mode):
62 | `$kobo-install> python3 run.py --build`
63 |
64 | Run docker commands on front-end containers:
65 | `$kobo-install> python3 run.py --compose-frontend [docker-compose arguments]`
66 |
67 | Run docker commands on back-end containers:
68 | `$kobo-install> python3 run.py --compose-backend [docker-compose arguments]`
69 |
70 | Start maintenance mode:
71 | `$kobo-install> python3 run.py --maintenance`
72 |
73 | Stop maintenance mode:
74 | `$kobo-install> python3 run.py --stop-maintenance`
75 |
76 |
77 | ## Build the configuration
78 | User can choose between 2 types of installations:
79 |
80 | - `Workstation`: KoboToolbox doesn't need to be accessible from anywhere except the computer where it's installed. No DNS needed
81 | - `Server`: KoboToolbox needs to be accessible from the local network or from the Internet. DNS are needed
82 |
83 | ### Options
84 |
85 | |Option|Default|Workstation|Server
86 | |---|---|---|---|
87 | |Installation directory| **../kobo-docker** | ✓ | ✓ |
88 | |SMTP information| | ✓ | ✓ (front end only) |
89 | |Public domain name| **kobo.local** | | ✓ (front end only) |
90 | |Subdomain names| **kf, kc, ee** | | ✓ (front end only) |
91 | |Use HTTPS1| **False** (Workstation) <br /> **True** (Server) | | ✓ (front end only) |
92 | |Super user's username| **super_admin** | ✓ | ✓ (front end only) |
93 | |Super user's password| **Random string** | ✓ | ✓ (front end only) |
94 | |Activate backups2| **False** | ✓ | ✓ (back end only) |
95 |
96 | ### Advanced Options
97 |
98 | | Option |Default|Workstation|Server
99 | |-------------------------------------------------|---|---|---|
100 | | Webserver port | **80** | ✓ | |
101 | | Reverse proxy internal port | **8080** | | ✓ (front end only) |
102 | | Network interface | **Autodetected** | ✓ | ✓ (front end only) |
103 | | Use separate servers | **No** | | ✓ |
104 | | Use DNS for private routes | **No** | | ✓ (front end only) |
105 | | Back-end server IP _(if previous answer is no)_ | **Local IP** | | ✓ (front end only) |
106 | | PostgreSQL DB | **kobo** | ✓ | ✓ |
107 | | PostgreSQL user's username | **kobo** | ✓ | ✓ |
108 | | PostgreSQL user's password | **Autogenerate** | ✓ | ✓ |
109 | | PostgreSQL number of connections3 | **100** | ✓ | ✓ (back end only) |
110 | | PostgreSQL RAM3 | **2** | ✓ | ✓ (back end only) |
111 | | PostgreSQL Application Profile3 | **Mixed** | ✓ | ✓ (back end only) |
112 | | PostgreSQL Storage3 | **HDD** | ✓ | ✓ (back end only) |
113 | | MongoDB super user's username | **root** | ✓ | ✓ |
114 | | MongoDB super user's password | **Autogenerate** | ✓ | ✓ |
115 | | MongoDB user's username | **kobo** | ✓ | ✓ |
116 | | MongoDB user's password | **Autogenerate** | ✓ | ✓ |
117 | | Redis password4 | **Autogenerate** | ✓ | ✓ |
118 | | Use AWS storage5 | **No** | ✓ | ✓ |
119 | | uWGI workers | **start: 2, max: 4** | ✓ | ✓ (front end only) |
120 | | uWGI memory limit | **128 MB** | ✓ | ✓ (front end only) |
121 | | uWGI harakiri timeout | **120s** | ✓ | ✓ (front end only) |
122 | | uWGI worker reload timeout | **120s** | ✓ | ✓ (front end only) |
123 | | Google UA | | ✓ | ✓ (front end only) |
124 | | Google API Key | | ✓ | ✓ (front end only) |
125 | | Sentry tokens | | ✓ | ✓ (front end only) |
126 | | Debug | **False** | ✓ | |
127 | | Developer mode | **False** | ✓ | |
128 | | Staging mode | **False** | | ✓ (front end only) |
129 |
130 | 1) _HTTPS certificates must be installed on a Reverse Proxy.
131 | `kobo-install` can install one and use `Let's Encrypt` to generate certificates
132 | thanks
133 | to [nginx-certbot project](https://github.com/wmnnd/nginx-certbot "")_
134 |
135 | 2) _If AWS credentials are provided, backups are sent to configured bucket_
136 |
137 | 3) _Custom settings are provided by [PostgreSQL Configuration Tool API](https://github.com/sebastianwebber/pgconfig-api "")_
138 |
139 | 4) _Redis password is optional but **strongly** recommended_
140 |
141 | 5) _If AWS storage is selected, credentials must be provided if backups are activated_
142 |
143 | ## Requirements
144 |
145 | - Linux 5 / macOS 6
146 | - Python 3.10+
147 | - [Docker](https://www.docker.com/get-started "") 7
148 | - Available TCP Ports: 8
149 |
150 | 1. 80 NGINX
151 | 1. 443 NGINX (if you use kobo-install with LetsEncrypt proxy)
152 | 2. Additional ports when `expose ports` advanced option has been selected
153 | 1. 5432 PostgreSQL
154 | 3. 6379-6380 redis
155 | 4. 27017 MongoDB
156 |
157 | _**WARNING:**_
158 |
159 | - _If you use a firewall, be sure to open traffic publicly on NGINX port, otherwise kobo-install cannot work_
160 | - _By default, additional ports are not exposed except when using multi servers configuration. If you choose to expose them, **be sure to not expose them publicly** (e.g. use a firewall and allow traffic between front-end and back-end containers only. NGINX port still has to stay publicly opened though)._
161 |
162 | 5) _It has been tested with Ubuntu 20.04, 22.04 and 24.04_
163 |
164 | 6) _Docker on macOS is slow. First boot usually takes a while to be ready. You may have to answer `Yes` once or twice to question `Wait for another 600 seconds?` when prompted_
165 |
166 | 7) _Compose V1 is **NOT** supported anymore. It has reached its EOL from July 2023_
167 |
168 | 8) _These are defaults but can be customized with advanced options_
169 |
170 |
171 | ## Development
172 |
173 | ### React files: Hot Module Reload (HMR) by Webpack
174 | For frontend file changes to take effect, run watch in terminal and open/refresh http://kf.kobo.local to see your changes hot reloaded (don’t worry about first timeout error, it’s still building):
175 |
176 | ```shell
177 | ./run.py -cf run --rm --publish 3000:3000 kpi npm run watch && ./run.py -cf restart kpi
178 | ```
179 |
180 | The script creates a new docker container for frontend in `npm run watch` mode within the same docker network with the same (internal) port.
181 | Using the same port will overshadow the original kpi container’s ports and nginx will instantly serve the new container instead.
182 | Unfortunately the port overshadowing doesn’t nicely undo itself and a restart of `kpi` is required.
183 | Once the container exits (hit CTRL+C **once**), the container will automatically remove itself and initiate kpi restart which will take up to few minutes.
184 |
185 | It should as well handle dependency changes, maybe except for webpack itself.
186 |
187 | You can also use [this gist](https://gist.github.com/jnm/dd323e0ff5be0d79e12e76bb9dfb7aed) to refresh front-end files without rebuilding the container.
188 |
189 | ### Tests
190 |
191 | Tests can be run with `tox`.
192 | Be sure it is installed before running the tests.
193 |
194 | ```
195 | $kobo-install> sudo apt install python3-pip
196 | $kobo-install> pip3 install tox
197 | $kobo-install> tox
198 | ```
199 | or
200 |
201 | ```
202 | $kobo-install> sudo apt install tox
203 | $kobo-install> tox
204 | ```
205 |
--------------------------------------------------------------------------------
/templates/kobo-docker/docker-compose.frontend.override.yml.tpl:
--------------------------------------------------------------------------------
1 | # For public, HTTPS servers.
2 |
3 | services:
4 | kpi:
5 | ${USE_KPI_DEV_MODE} build: ${KPI_PATH}
6 | ${USE_KPI_DEV_MODE} image: kpi:dev.${KPI_DEV_BUILD_ID}
7 | ${USE_KPI_DEV_MODE} volumes:
8 | ${USE_KPI_DEV_MODE} - ${KPI_PATH}:/srv/src/kpi
9 | environment:
10 | - UWSGI_WORKERS_COUNT=${UWSGI_WORKERS_MAX}
11 | - UWSGI_CHEAPER_WORKERS_COUNT=${UWSGI_WORKERS_START}
12 | - UWSGI_MAX_REQUESTS=${UWSGI_MAX_REQUESTS}
13 | - UWSGI_CHEAPER_RSS_LIMIT_SOFT=${UWSGI_SOFT_LIMIT}
14 | - UWSGI_HARAKIRI=${UWSGI_HARAKIRI}
15 | - UWSGI_WORKER_RELOAD_MERCY=${UWSGI_WORKER_RELOAD_MERCY}
16 | - WSGI=${WSGI}
17 | ${USE_CELERY} - SKIP_CELERY=True
18 | ${USE_DEV_MODE} - DJANGO_SETTINGS_MODULE=kobo.settings.dev
19 | ${USE_HTTPS} - SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,https
20 | ${USE_NPM_FROM_HOST} - FRONTEND_DEV_MODE=host
21 | ${USE_EXTRA_HOSTS}extra_hosts:
22 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
23 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
24 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
25 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
26 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
27 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
28 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
29 | ${USE_BACKEND_NETWORK}networks:
30 | ${USE_BACKEND_NETWORK} kobo-be-network:
31 | ${USE_BACKEND_NETWORK} aliases:
32 | ${USE_BACKEND_NETWORK} - kpi
33 | ${USE_BACKEND_NETWORK} - kpi.docker.container
34 |
35 | worker:
36 | ${USE_KPI_DEV_MODE} build: ${KPI_PATH}
37 | ${USE_KPI_DEV_MODE} image: kpi:dev.${KPI_DEV_BUILD_ID}
38 | ${USE_KPI_DEV_MODE} volumes:
39 | ${USE_KPI_DEV_MODE} - ${KPI_PATH}:/srv/src/kpi
40 | environment:
41 | - WSGI=${WSGI}
42 | ${USE_DEV_MODE} - DJANGO_SETTINGS_MODULE=kobo.settings.dev
43 | ${USE_HTTPS} - SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,https
44 | ${USE_EXTRA_HOSTS}extra_hosts:
45 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
46 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
47 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
48 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
49 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
50 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
51 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
52 | ${USE_BACKEND_NETWORK}networks:
53 | ${USE_BACKEND_NETWORK} kobo-be-network:
54 | ${USE_BACKEND_NETWORK} aliases:
55 | ${USE_BACKEND_NETWORK} - worker
56 | ${USE_BACKEND_NETWORK} - worker.docker.container
57 |
58 | worker_kobocat:
59 | ${USE_KPI_DEV_MODE} build: ${KPI_PATH}
60 | ${USE_KPI_DEV_MODE} image: kpi:dev.${KPI_DEV_BUILD_ID}
61 | ${USE_KPI_DEV_MODE} volumes:
62 | ${USE_KPI_DEV_MODE} - ${KPI_PATH}:/srv/src/kpi
63 | environment:
64 | - WSGI=${WSGI}
65 | ${USE_DEV_MODE} - DJANGO_SETTINGS_MODULE=kobo.settings.dev
66 | ${USE_HTTPS} - SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,https
67 | ${USE_EXTRA_HOSTS}extra_hosts:
68 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
69 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
70 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
71 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
72 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
73 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
74 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
75 | ${USE_BACKEND_NETWORK}networks:
76 | ${USE_BACKEND_NETWORK} kobo-be-network:
77 | ${USE_BACKEND_NETWORK} aliases:
78 | ${USE_BACKEND_NETWORK} - worker_kobocat
79 | ${USE_BACKEND_NETWORK} - worker_kobocat.docker.container
80 |
81 | worker_low_priority:
82 | ${USE_KPI_DEV_MODE} build: ${KPI_PATH}
83 | ${USE_KPI_DEV_MODE} image: kpi:dev.${KPI_DEV_BUILD_ID}
84 | ${USE_KPI_DEV_MODE} volumes:
85 | ${USE_KPI_DEV_MODE} - ${KPI_PATH}:/srv/src/kpi
86 | environment:
87 | - WSGI=${WSGI}
88 | ${USE_DEV_MODE} - DJANGO_SETTINGS_MODULE=kobo.settings.dev
89 | ${USE_HTTPS} - SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,https
90 | ${USE_EXTRA_HOSTS}extra_hosts:
91 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
92 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
93 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
94 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
95 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
96 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
97 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
98 | ${USE_BACKEND_NETWORK}networks:
99 | ${USE_BACKEND_NETWORK} kobo-be-network:
100 | ${USE_BACKEND_NETWORK} aliases:
101 | ${USE_BACKEND_NETWORK} - worker_low_priority
102 | ${USE_BACKEND_NETWORK} - worker_low_priority.docker.container
103 |
104 | worker_long_running_tasks:
105 | ${USE_KPI_DEV_MODE} build: ${KPI_PATH}
106 | ${USE_KPI_DEV_MODE} image: kpi:dev.${KPI_DEV_BUILD_ID}
107 | ${USE_KPI_DEV_MODE} volumes:
108 | ${USE_KPI_DEV_MODE} - ${KPI_PATH}:/srv/src/kpi
109 | environment:
110 | - WSGI=${WSGI}
111 | ${USE_DEV_MODE} - DJANGO_SETTINGS_MODULE=kobo.settings.dev
112 | ${USE_HTTPS} - SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,https
113 | ${USE_EXTRA_HOSTS}extra_hosts:
114 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
115 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
116 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
117 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
118 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
119 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
120 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
121 | ${USE_BACKEND_NETWORK}networks:
122 | ${USE_BACKEND_NETWORK} kobo-be-network:
123 | ${USE_BACKEND_NETWORK} aliases:
124 | ${USE_BACKEND_NETWORK} - worker_long_running_tasks
125 | ${USE_BACKEND_NETWORK} - worker_long_running_tasks.docker.container
126 |
127 | beat:
128 | ${USE_KPI_DEV_MODE} build: ${KPI_PATH}
129 | ${USE_KPI_DEV_MODE} image: kpi:dev.${KPI_DEV_BUILD_ID}
130 | ${USE_KPI_DEV_MODE} volumes:
131 | ${USE_KPI_DEV_MODE} - ${KPI_PATH}:/srv/src/kpi
132 | environment:
133 | - WSGI=${WSGI}
134 | ${USE_DEV_MODE} - DJANGO_SETTINGS_MODULE=kobo.settings.dev
135 | ${USE_HTTPS} - SECURE_PROXY_SSL_HEADER=HTTP_X_FORWARDED_PROTO,https
136 | ${USE_EXTRA_HOSTS}extra_hosts:
137 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
138 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
139 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
140 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
141 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
142 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
143 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
144 | ${USE_BACKEND_NETWORK}networks:
145 | ${USE_BACKEND_NETWORK} kobo-be-network:
146 | ${USE_BACKEND_NETWORK} aliases:
147 | ${USE_BACKEND_NETWORK} - beat
148 | ${USE_BACKEND_NETWORK} - beat.docker.container
149 |
150 | nginx:
151 | environment:
152 | - NGINX_PUBLIC_PORT=${NGINX_PUBLIC_PORT}
153 | - UWSGI_PASS_TIMEOUT=${UWSGI_PASS_TIMEOUT}
154 | - WSGI=${WSGI}
155 | ${USE_LETSENSCRYPT}ports:
156 | ${USE_LETSENSCRYPT} - ${NGINX_EXPOSED_PORT}:80
157 | ${USE_EXTRA_HOSTS}extra_hosts:
158 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
159 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
160 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
161 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
162 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
163 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
164 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
165 | networks:
166 | kobo-fe-network:
167 | aliases:
168 | - ${KOBOFORM_SUBDOMAIN}.${INTERNAL_DOMAIN_NAME}
169 | - ${KOBOCAT_SUBDOMAIN}.${INTERNAL_DOMAIN_NAME}
170 | - ${ENKETO_SUBDOMAIN}.${INTERNAL_DOMAIN_NAME}
171 |
172 | enketo_express:
    # `DUMMY_ENV` is only there to avoid an extra, complex condition deciding
    # whether to override the `enketo_express` section or not. It allows this
    # section to always be kept, whatever the `USE_EXTRA_HOSTS` and
    # `USE_BACKEND_NETWORK` values are.
176 | environment:
177 | - DUMMY_ENV=True
178 | ${USE_EXTRA_HOSTS}extra_hosts:
179 | ${USE_FAKE_DNS} - ${KOBOFORM_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
180 | ${USE_FAKE_DNS} - ${KOBOCAT_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
181 | ${USE_FAKE_DNS} - ${ENKETO_SUBDOMAIN}.${PUBLIC_DOMAIN_NAME}:${LOCAL_INTERFACE_IP}
182 | ${ADD_BACKEND_EXTRA_HOSTS} - postgres.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
183 | ${ADD_BACKEND_EXTRA_HOSTS} - mongo.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
184 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-main.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
185 | ${ADD_BACKEND_EXTRA_HOSTS} - redis-cache.${PRIVATE_DOMAIN_NAME}:${PRIMARY_BACKEND_IP}
186 | ${USE_BACKEND_NETWORK}networks:
187 | ${USE_BACKEND_NETWORK} kobo-be-network:
188 | ${USE_BACKEND_NETWORK} aliases:
189 | ${USE_BACKEND_NETWORK} - enketo_express
190 |
191 | ${USE_BACKEND_NETWORK}networks:
192 | ${USE_BACKEND_NETWORK} kobo-be-network:
193 | ${USE_BACKEND_NETWORK} name: ${DOCKER_NETWORK_BACKEND_PREFIX}_kobo-be-network
194 | ${USE_BACKEND_NETWORK} external: true
195 |
--------------------------------------------------------------------------------
/helpers/template.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import fnmatch
3 | import json
4 | import os
5 | import re
6 | import stat
7 | import sys
8 | from string import Template as PyTemplate
9 |
10 | from helpers.cli import CLI
11 | from helpers.config import Config
12 |
13 |
class Template:
    # Marker file written next to the generated environment files; it stores
    # the configuration's `unique_id` so later runs can detect whether the
    # files about to be overwritten belong to another configuration.
    UNIQUE_ID_FILE = '.uniqid'

    @classmethod
    def render(cls, config, force=False):
        """
        Write configuration files based on `config`.

        Renders every `*.tpl` template (environment files, kobo-docker
        overrides and, when Let's Encrypt is enabled, the nginx-certbot
        files) into its destination directory.

        Args:
            config (helpers.config.Config)
            force (bool): when True, skip the overwrite confirmation prompt
        """

        dict_ = config.get_dict()
        template_variables = cls.__get_template_variables(config)

        environment_directory = config.get_env_files_path()
        unique_id = cls.__read_unique_id(environment_directory)

        # Warn (and ask) before clobbering environment files whose recorded
        # unique id does not match the current configuration's.
        if (
            not force and unique_id
            and str(dict_.get('unique_id', '')) != str(unique_id)
        ):
            message = (
                'WARNING!\n\n'
                'Existing environment files are detected. Files will be '
                'overwritten.'
            )
            CLI.framed_print(message)
            response = CLI.yes_no_question(
                'Do you want to continue?',
                default=False
            )
            if not response:
                sys.exit(0)

        cls.__write_unique_id(environment_directory, dict_['unique_id'])

        # Environment
        templates_path_parent = cls._get_templates_path_parent()
        templates_path = os.path.join(
            templates_path_parent, Config.ENV_FILES_DIR, ''
        )
        for root, dirnames, filenames in os.walk(templates_path):
            destination_directory = cls.__create_directory(
                environment_directory,
                root,
                templates_path
            )
            cls.__write_templates(
                template_variables, root, destination_directory, filenames
            )

        # kobo-docker
        # NOTE: unlike the other walks, every rendered file goes directly
        # into `kobodocker_path` — sub-directories are not recreated.
        templates_path = os.path.join(templates_path_parent, 'kobo-docker')
        for root, dirnames, filenames in os.walk(templates_path):
            destination_directory = cls.__create_directory(dict_['kobodocker_path'])
            cls.__write_templates(
                template_variables, root, destination_directory, filenames
            )

        # nginx-certbot
        if config.use_letsencrypt:
            templates_path = os.path.join(
                templates_path_parent, Config.LETSENCRYPT_DOCKER_DIR, ''
            )
            for root, dirnames, filenames in os.walk(templates_path):
                destination_directory = cls.__create_directory(
                    config.get_letsencrypt_repo_path(),
                    root,
                    templates_path)
                cls.__write_templates(template_variables,
                                      root,
                                      destination_directory,
                                      filenames)

    @classmethod
    def render_maintenance(cls, config):
        """
        Render only the maintenance-related kobo-docker templates, i.e.
        templates whose filename contains 'maintenance'.

        Args:
            config (helpers.config.Config)
        """
        dict_ = config.get_dict()
        template_variables = cls.__get_template_variables(config)

        base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        templates_path_parent = os.path.join(base_dir, 'templates')

        # kobo-docker
        templates_path = os.path.join(templates_path_parent, 'kobo-docker')
        for root, dirnames, filenames in os.walk(templates_path):
            # Only keep the maintenance templates.
            filenames = [filename
                         for filename in filenames if 'maintenance' in filename]
            destination_directory = dict_['kobodocker_path']
            cls.__write_templates(template_variables,
                                  root,
                                  destination_directory,
                                  filenames)

    @classmethod
    def __create_directory(cls, template_root_directory, path='', base_dir=''):
        """
        Create (if needed) and return the destination directory mirroring
        `path` (a directory under `base_dir`) inside
        `template_root_directory`. Exits the process when creation fails.
        """

        # Handle case when path is root and equals ''.
        path = os.path.join(path, '')

        destination_directory = os.path.realpath(os.path.join(
            template_root_directory,
            path.replace(base_dir, '')
        ))

        if not os.path.isdir(destination_directory):
            try:
                os.makedirs(destination_directory)
            except OSError:
                CLI.colored_print(
                    f'Cannot create {destination_directory}. '
                    'Please verify permissions!',
                    CLI.COLOR_ERROR)
                sys.exit(1)

        return destination_directory

    @staticmethod
    def __get_template_variables(config):
        """
        Build the placeholder/value mapping used to render the templates.

        Placeholders that toggle optional template lines (`USE_*`,
        `ADD_BACKEND_EXTRA_HOSTS`, ...) expand to either '' (keep the line
        active) or '#' (comment the line out).

        Args:
            config (helpers.config.Config)
        """

        dict_ = config.get_dict()

        def _get_value(property_, true_value='', false_value='#',
                       comparison_value=True):
            # `true_value` when the config property equals
            # `comparison_value`, `false_value` otherwise.
            return (
                true_value
                if dict_[property_] == comparison_value
                else false_value
            )

        if config.proxy:
            nginx_port = dict_['nginx_proxy_port']
        else:
            nginx_port = dict_['exposed_nginx_docker_port']

        return {
            'PUBLIC_REQUEST_SCHEME': _get_value('https', 'https', 'http'),
            'USE_HTTPS': _get_value('https'),
            'USE_AWS': _get_value('use_aws'),
            'AWS_ACCESS_KEY_ID': dict_['aws_access_key'],
            'AWS_SECRET_ACCESS_KEY': dict_['aws_secret_key'],
            'AWS_BUCKET_NAME': dict_['aws_bucket_name'],
            'AWS_S3_REGION_NAME': dict_['aws_s3_region_name'],
            'GOOGLE_UA': dict_['google_ua'],
            'GOOGLE_API_KEY': dict_['google_api_key'],
            'INTERNAL_DOMAIN_NAME': dict_['internal_domain_name'],
            'PRIVATE_DOMAIN_NAME': dict_['private_domain_name'],
            'PUBLIC_DOMAIN_NAME': dict_['public_domain_name'],
            'KOBOFORM_SUBDOMAIN': dict_['kpi_subdomain'],
            'KOBOCAT_SUBDOMAIN': dict_['kc_subdomain'],
            'ENKETO_SUBDOMAIN': dict_['ee_subdomain'],
            'KOBO_SUPERUSER_USERNAME': dict_['super_user_username'],
            'KOBO_SUPERUSER_PASSWORD': dict_['super_user_password'],
            'ENKETO_API_KEY': dict_['enketo_api_token'],
            'DJANGO_SECRET_KEY': dict_['django_secret_key'],
            'DJANGO_SESSION_COOKIE_AGE': dict_['django_session_cookie_age'],
            'ENKETO_ENCRYPTION_KEY': dict_['enketo_encryption_key'],
            'ENKETO_LESS_SECURE_ENCRYPTION_KEY': dict_[
                'enketo_less_secure_encryption_key'
            ],
            'KOBOCAT_RAVEN_DSN': dict_['kobocat_raven'],
            'KPI_RAVEN_DSN': dict_['kpi_raven'],
            'KPI_RAVEN_JS_DSN': dict_['kpi_raven_js'],
            'KC_POSTGRES_DB': dict_['kc_postgres_db'],
            'KPI_POSTGRES_DB': dict_['kpi_postgres_db'],
            'POSTGRES_USER': dict_['postgres_user'],
            'POSTGRES_PASSWORD': dict_['postgres_password'],
            'DEBUG': dict_['debug'],
            'SMTP_HOST': dict_['smtp_host'],
            'SMTP_PORT': dict_['smtp_port'],
            'SMTP_USER': dict_['smtp_user'],
            'SMTP_PASSWORD': dict_['smtp_password'],
            'SMTP_USE_TLS': dict_['smtp_use_tls'],
            'DEFAULT_FROM_EMAIL': dict_['default_from_email'],
            'PRIMARY_BACKEND_IP': dict_['primary_backend_ip'],
            'LOCAL_INTERFACE_IP': dict_['local_interface_ip'],
            'KPI_PATH': dict_['kpi_path'],
            # Inverted toggle: dev-mode lines are active only when a KPI
            # source path has been provided.
            'USE_KPI_DEV_MODE': _get_value(
                'kpi_path', true_value='#', false_value='', comparison_value=''
            ),
            'KPI_DEV_BUILD_ID': dict_['kpi_dev_build_id'],
            # Empty for the default port so URLs omit the `:80` suffix.
            'NGINX_PUBLIC_PORT': (
                ''
                if dict_['exposed_nginx_docker_port'] == '80'
                else f":{dict_['exposed_nginx_docker_port']}"
            ),
            'NGINX_EXPOSED_PORT': nginx_port,
            'UWSGI_WORKERS_MAX': dict_['uwsgi_workers_max'],
            # Deactivate cheaper algorithm if defaults are 1 worker to start and
            # 2 maximum.
            'UWSGI_WORKERS_START': (
                ''
                if dict_['uwsgi_workers_start'] == '1'
                and dict_['uwsgi_workers_max'] == '2'
                else dict_['uwsgi_workers_start']
            ),
            'UWSGI_MAX_REQUESTS': dict_['uwsgi_max_requests'],
            # Config stores MB; uWSGI expects bytes.
            'UWSGI_SOFT_LIMIT': int(dict_['uwsgi_soft_limit']) * 1024 * 1024,
            'UWSGI_HARAKIRI': dict_['uwsgi_harakiri'],
            'UWSGI_WORKER_RELOAD_MERCY': dict_['uwsgi_worker_reload_mercy'],
            # NGINX waits a little longer than uWSGI's harakiri timeout.
            'UWSGI_PASS_TIMEOUT': int(dict_['uwsgi_harakiri']) + 10,
            'POSTGRES_REPLICATION_PASSWORD': dict_[
                'postgres_replication_password'
            ],
            'WSGI': 'runserver_plus' if config.dev_mode else 'uWSGI',
            'USE_X_FORWARDED_HOST': '' if config.dev_mode else '#',
            'OVERRIDE_POSTGRES_SETTINGS': _get_value('postgres_settings'),
            'POSTGRES_APP_PROFILE': dict_['postgres_profile'],
            'POSTGRES_RAM': dict_['postgres_ram'],
            'POSTGRES_SETTINGS': dict_['postgres_settings_content'],
            'POSTGRES_PORT': dict_['postgresql_port'],
            'MONGO_PORT': dict_['mongo_port'],
            'REDIS_MAIN_PORT': dict_['redis_main_port'],
            'REDIS_CACHE_PORT': dict_['redis_cache_port'],
            'REDIS_CACHE_MAX_MEMORY': dict_['redis_cache_max_memory'],
            'USE_BACKUP': '' if dict_['use_backup'] else '#',
            'USE_AWS_BACKUP': (
                ''
                if (
                    config.aws
                    and dict_['aws_backup_bucket_name'] != ''
                    and dict_['use_backup']
                )
                else '#'
            ),
            'USE_MEDIA_BACKUP': (
                '' if (not config.aws and dict_['use_backup']) else '#'
            ),
            'KOBOCAT_MEDIA_BACKUP_SCHEDULE': dict_[
                'kobocat_media_backup_schedule'
            ],
            'MONGO_BACKUP_SCHEDULE': dict_['mongo_backup_schedule'],
            'POSTGRES_BACKUP_SCHEDULE': dict_['postgres_backup_schedule'],
            'REDIS_BACKUP_SCHEDULE': dict_['redis_backup_schedule'],
            'AWS_BACKUP_BUCKET_NAME': dict_['aws_backup_bucket_name'],
            'AWS_BACKUP_YEARLY_RETENTION': dict_['aws_backup_yearly_retention'],
            'AWS_BACKUP_MONTHLY_RETENTION': dict_[
                'aws_backup_monthly_retention'
            ],
            'AWS_BACKUP_WEEKLY_RETENTION': dict_['aws_backup_weekly_retention'],
            'AWS_BACKUP_DAILY_RETENTION': dict_['aws_backup_daily_retention'],
            'AWS_MONGO_BACKUP_MINIMUM_SIZE': dict_[
                'aws_mongo_backup_minimum_size'
            ],
            'AWS_POSTGRES_BACKUP_MINIMUM_SIZE': dict_[
                'aws_postgres_backup_minimum_size'
            ],
            'AWS_REDIS_BACKUP_MINIMUM_SIZE': dict_[
                'aws_redis_backup_minimum_size'
            ],
            'AWS_BACKUP_UPLOAD_CHUNK_SIZE': dict_[
                'aws_backup_upload_chunk_size'
            ],
            'AWS_BACKUP_BUCKET_DELETION_RULE_ENABLED': _get_value(
                'aws_backup_bucket_deletion_rule_enabled', 'True', 'False'
            ),
            'LETSENCRYPT_EMAIL': dict_['letsencrypt_email'],
            'MAINTENANCE_ETA': dict_['maintenance_eta'],
            'MAINTENANCE_DATE_ISO': dict_['maintenance_date_iso'],
            'MAINTENANCE_DATE_STR': dict_['maintenance_date_str'],
            'MAINTENANCE_EMAIL': dict_['maintenance_email'],
            'USE_NPM_FROM_HOST': (
                '' if (config.dev_mode and not dict_['npm_container']) else '#'
            ),
            'DOCKER_NETWORK_BACKEND_PREFIX': config.get_prefix('backend'),
            'DOCKER_NETWORK_FRONTEND_PREFIX': config.get_prefix('frontend'),
            'USE_BACKEND_NETWORK': _get_value(
                'expose_backend_ports', comparison_value=False
            ),
            'EXPOSE_BACKEND_PORTS': _get_value('expose_backend_ports'),
            'USE_FAKE_DNS': _get_value('local_installation'),
            'ADD_BACKEND_EXTRA_HOSTS': (
                ''
                if (config.expose_backend_ports and not config.use_private_dns)
                else '#'
            ),
            'USE_EXTRA_HOSTS': (
                ''
                if (
                    config.local_install
                    or config.expose_backend_ports
                    and not config.use_private_dns
                )
                else '#'
            ),
            'MONGO_ROOT_USERNAME': dict_['mongo_root_username'],
            'MONGO_ROOT_PASSWORD': dict_['mongo_root_password'],
            'MONGO_USER_USERNAME': dict_['mongo_user_username'],
            'MONGO_USER_PASSWORD': dict_['mongo_user_password'],
            'REDIS_PASSWORD': dict_['redis_password'],
            'REDIS_PASSWORD_JS_ENCODED': json.dumps(dict_['redis_password']),
            'USE_DEV_MODE': _get_value('dev_mode'),
            'USE_CELERY': _get_value('use_celery', comparison_value=False),
            'ENKETO_ALLOW_PRIVATE_IP_ADDRESS': _get_value(
                'local_installation', true_value='true', false_value='false'
            ),
            'USE_REDIS_CACHE_MAX_MEMORY': _get_value(
                'redis_cache_max_memory',
                true_value='#',
                false_value='',
                comparison_value='',
            ),
            # (sic) misspelled name matches the `${USE_LETSENSCRYPT}`
            # placeholder used in the templates — do not "fix" one side only.
            'USE_LETSENSCRYPT': '#' if config.use_letsencrypt else '',
            'DOCKER_COMPOSE_CMD': 'docker',
            # Keep leading space in front of suffix if any
            'DOCKER_COMPOSE_SUFFIX': ' compose'
        }

    @staticmethod
    def _get_templates_path_parent():
        # Absolute path of the repository's `templates/` directory (one
        # level above this helpers module).
        base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        templates_path_parent = os.path.join(base_dir, 'templates')
        return templates_path_parent

    @staticmethod
    def __read_unique_id(destination_directory):
        """
        Reads unique id from file `Template.UNIQUE_ID_FILE`.

        Returns the file content as str, '' when the file is unreadable, or
        None when `destination_directory` does not exist yet.
        :return: str
        """
        unique_id = ''

        if os.path.isdir(destination_directory):
            try:
                unique_id_file = os.path.join(destination_directory,
                                              Template.UNIQUE_ID_FILE)
                with open(unique_id_file, 'r') as f:
                    unique_id = f.read().strip()
            except IOError:
                # Missing/unreadable marker file: treat as "no id recorded".
                pass
        else:
            unique_id = None

        return unique_id

    @staticmethod
    def __write_templates(
        template_variables_, root_, destination_directory_, filenames_
    ):
        # Render every `*.tpl` file found in `root_`; output is written to
        # `destination_directory_` with the `.tpl` suffix stripped.
        for filename in fnmatch.filter(filenames_, '*.tpl'):
            with open(os.path.join(root_, filename), 'r') as template:
                t = ExtendedPyTemplate(template.read(), template_variables_)
                with open(
                    os.path.join(destination_directory_, filename[:-4]), 'w'
                ) as f:
                    f.write(t.substitute(template_variables_))

    @classmethod
    def __write_unique_id(cls, destination_directory, unique_id):
        """
        Persist `unique_id` to `Template.UNIQUE_ID_FILE` inside
        `destination_directory`. Returns True on success, False otherwise.
        """
        try:
            unique_id_file = os.path.join(destination_directory,
                                          Template.UNIQUE_ID_FILE)
            # Ensure kobo-deployment is created.
            cls.__create_directory(destination_directory)

            with open(unique_id_file, 'w') as f:
                f.write(str(unique_id))

            # Restrict the marker file to owner read/write.
            os.chmod(unique_id_file, stat.S_IWRITE | stat.S_IREAD)

        except (IOError, OSError):
            CLI.colored_print('Could not write unique_id file', CLI.COLOR_ERROR)
            return False

        return True
387 |
388 |
class ExtendedPyTemplate(PyTemplate):
    """
    `string.Template` subclass adding minimal conditional-block support.

    Before the usual `${...}` substitution, `{% if KEY %}`/`{% endif KEY %}`
    markers are resolved against `template_variables_`:

    - truthy value: the markers (and the whitespace glued to them) are
      stripped, keeping the enclosed text;
    - falsy value: the whole `{% if %}...{% endif %}` span is removed.

    Usage example:
    ```
    {
        'host': 'redis-cache.kobo.local',
        'port': '6379'{% if REDIS_PASSWORD %},{% endif REDIS_PASSWORD %}
        {% if REDIS_PASSWORD %}
        'password': ${REDIS_PASSWORD}
        {% endif REDIS_PASSWORD %}
    }
    ```

    If `REDIS_PASSWORD` equals '123456', output would be:
    ```
    {
        'host': 'redis-cache.kobo.local',
        'port': '6379',
        'password': '123456'
    }
    ```

    If `REDIS_PASSWORD` equals '' (or `False` or `None`), output would be:
    ```
    {
        'host': 'redis-cache.kobo.local',
        'port': '6379'

    }
    ```

    """
    IF_PATTERN = '{{% if {} %}}'
    ENDIF_PATTERN = '{{% endif {} %}}'

    def __init__(self, template, template_variables_):
        for key, value in template_variables_.items():
            # Cheap plain-string test before doing any regex work.
            if self.IF_PATTERN.format(key) not in template:
                continue
            if value:
                # Keep the enclosed text; strip the markers along with the
                # whitespace immediately after `if` / before `endif`.
                if_pattern = r'{}\s*'.format(self.IF_PATTERN.format(key))
                endif_pattern = r'\s*{}'.format(
                    self.ENDIF_PATTERN.format(key))
                template = re.sub(if_pattern, '', template)
                template = re.sub(endif_pattern, '', template)
            else:
                # Drop the whole conditional block. `[\s\S]*?` matches any
                # character including newlines — equivalent to the previous
                # `(.|\s)*?` but without its pathological backtracking.
                pattern = r'{}[\s\S]*?{}'.format(
                    self.IF_PATTERN.format(key),
                    self.ENDIF_PATTERN.format(key))
                template = re.sub(pattern, '', template)
        super().__init__(template)
441 |
--------------------------------------------------------------------------------
/helpers/command.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | import sys
4 | import time
5 | import subprocess
6 |
7 | from helpers.cli import CLI
8 | from helpers.config import Config
9 | from helpers.network import Network
10 | from helpers.template import Template
11 | from helpers.upgrading import Upgrading
12 | from helpers.utils import run_docker_compose
13 |
14 |
15 | class Command:
16 |
    @staticmethod
    def help():
        """
        Print the `run.py` command-line usage and available options.
        """
        output = [
            'Usage: python3 run.py [options]',
            '',
            '    Options:',
            '          -i, --info',
            '              Show KoboToolbox Url and super user credentials',
            '          -l, --logs',
            '              Display docker logs',
            '          -b, --build',
            '              Build django (kpi) container (only on dev/staging mode)',
            '          -s, --setup',
            '              Prompt questions to (re)write configuration files',
            '          -S, --stop',
            '              Stop KoboToolbox',
            '          -u, --update, --upgrade [branch or tag]',
            '              Update KoboToolbox',
            '          -cf, --compose-frontend [docker-compose arguments]',
            '              Run a docker-compose command in the front-end '
            'environment',
            '          -cb, --compose-backend [docker-compose arguments]',
            '              Run a docker-compose command in the back-end '
            'environment',
            '          -m, --maintenance',
            '              Activate maintenance mode. All traffic is '
            'redirected to maintenance page',
            '          -sm, --stop-maintenance',
            '              Stop maintenance mode',
            '          -v, --version',
            '              Display current version',
            ''
        ]
        print('\n'.join(output))
51 |
52 | @classmethod
53 | def build(cls):
54 | """
55 | Builds kpi image with `--no-caches` option
56 | """
57 | config = Config()
58 | dict_ = config.get_dict()
59 |
60 | if config.dev_mode or config.staging_mode:
61 |
62 | prefix = config.get_prefix('frontend')
63 | timestamp = int(time.time())
64 | dict_['kpi_dev_build_id'] = f'{prefix}{timestamp}'
65 | config.write_config()
66 | Template.render(config)
67 | frontend_command = run_docker_compose(dict_, [
68 | '-f', 'docker-compose.frontend.yml',
69 | '-f', 'docker-compose.frontend.override.yml',
70 | '-p', config.get_prefix('frontend'),
71 | 'build', '--force-rm', '--no-cache', 'kpi'
72 | ])
73 | CLI.run_command(frontend_command, dict_['kobodocker_path'])
74 |
75 | @classmethod
76 | def compose_frontend(cls, args):
77 | config = Config()
78 | dict_ = config.get_dict()
79 | command = run_docker_compose(dict_, [
80 | '-f', 'docker-compose.frontend.yml',
81 | '-f', 'docker-compose.frontend.override.yml',
82 | '-p', config.get_prefix('frontend')
83 | ])
84 |
85 | cls.__validate_custom_yml(config, command)
86 | command.extend(args)
87 | subprocess.call(command, cwd=dict_['kobodocker_path'])
88 |
89 | @classmethod
90 | def compose_backend(cls, args):
91 | config = Config()
92 | dict_ = config.get_dict()
93 | command = run_docker_compose(dict_, [
94 | '-f', f'docker-compose.backend.yml',
95 | '-f', f'docker-compose.backend.override.yml',
96 | '-p', config.get_prefix('backend')
97 | ])
98 | cls.__validate_custom_yml(config, command)
99 | command.extend(args)
100 | subprocess.call(command, cwd=dict_['kobodocker_path'])
101 |
    @classmethod
    def info(cls, timeout=600):
        """
        Poll the `/service_health/` endpoint until it returns HTTP 200, then
        display the front-end URL and super-user credentials.

        Args:
            timeout (int): seconds to wait before prompting the user to keep
                waiting; a one-time front-end restart is offered on refusal.

        Returns:
            bool: True when the environment responded in time.
        """
        config = Config()
        dict_ = config.get_dict()

        nginx_port = dict_['exposed_nginx_docker_port']

        # Public URL; the port suffix is omitted for the default NGINX port.
        main_url = '{}://{}.{}{}'.format(
            'https' if dict_['https'] else 'http',
            dict_['kpi_subdomain'],
            dict_['public_domain_name'],
            ':{}'.format(nginx_port) if (
                nginx_port and
                str(nginx_port) != Config.DEFAULT_NGINX_PORT
            ) else ''
        )

        stop = False
        start = int(time.time())
        success = False
        hostname = f"{dict_['kpi_subdomain']}.{dict_['public_domain_name']}"
        https = dict_['https']
        # Health checks hit the HTTPS default port when HTTPS is on, the
        # exposed NGINX port otherwise.
        nginx_port = int(Config.DEFAULT_NGINX_HTTPS_PORT) \
            if https else int(dict_['exposed_nginx_docker_port'])
        already_retried = False
        while not stop:
            if Network.status_check(hostname,
                                    '/service_health/',
                                    nginx_port, https) == Network.STATUS_OK_200:
                stop = True
                success = True
            elif int(time.time()) - start >= timeout:
                if timeout > 0:
                    CLI.colored_print(
                        '\n`KoboToolbox` has not started yet. '
                        'This can happen with low CPU/RAM computers.\n',
                        CLI.COLOR_INFO)
                    question = f'Wait for another {timeout} seconds?'
                    response = CLI.yes_no_question(question)
                    if response:
                        # Restart the clock and keep polling.
                        start = int(time.time())
                        continue
                    else:
                        # Offer a single front-end restart before giving up.
                        if not already_retried:
                            already_retried = True
                            CLI.colored_print(
                                '\nSometimes front-end containers cannot '
                                'communicate with back-end containers.\n'
                                'Restarting the front-end containers usually '
                                'fixes it.\n', CLI.COLOR_INFO)
                            question = 'Would you like to try?'
                            response = CLI.yes_no_question(question)
                            if response:
                                start = int(time.time())
                                cls.restart_frontend()
                                continue
                stop = True
            else:
                # Not ready yet: show a progress dot and poll again in 10s.
                sys.stdout.write('.')
                sys.stdout.flush()
                time.sleep(10)

        # Create a new line
        print('')

        if success:
            username = dict_['super_user_username']
            password = dict_['super_user_password']

            message = (
                'Ready\n'
                f'URL: {main_url}\n'
                f'User: {username}\n'
                f'Password: {password}'
            )
            CLI.framed_print(message,
                             color=CLI.COLOR_SUCCESS)

        else:
            message = (
                'KoboToolbox could not start!\n'
                'Please try `python3 run.py --logs` to see the logs.'
            )
            CLI.framed_print(message, color=CLI.COLOR_ERROR)

        return success
188 |
189 | @classmethod
190 | def logs(cls):
191 | config = Config()
192 | dict_ = config.get_dict()
193 |
194 | if config.backend:
195 | backend_command = run_docker_compose(dict_, [
196 | '-f', f'docker-compose.backend.yml',
197 | '-f', f'docker-compose.backend.override.yml',
198 | '-p', config.get_prefix('backend'),
199 | 'logs', '-f'
200 | ])
201 | cls.__validate_custom_yml(config, backend_command)
202 | CLI.run_command(backend_command, dict_['kobodocker_path'], True)
203 |
204 | if config.frontend:
205 | frontend_command = run_docker_compose(dict_, [
206 | '-f', 'docker-compose.frontend.yml',
207 | '-f', 'docker-compose.frontend.override.yml',
208 | '-p', config.get_prefix('frontend'),
209 | 'logs', '-f',
210 | ])
211 |
212 | cls.__validate_custom_yml(config, frontend_command)
213 | CLI.run_command(frontend_command, dict_['kobodocker_path'], True)
214 |
215 | @classmethod
216 | def configure_maintenance(cls):
217 | config = Config()
218 | dict_ = config.get_dict()
219 |
220 | if not config.multi_servers or config.frontend:
221 |
222 | config.maintenance()
223 | Template.render_maintenance(config)
224 | dict_['maintenance_enabled'] = True
225 | config.write_config()
226 | cls.stop_nginx()
227 | cls.start_maintenance()
228 |
229 | @classmethod
230 | def stop_nginx(cls):
231 | config = Config()
232 | dict_ = config.get_dict()
233 |
234 | nginx_stop_command = run_docker_compose(dict_, [
235 | '-f', 'docker-compose.frontend.yml',
236 | '-f', 'docker-compose.frontend.override.yml',
237 | '-p', config.get_prefix('frontend'),
238 | 'stop', 'nginx',
239 | ])
240 |
241 | cls.__validate_custom_yml(config, nginx_stop_command)
242 | CLI.run_command(nginx_stop_command, dict_['kobodocker_path'])
243 |
244 | @classmethod
245 | def start_maintenance(cls):
246 | config = Config()
247 | dict_ = config.get_dict()
248 |
249 | frontend_command = run_docker_compose(dict_, [
250 | '-f', 'docker-compose.maintenance.yml',
251 | '-f', 'docker-compose.maintenance.override.yml',
252 | '-p', config.get_prefix('maintenance'),
253 | 'up', '-d',
254 | ])
255 |
256 | CLI.run_command(frontend_command, dict_['kobodocker_path'])
257 | CLI.colored_print('Maintenance mode has been started',
258 | CLI.COLOR_SUCCESS)
259 |
    @classmethod
    def restart_frontend(cls):
        # Convenience wrapper: (re)start only the front-end containers.
        cls.start(frontend_only=True)
263 |
264 | @classmethod
265 | def start(cls, frontend_only=False, force_setup=False):
266 | config = Config()
267 | dict_ = config.get_dict()
268 |
269 | cls.stop(output=False, frontend_only=frontend_only)
270 | if frontend_only:
271 | CLI.colored_print('Launching front-end containers', CLI.COLOR_INFO)
272 | else:
273 | CLI.colored_print('Launching environment', CLI.COLOR_INFO)
274 |
275 | # Test if ports are available
276 | ports = []
277 | if config.proxy:
278 | nginx_port = int(dict_['nginx_proxy_port'])
279 | else:
280 | nginx_port = int(dict_['exposed_nginx_docker_port'])
281 |
282 | if frontend_only or config.frontend or not config.multi_servers:
283 | ports.append(nginx_port)
284 |
285 | if not frontend_only and config.expose_backend_ports and config.backend:
286 | ports.append(dict_['postgresql_port'])
287 | ports.append(dict_['mongo_port'])
288 | ports.append(dict_['redis_main_port'])
289 | ports.append(dict_['redis_cache_port'])
290 |
291 | for port in ports:
292 | if Network.is_port_open(port):
293 | CLI.colored_print(f'Port {port} is already open. '
294 | 'KoboToolbox cannot start',
295 | CLI.COLOR_ERROR)
296 | sys.exit(1)
297 |
298 | # Start the back-end containers
299 | if not frontend_only and config.backend:
300 |
301 | backend_command = run_docker_compose(dict_, [
302 | '-f', f'docker-compose.backend.yml',
303 | '-f', f'docker-compose.backend.override.yml',
304 | '-p', config.get_prefix('backend'),
305 | 'up', '-d'
306 | ])
307 |
308 | cls.__validate_custom_yml(config, backend_command)
309 | CLI.run_command(backend_command, dict_['kobodocker_path'])
310 |
311 | # Start the front-end containers
312 | if config.frontend:
313 |
314 | # If this was previously a shared-database setup, migrate to
315 | # separate databases for KPI and KoboCAT
316 | Upgrading.migrate_single_to_two_databases(config)
317 |
318 | frontend_command = run_docker_compose(dict_, [
319 | '-f', 'docker-compose.frontend.yml',
320 | '-f', 'docker-compose.frontend.override.yml',
321 | '-p', config.get_prefix('frontend'),
322 | 'up', '-d',
323 | ])
324 |
325 | if dict_['maintenance_enabled']:
326 | cls.start_maintenance()
327 | # Start all front-end services except the non-maintenance NGINX
328 | frontend_command.extend([
329 | s for s in config.get_service_names() if s != 'nginx'
330 | ])
331 |
332 | cls.__validate_custom_yml(config, frontend_command)
333 | CLI.run_command(frontend_command, dict_['kobodocker_path'])
334 |
335 | # Start reverse proxy if user uses it.
336 | if config.use_letsencrypt:
337 | if force_setup:
338 | # Let's Encrypt NGINX container needs kobo-docker NGINX
339 | # container to be started first
340 | config.init_letsencrypt()
341 |
342 | proxy_command = run_docker_compose(dict_, ['up', '-d'])
343 | CLI.run_command(
344 | proxy_command, config.get_letsencrypt_repo_path()
345 | )
346 |
347 | if dict_['maintenance_enabled']:
348 | CLI.colored_print(
349 | 'Maintenance mode is enabled. To resume '
350 | 'normal operation, use `--stop-maintenance`',
351 | CLI.COLOR_INFO,
352 | )
353 | elif not frontend_only:
354 | if not config.multi_servers or config.frontend:
355 | CLI.colored_print('Waiting for environment to be ready. '
356 | 'It can take a few minutes.', CLI.COLOR_INFO)
357 | cls.info()
358 | else:
359 | CLI.colored_print(
360 | (f'Back-end server is starting up '
361 | 'and should be up & running soon!\nPlease look at docker '
362 | 'logs for further information: '
363 | '`python3 run.py -cb logs -f`'),
364 | CLI.COLOR_WARNING)
365 |
    @classmethod
    def stop(cls, output=True, frontend_only=False):
        """
        Stop containers.

        Because containers share the same network, containers must be stopped
        first, then "down-ed" to remove any attached internal networks.
        The order must be respected to avoid removing networks with active
        endpoints.

        Args:
            output (bool): If True, print a success message at the end.
            frontend_only (bool): If True, leave back-end containers running.
        """
        config = Config()

        if not config.multi_servers or config.frontend:
            # Stop maintenance container in case it's up & running
            cls.stop_containers('maintenance')

            # Stop reverse proxy if user uses it.
            if config.use_letsencrypt:
                cls.stop_containers('certbot')

            # Stop front-end containers
            cls.stop_containers('frontend')

            # Clean maintenance services
            cls.stop_containers('maintenance', down=True)

            # Clean certbot services if user uses it.
            if config.use_letsencrypt:
                cls.stop_containers('certbot', down=True)

        if not frontend_only and config.backend:
            cls.stop_containers('backend', down=True)

        # Clean front-end services
        if not config.multi_servers or config.frontend:
            cls.stop_containers('frontend', down=True)

        if output:
            CLI.colored_print('KoboToolbox has been stopped', CLI.COLOR_SUCCESS)
404 | @classmethod
405 | def stop_containers(cls, group: str, down: bool = False):
406 |
407 | config = Config()
408 | dict_ = config.get_dict()
409 |
410 | if group not in ['frontend', 'backend', 'certbot', 'maintenance']:
411 | raise Exception('Unknown group')
412 |
413 | group_docker_maps = {
414 | 'frontend': {
415 | 'options': [
416 | '-f', 'docker-compose.frontend.yml',
417 | '-f', 'docker-compose.frontend.override.yml',
418 | '-p', config.get_prefix('frontend'),
419 | ],
420 | 'custom_yml': True,
421 | },
422 | 'backend': {
423 | 'options': [
424 | '-f', f'docker-compose.backend.yml',
425 | '-f', f'docker-compose.backend.override.yml',
426 | '-p', config.get_prefix('backend'),
427 | ],
428 | 'custom_yml': True,
429 | },
430 | 'certbot': {
431 | 'options': [],
432 | 'custom_yml': False,
433 | 'path': config.get_letsencrypt_repo_path(),
434 | },
435 | 'maintenance': {
436 | 'options': [
437 | '-f', 'docker-compose.maintenance.yml',
438 | '-f', 'docker-compose.maintenance.override.yml',
439 | '-p', config.get_prefix('maintenance'),
440 | ],
441 | 'custom_yml': False,
442 | }
443 | }
444 |
445 | path = group_docker_maps[group].get('path', dict_['kobodocker_path'])
446 | mode = 'stop' if not down else 'down'
447 | options = group_docker_maps[group]['options']
448 | command = run_docker_compose(dict_, options + [mode])
449 | if group_docker_maps[group]['custom_yml']:
450 | cls.__validate_custom_yml(config, command)
451 |
452 | CLI.run_command(command, path)
453 |
    @classmethod
    def stop_maintenance(cls):
        """
        Stop maintenance mode.

        Stops the maintenance containers, recreates the regular NGINX
        front-end container, then persists `maintenance_enabled = False`
        in the configuration.
        """
        config = Config()
        dict_ = config.get_dict()

        if not config.multi_servers or config.frontend:
            # Stop maintenance container in case it's up & running
            cls.stop_containers('maintenance')

            # Create and start NGINX container
            frontend_command = run_docker_compose(dict_, [
                '-f', 'docker-compose.frontend.yml',
                '-f', 'docker-compose.frontend.override.yml',
                '-p', config.get_prefix('frontend'),
                'up', '-d',
                'nginx',
            ])

            # May inject the custom front-end YML file into the command.
            cls.__validate_custom_yml(config, frontend_command)
            CLI.run_command(frontend_command, dict_['kobodocker_path'])

            CLI.colored_print('Maintenance mode has been stopped',
                              CLI.COLOR_SUCCESS)

            # Persist the new state so subsequent starts skip maintenance.
            dict_['maintenance_enabled'] = False
            config.write_config()
484 | @classmethod
485 | def version(cls):
486 | git_commit_version_command = ['git', 'rev-parse', 'HEAD']
487 | stdout = CLI.run_command(git_commit_version_command)
488 | build = stdout.strip()[0:7]
489 | version = Config.KOBO_INSTALL_VERSION
490 | CLI.colored_print(
491 | f'kobo-install Version: {version} (build {build})',
492 | CLI.COLOR_SUCCESS,
493 | )
494 |
495 | @staticmethod
496 | def __validate_custom_yml(config, command):
497 | """
498 | Validate whether docker-compose must start the containers with a
499 | custom YML file in addition to the default. If the file does not yet exist,
500 | kobo-install is paused until the user creates it and resumes the setup manually.
501 |
502 | If user has chosen to use a custom YML file, it is injected into `command`
503 | before being executed.
504 | """
505 | dict_ = config.get_dict()
506 | frontend_command = True
507 | # Detect if it's a front-end command or back-end command
508 | for part in command:
509 | if 'backend' in part:
510 | frontend_command = False
511 | break
512 |
513 | start_index = 6 # len of command `docker` + extra space
514 | if frontend_command and dict_['use_frontend_custom_yml']:
515 | custom_file = '{}/docker-compose.frontend.custom.yml'.format(
516 | dict_['kobodocker_path']
517 | )
518 |
519 | does_custom_file_exist = os.path.exists(custom_file)
520 | while not does_custom_file_exist:
521 | message = (
522 | 'Please create your custom configuration in\n'
523 | '`{custom_file}`.'
524 | ).format(custom_file=custom_file)
525 | CLI.framed_print(message, color=CLI.COLOR_INFO, columns=90)
526 | input('Press any key when it is done...')
527 | does_custom_file_exist = os.path.exists(custom_file)
528 |
529 | # Add custom file to docker-compose command
530 | command.insert(start_index, '-f')
531 | command.insert(start_index + 1, 'docker-compose.frontend.custom.yml')
532 |
533 | if not frontend_command and dict_['use_backend_custom_yml']:
534 | custom_file = '{}/docker-compose.backend.custom.yml'.format(
535 | dict_['kobodocker_path'],
536 | )
537 |
538 | does_custom_file_exist = os.path.exists(custom_file)
539 | while not does_custom_file_exist:
540 | message = (
541 | 'Please create your custom configuration in\n'
542 | '`{custom_file}`.'
543 | ).format(custom_file=custom_file)
544 | CLI.framed_print(message, color=CLI.COLOR_INFO, columns=90)
545 | input('Press any key when it is done...')
546 | does_custom_file_exist = os.path.exists(custom_file)
547 |
548 | # Add custom file to docker-compose command
549 | command.insert(start_index, '-f')
550 | command.insert(
551 | start_index + 1,
552 | 'docker-compose.backend.custom.yml',
553 | )
554 |
--------------------------------------------------------------------------------
/tests/test_config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import os
3 | import pytest
4 | import random
5 | import shutil
6 | import tempfile
7 | import time
8 | from unittest.mock import patch, MagicMock
9 |
10 | from helpers.cli import CLI
11 | from helpers.config import Config
12 | from .utils import (
13 | mock_read_config as read_config,
14 | mock_write_trigger_upsert_db_users,
15 | MockAWSValidation
16 | )
17 |
# Values fed to the mocked `CLI.colored_input` to simulate the user's answer
# to a yes ('1') / no ('2') prompt.
CHOICE_YES = '1'
CHOICE_NO = '2'
20 |
21 |
def test_read_config():
    """Smoke test: loading the mocked configuration must not raise."""
    read_config()
24 |
25 |
def test_advanced_options():
    """Answering yes/no to the advanced-options question toggles the flag."""
    config = read_config()
    # The mock objects were previously bound to unused `mock_ci` names;
    # the aliases are dropped.
    with patch.object(CLI, 'colored_input', return_value=CHOICE_YES):
        config._Config__questions_advanced_options()
        assert config.advanced_options

    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        config._Config__questions_advanced_options()
        assert not config.advanced_options
37 |
38 |
def test_installation():
    """
    A local installation implies a single server and no Let's Encrypt.

    Returns the configured `Config` object so other tests (e.g.
    `test_dev_mode`) can build on a local installation.
    """
    config = read_config()
    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        config._Config__questions_installation_type()
        assert not config.local_install

    with patch.object(CLI, 'colored_input', return_value=CHOICE_YES):
        config._Config__questions_installation_type()
        assert config.local_install
        assert not config.multi_servers
        assert not config.use_letsencrypt

    return config
54 |
55 |
@patch('helpers.config.Config._Config__clone_repo',
       MagicMock(return_value=True))
def test_staging_mode():
    """Staging mode asks only for the KPI repo path, not full dev mode."""
    config = read_config()
    # `TemporaryDirectory` guarantees cleanup even when an assertion fails,
    # unlike the former mkdtemp()/rmtree() pair which leaked on failure.
    with tempfile.TemporaryDirectory() as kpi_repo_path:
        with patch('helpers.cli.CLI.colored_input') as mock_colored_input:
            mock_colored_input.side_effect = iter([CHOICE_YES, kpi_repo_path])
            config._Config__questions_dev_mode()
            dict_ = config.get_dict()
            assert not config.dev_mode
            assert config.staging_mode
            assert dict_['kpi_path'] == kpi_repo_path
70 |
71 |
@patch('helpers.config.Config._Config__clone_repo', MagicMock(return_value=True))
def test_dev_mode():
    """Dev mode records the exposed port, repo path and container options."""
    config = test_installation()

    # `TemporaryDirectory` guarantees cleanup even when an assertion fails,
    # unlike the former mkdtemp()/rmtree() pair which leaked on failure.
    with tempfile.TemporaryDirectory() as kpi_repo_path:
        with patch('helpers.cli.CLI.colored_input') as mock_colored_input:
            mock_colored_input.side_effect = iter(
                [
                    '8080',
                    CHOICE_YES,
                    CHOICE_NO,
                    kpi_repo_path,
                    CHOICE_YES,
                    CHOICE_NO,
                ]
            )

            config._Config__questions_dev_mode()
            dict_ = config.get_dict()
            assert config.dev_mode
            assert not config.staging_mode
            assert config.get_dict().get('exposed_nginx_docker_port') == '8080'
            assert dict_['kpi_path'] == kpi_repo_path
            assert dict_['npm_container'] is False
            assert dict_['use_celery'] is False

    # Declining dev mode resets the KPI repository path.
    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        config._Config__questions_dev_mode()
        dict_ = config.get_dict()
        assert not config.dev_mode
        assert dict_['kpi_path'] == ''
106 |
107 |
def test_server_roles_questions():
    """Front-end/back-end roles follow the answers to the role questions."""
    config = read_config()
    assert config.frontend
    assert config.backend

    answers = [CHOICE_YES, 'frontend', 'backend']
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__questions_multi_servers()

        config._Config__questions_roles()
        assert config.frontend
        assert not config.backend

        config._Config__questions_roles()
        assert not config.frontend
        assert config.backend
126 |
127 |
def test_session_cookies():
    """Invalid cookie-age answers are rejected until a valid one is given."""
    config = read_config()

    assert config._Config__dict['django_session_cookie_age'] == 604800

    answers = [
        'None',  # Wrong, should ask again
        '',      # Wrong, should ask again
        '1',     # Correct, will continue
    ]
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__questions_session_cookies()
        assert config._Config__dict['django_session_cookie_age'] == 3600
141 |
142 |
def test_use_https():
    """HTTPS stays on for remote installs and is forced off locally."""
    config = read_config()

    assert config.is_secure

    # Unused `as mock_ci` aliases removed.
    with patch.object(CLI, 'colored_input', return_value=CHOICE_YES):
        config._Config__questions_https()
        assert not config.local_install
        assert config.is_secure

    with patch.object(CLI, 'colored_input', return_value=CHOICE_YES):
        config._Config__questions_installation_type()
        assert config.local_install
        assert not config.is_secure
159 |
160 |
def _aws_validation_setup():
    """
    Shared setup for the AWS validation tests below.

    Returns a freshly read config after asserting AWS is disabled and
    unvalidated by default.
    """
    config = read_config()

    assert not config._Config__dict['use_aws']
    assert not config._Config__dict['aws_credentials_valid']

    return config
168 |
169 |
def test_aws_credentials_invalid_with_no_configuration():
    """AWS credentials stay invalid when AWS is never configured."""
    config = _aws_validation_setup()

    # NOTE(review): the patched input is never consumed — no question method
    # is invoked inside this block, so these asserts only re-check the
    # defaults already asserted in `_aws_validation_setup`. Possibly
    # `__questions_aws` was meant to be called here — confirm the intent.
    with patch('helpers.cli.CLI.colored_input') as mock_colored_input:
        mock_colored_input.side_effect = CHOICE_NO
        assert not config._Config__dict['use_aws']
        assert not config._Config__dict['aws_credentials_valid']
177 |
178 |
def test_aws_validation_fails_with_system_exit():
    """
    Three rounds of empty AWS credentials with validation enabled must abort
    the setup with `SystemExit`.
    """
    config = _aws_validation_setup()

    # Access key, secret key, bucket name, region name — all left empty.
    empty_credentials = ['', '', '', '']

    with patch('helpers.cli.CLI.colored_input') as mock_colored_input:
        mock_colored_input.side_effect = iter(
            [CHOICE_YES]            # Yes, use AWS storage
            + empty_credentials     # First attempt
            + [CHOICE_YES]          # Yes, validate AWS credentials
            + empty_credentials     # Second attempt
            + empty_credentials     # Third (and last) attempt
        )
        # The original `try/except SystemExit: pass` also passed when no
        # exception was raised; `pytest.raises` asserts the exit happens.
        with pytest.raises(SystemExit):
            config._Config__questions_aws()
    assert not config._Config__dict['aws_credentials_valid']
207 |
208 |
def test_aws_invalid_credentials_continue_without_validation():
    """Skipping validation leaves the credentials flagged as not valid."""
    config = _aws_validation_setup()

    answers = [CHOICE_YES, '', '', '', '', CHOICE_NO]
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__questions_aws()
        assert not config._Config__dict['aws_credentials_valid']
216 |
217 |
@patch('helpers.aws_validation.AWSValidation.validate_credentials',
       new=MockAWSValidation.validate_credentials)
def test_aws_validation_passes_with_valid_credentials():
    """
    Valid credentials must be accepted on the first, second or third attempt
    when validation is requested; without validation the flag stays False.
    """
    config = _aws_validation_setup()

    valid_keys = [
        'test_access_key',
        'test_secret_key',
        'test_bucket_name',
        'test_region_name',
    ]
    # Access key, secret key, bucket name, region name — all left empty.
    empty_keys = ['', '', '', '']

    def run_aws_questions(answers):
        # Each scenario starts from an unvalidated state (already False on
        # the very first run; reset explicitly for the others).
        config._Config__dict['aws_credentials_valid'] = False
        with patch('helpers.cli.CLI.colored_input') as mock_colored_input:
            mock_colored_input.side_effect = iter(answers)
            config._Config__questions_aws()

    # Correct keys, no validation: continues, but flag stays False.
    run_aws_questions([CHOICE_YES] + valid_keys + [CHOICE_NO])
    assert not config._Config__dict['aws_credentials_valid']

    # Correct keys on the first attempt, with validation.
    run_aws_questions([CHOICE_YES] + valid_keys + [CHOICE_YES])
    assert config._Config__dict['aws_credentials_valid']

    # Correct keys on the second attempt, with validation.
    run_aws_questions([CHOICE_YES] + empty_keys + [CHOICE_YES] + valid_keys)
    assert config._Config__dict['aws_credentials_valid']

    # Correct keys on the third attempt, with validation.
    run_aws_questions(
        [CHOICE_YES] + empty_keys + [CHOICE_YES] + empty_keys + valid_keys
    )
    assert config._Config__dict['aws_credentials_valid']
300 |
301 |
@patch('helpers.config.Config._Config__clone_repo',
       MagicMock(return_value=True))
def test_proxy_letsencrypt():
    """Default reverse-proxy answers keep Let's Encrypt and default ports."""
    config = read_config()

    assert config.proxy
    assert config.use_letsencrypt

    # Force custom exposed port
    config._Config__dict['exposed_nginx_docker_port'] = '8088'

    # Use default options
    answers = [
        CHOICE_YES,
        'test@test.com',
        CHOICE_YES,
        Config.DEFAULT_NGINX_PORT,
    ]
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__questions_reverse_proxy()
        dict_ = config.get_dict()
        assert config.proxy
        assert config.use_letsencrypt
        assert config.block_common_http_ports
        assert dict_['nginx_proxy_port'] == Config.DEFAULT_PROXY_PORT
        assert dict_['exposed_nginx_docker_port'] == Config.DEFAULT_NGINX_PORT
325 |
326 |
def test_proxy_no_letsencrypt_advanced():
    """Advanced mode lets the user decline Let's Encrypt and port blocking."""
    config = read_config()
    # Force advanced options
    config._Config__dict['advanced'] = True
    assert config.advanced_options
    assert config.proxy
    assert config.use_letsencrypt
    proxy_port = Config.DEFAULT_NGINX_PORT

    answers = [CHOICE_NO, CHOICE_NO, proxy_port]
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__questions_reverse_proxy()
        dict_ = config.get_dict()
        assert config.proxy
        assert not config.use_letsencrypt
        assert not config.block_common_http_ports
        assert dict_['nginx_proxy_port'] == proxy_port
345 |
346 |
def test_proxy_no_letsencrypt():
    """Declining Let's Encrypt keeps the proxy and the default proxy port."""
    config = read_config()

    assert config.proxy
    assert config.use_letsencrypt

    # Unused `as mock_ci` alias removed.
    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        config._Config__questions_reverse_proxy()
        dict_ = config.get_dict()
        assert config.proxy
        assert not config.use_letsencrypt
        assert config.block_common_http_ports
        assert dict_['nginx_proxy_port'] == Config.DEFAULT_PROXY_PORT
361 |
362 |
def test_proxy_no_letsencrypt_retains_custom_nginx_proxy_port():
    """
    A previously customized proxy port must survive re-running the
    reverse-proxy questions when every prompt is answered with its default.
    """
    custom_proxy_port = 9090
    config = read_config(overrides={
        'advanced': True,
        'use_letsencrypt': False,
        'nginx_proxy_port': str(custom_proxy_port),
    })
    # Answer every question with its proposed default value.
    # (Unused `as mock_ci` alias removed.)
    with patch.object(
        CLI, 'colored_input',
        new=classmethod(lambda cls, message, color, default: default)
    ):
        config._Config__questions_reverse_proxy()
        dict_ = config.get_dict()
        assert dict_['nginx_proxy_port'] == str(custom_proxy_port)
377 |
378 |
def test_no_proxy_no_ssl():
    """With neither SSL nor a proxy, NGINX uses its default exposed port."""
    config = read_config()
    dict_ = config.get_dict()
    assert config.is_secure
    assert dict_['nginx_proxy_port'] == Config.DEFAULT_PROXY_PORT

    proxy_port = Config.DEFAULT_NGINX_PORT

    # Unused `as mock_ci`/`as mock_ci_2` aliases removed.
    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        config._Config__questions_https()
        assert not config.is_secure

    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        config._Config__questions_reverse_proxy()
        dict_ = config.get_dict()
        assert not config.proxy
        assert not config.use_letsencrypt
        assert not config.block_common_http_ports
        assert dict_['nginx_proxy_port'] == proxy_port
400 |
401 |
def test_proxy_no_ssl_advanced():
    """Advanced mode: proxy without SSL, on another server or the same one."""
    config = read_config()
    # Force advanced options
    config._Config__dict['advanced'] = True
    assert config.advanced_options
    assert config.is_secure

    # Unused `as mock_ci` alias removed.
    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        config._Config__questions_https()
        assert not config.is_secure

    # Proxy - not on the same server
    proxy_port = Config.DEFAULT_NGINX_PORT
    with patch('helpers.cli.CLI.colored_input') as mock_colored_input_1:
        mock_colored_input_1.side_effect = iter(
            [CHOICE_YES, CHOICE_NO, proxy_port])
        config._Config__questions_reverse_proxy()
        dict_ = config.get_dict()
        assert config.proxy
        assert not config.use_letsencrypt
        assert not config.block_common_http_ports
        assert dict_['nginx_proxy_port'] == proxy_port

    # Proxy - on the same server
    proxy_port = Config.DEFAULT_PROXY_PORT
    with patch('helpers.cli.CLI.colored_input') as mock_colored_input_2:
        mock_colored_input_2.side_effect = iter(
            [CHOICE_YES, CHOICE_YES, proxy_port])
        config._Config__questions_reverse_proxy()
        dict_ = config.get_dict()
        assert config.proxy
        assert not config.use_letsencrypt
        assert config.block_common_http_ports
        assert dict_['nginx_proxy_port'] == proxy_port
437 |
438 |
def test_port_allowed():
    """Common HTTP ports are blocked only while Let's Encrypt is in use."""
    config = read_config()

    # Let's Encrypt is enabled by default: common HTTP ports are refused.
    for blocked_port in (Config.DEFAULT_NGINX_PORT, '443'):
        assert not config._Config__is_port_allowed(blocked_port)
    assert config._Config__is_port_allowed(Config.DEFAULT_PROXY_PORT)

    # Without Let's Encrypt and without blocking, they become available.
    config._Config__dict['use_letsencrypt'] = False
    config._Config__dict['block_common_http_ports'] = False
    for port in (Config.DEFAULT_NGINX_PORT, '443'):
        assert config._Config__is_port_allowed(port)
451 |
452 |
def test_create_directory():
    """The confirmed destination path is stored as `kobodocker_path`."""
    config = read_config()
    # `TemporaryDirectory` guarantees cleanup even when an assertion fails,
    # unlike the former mkdtemp()/rmtree() pair which leaked on failure.
    with tempfile.TemporaryDirectory() as destination_path:
        with patch('helpers.cli.CLI.colored_input') as mock_colored_input:
            mock_colored_input.side_effect = iter(
                [destination_path, CHOICE_YES]
            )
            config._Config__create_directory()
            dict_ = config.get_dict()
            assert dict_['kobodocker_path'] == destination_path
464 |
465 |
@patch('helpers.config.Config.write_config', new=lambda *a, **k: None)
def test_maintenance():
    """Maintenance mode is refused on first setup, then parses its inputs."""
    config = read_config()

    # First time: maintenance mode must abort with exit code 1.
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        config.maintenance()
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == 1

    answers = [
        '2hours',          # Wrong value, it should ask again
        '2 hours',         # OK
        '',                # Wrong value, it should ask again
        '20190101T0200',   # OK
        'email@example.com',
    ]
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__dict['date_created'] = time.time()
        config._Config__first_time = False
        config.maintenance()
        dict_ = config.get_dict()
        assert dict_['maintenance_date_str'] == (
            'Tuesday, January 01 at 02:00 GMT'
        )
491 |
492 |
def test_exposed_ports():
    """Customized back-end ports are stored; defaults return when declined."""
    config = read_config()
    # Unused `as mock_ci*` aliases removed where the mock is not configured.
    with patch.object(CLI, 'colored_input', return_value=CHOICE_YES):
        # Choose multi servers options
        config._Config__questions_multi_servers()

    with patch('helpers.cli.CLI.colored_input') as mock_ci:
        # Choose to customize ports
        mock_ci.side_effect = iter(
            [CHOICE_YES, '5532', '27117', '6479', '6480'])
        config._Config__questions_ports()

    assert config._Config__dict['postgresql_port'] == '5532'
    assert config._Config__dict['mongo_port'] == '27117'
    assert config._Config__dict['redis_main_port'] == '6479'
    assert config._Config__dict['redis_cache_port'] == '6480'
    assert config.expose_backend_ports

    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        # Choose single server
        config._Config__questions_multi_servers()

    with patch.object(CLI, 'colored_input', return_value=CHOICE_NO):
        # Choose to not expose ports
        config._Config__questions_ports()

    assert config._Config__dict['postgresql_port'] == '5432'
    assert config._Config__dict['mongo_port'] == '27017'
    assert config._Config__dict['redis_main_port'] == '6379'
    assert config._Config__dict['redis_cache_port'] == '6380'
    assert not config.expose_backend_ports
527 |
528 |
@patch('helpers.config.Config.write_config', new=lambda *a, **k: None)
def test_force_secure_mongo():
    """
    Re-running the whole question flow (i.e. not the first time) must end
    with `mongo_secured` set to True in the built configuration.
    """
    config = read_config()
    dict_ = config.get_dict()

    with patch('helpers.cli.CLI.colored_input') as mock_ci:
        # We need to run it like if user has already run the setup once to
        # force MongoDB to 'upsert' users.
        config._Config__first_time = False
        # Run with no advanced options

        mock_ci.side_effect = iter([
            dict_['kobodocker_path'],
            CHOICE_YES,  # Confirm path
            CHOICE_NO,
            CHOICE_NO,
            dict_['public_domain_name'],
            dict_['kpi_subdomain'],
            dict_['kc_subdomain'],
            dict_['ee_subdomain'],
            CHOICE_NO,  # Do you want to use HTTPS?
            dict_['smtp_host'],
            dict_['smtp_port'],
            dict_['smtp_user'],
            'test@test.com',
            dict_['super_user_username'],
            dict_['super_user_password'],
            CHOICE_NO,
        ])
        new_config = config.build()
        assert new_config['mongo_secured'] is True
560 |
561 |
@patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file',
       new=mock_write_trigger_upsert_db_users)
def test_secure_mongo_advanced_options():
    """
    Exercise the MongoDB questions with advanced options on.

    The trigger file `/tmp/upsert_db_users` must be created only when the
    setup has already been run once; its content is non-empty only when
    the usernames changed (which prompts for user deletion).
    """
    config = read_config()
    config._Config__dict['advanced'] = True

    # Try when setup is run for the first time.
    config._Config__first_time = True
    with patch('helpers.cli.CLI.colored_input') as mock_ci:
        mock_ci.side_effect = iter([
            'root',
            'rootpassword',
            'mongo_kobo_user',
            'mongopassword',
        ])
        config._Config__questions_mongo()
        assert not os.path.exists('/tmp/upsert_db_users')

    # Try when setup has been already run once
    # If it's an upgrade, users should not see:
    # ╔══════════════════════════════════════════════════════╗
    # ║ MongoDB root's and/or user's usernames have changed! ║
    # ╚══════════════════════════════════════════════════════╝
    config._Config__first_time = False
    config._Config__dict['mongo_secured'] = False

    with patch('helpers.cli.CLI.colored_input') as mock_ci:
        mock_ci.side_effect = iter([
            'root',
            'rootPassword',
            'mongo_kobo_user',
            'mongoPassword',
        ])
        config._Config__questions_mongo()
        assert os.path.exists('/tmp/upsert_db_users')
        assert os.path.getsize('/tmp/upsert_db_users') == 0
        os.remove('/tmp/upsert_db_users')

    # Try when setup has been already run once
    # If it's NOT an upgrade, Users should see:
    # ╔══════════════════════════════════════════════════════╗
    # ║ MongoDB root's and/or user's usernames have changed! ║
    # ╚══════════════════════════════════════════════════════╝
    config._Config__dict['mongo_secured'] = True
    with patch('helpers.cli.CLI.colored_input') as mock_ci:
        mock_ci.side_effect = iter([
            'root',
            'rootPassw0rd',
            'kobo_user',
            'mongoPassword',
            CHOICE_YES,
        ])
        config._Config__questions_mongo()
        assert os.path.exists('/tmp/upsert_db_users')
        assert os.path.getsize('/tmp/upsert_db_users') != 0
        os.remove('/tmp/upsert_db_users')
618 |
619 |
@patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file',
       new=mock_write_trigger_upsert_db_users)
def test_update_mongo_passwords():
    """Changing only MongoDB passwords creates an empty trigger file."""
    config = read_config()
    answers = [
        'root',
        'rootPassword',
        'user',
        'mongoPassword',
    ]
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__first_time = False
        # Test with unsecured MongoDB is covered in
        # test_secure_mongo_advanced_options
        config._Config__dict['mongo_secured'] = True
        config._Config__dict['mongo_root_username'] = 'root'
        config._Config__dict['mongo_user_username'] = 'user'
        config._Config__questions_mongo()
        assert os.path.exists('/tmp/upsert_db_users')
        assert os.path.getsize('/tmp/upsert_db_users') == 0
        os.remove('/tmp/upsert_db_users')
640 |
641 |
@patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file',
       new=mock_write_trigger_upsert_db_users)
def test_update_mongo_usernames():
    """Changing MongoDB usernames writes the users to delete to the file."""
    config = read_config()
    answers = [
        'admin',
        'rootPassword',
        'another_user',
        'mongoPassword',
        CHOICE_YES,  # Delete users
    ]
    with patch('helpers.cli.CLI.colored_input', side_effect=answers):
        config._Config__first_time = False
        config._Config__dict['mongo_root_username'] = 'root'
        config._Config__dict['mongo_user_username'] = 'user'
        config._Config__questions_mongo()
        assert os.path.exists('/tmp/upsert_db_users')
        with open('/tmp/upsert_db_users', 'r') as f:
            assert f.read() == 'user\tformhub\nroot\tadmin'
        os.remove('/tmp/upsert_db_users')
664 |
665 |
@patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file',
       new=mock_write_trigger_upsert_db_users)
def test_update_postgres_password():
    """
    Does **NOT** test if user is updated in PostgreSQL but the file creation
    (and its content) used to trigger the action by PostgreSQL container.

    When only the password changes, the file must contain the username and
    `false` (tab-separated), i.e. the existing user must NOT be deleted.
    Users should not be deleted if they already exist.
    """
    config = read_config()
    with patch('helpers.cli.CLI.colored_input') as mock_ci:
        config._Config__first_time = False
        config._Config__dict['postgres_user'] = 'user'
        config._Config__dict['postgres_password'] = 'password'
        mock_ci.side_effect = iter([
            'kobocat',
            'koboform',
            'user',
            'userPassw0rd',
            CHOICE_NO,  # Tweak settings
        ])
        config._Config__questions_postgres()
        assert os.path.exists('/tmp/upsert_db_users')
        with open('/tmp/upsert_db_users', 'r') as f:
            content = f.read()
            expected_content = 'user\tfalse'
            assert content == expected_content
        os.remove('/tmp/upsert_db_users')
695 |
696 |
@patch('helpers.config.Config._Config__write_upsert_db_users_trigger_file',
       new=mock_write_trigger_upsert_db_users)
def test_update_postgres_username():
    """
    Does **NOT** test if user is updated in PostgreSQL but the file creation
    (and its content) used to trigger the action by PostgreSQL container.

    When the username changes, the file must contain the old username and
    `true` (tab-separated), i.e. the old user must be deleted.
    """
    config = read_config()
    with patch('helpers.cli.CLI.colored_input') as mock_ci:
        config._Config__first_time = False
        config._Config__dict['postgres_user'] = 'user'
        config._Config__dict['postgres_password'] = 'password'
        mock_ci.side_effect = iter([
            'kobocat',
            'koboform',
            'another_user',
            'password',
            CHOICE_YES,  # Delete user
            CHOICE_NO,  # Tweak settings
        ])
        config._Config__questions_postgres()
        assert os.path.exists('/tmp/upsert_db_users')
        with open('/tmp/upsert_db_users', 'r') as f:
            content = f.read()
            expected_content = 'user\ttrue'
            assert content == expected_content
        os.remove('/tmp/upsert_db_users')
726 |
727 |
def test_update_postgres_db_name_from_single_database():
    """
    Simulate an upgrade from a single database to two databases.

    With two databases, KoboCat gets its own database: the upgrade must copy
    the legacy `postgres_db` value into `kc_postgres_db`.
    """
    config = read_config()
    dict_ = config.get_dict()
    legacy_db_name = 'postgres_db_kobo'
    config._Config__dict['postgres_db'] = legacy_db_name
    del config._Config__dict['kc_postgres_db']
    assert 'postgres_db' in dict_
    assert 'kc_postgres_db' not in dict_
    dict_ = config.get_upgraded_dict()
    assert dict_['kc_postgres_db'] == legacy_db_name
743 |
744 |
def test_use_boolean():
    """
    Ensure config upgrade converts legacy '1'/'2' string flags to booleans.

    Legacy configurations stored boolean options as '1' (true) or '2'
    (false); `get_upgraded_dict()` must translate each of them to a real
    `bool`.
    """
    config = read_config()
    boolean_properties = [
        'advanced',
        'aws_backup_bucket_deletion_rule_enabled',
        'backup_from_primary',
        'block_common_http_ports',
        'custom_secret_keys',
        'customized_ports',
        'debug',
        'dev_mode',
        'expose_backend_ports',
        'https',
        'local_installation',
        'multi',
        'npm_container',
        'postgres_settings',
        'proxy',
        'raven_settings',
        'review_host',
        'smtp_use_tls',
        'staging_mode',
        'two_databases',
        'use_aws',
        'use_backup',
        'use_letsencrypt',
        'use_private_dns',
        'uwsgi_settings',
    ]
    expected_dict = {}
    for property_ in boolean_properties:
        # Randomly pick a legacy value: '1' means true, '2' means false.
        old_value = str(random.randint(1, 2))
        # Idiomatic boolean expression instead of `True if ... else False`.
        expected_dict[property_] = old_value == '1'
        config._Config__dict[property_] = old_value

    dict_ = config.get_upgraded_dict()

    for property_ in boolean_properties:
        assert dict_[property_] == expected_dict[property_]
787 |
788 |
def test_backup_schedules_from_single_instance():
    """
    On a single-instance setup, all four backup schedules (KoBoCAT media,
    MongoDB, PostgreSQL, Redis) are asked and stored.
    """
    config = read_config()
    # Force advanced options and single instance.
    config._Config__dict['advanced'] = True
    config._Config__dict['multi'] = False

    # Defaults shipped with the config before any question is answered.
    default_schedules = {
        'kobocat_media_backup_schedule': '0 0 * * 0',
        'mongo_backup_schedule': '0 1 * * 0',
        'postgres_backup_schedule': '0 2 * * 0',
        'redis_backup_schedule': '0 3 * * 0',
    }
    for option, cron_expr in default_schedules.items():
        assert config._Config__dict[option] == cron_expr

    with patch('helpers.cli.CLI.colored_input') as mock_input:
        # Answers are consumed in question order.
        mock_input.side_effect = iter([
            CHOICE_YES,   # Activate backup
            '1 1 1 1 1',  # KoBoCAT media
            '2 2 2 2 2',  # PostgreSQL
            '3 3 3 3 3',  # Mongo
            '4 4 4 4 4',  # Redis
        ])
        config._Config__questions_backup()

    updated_schedules = {
        'kobocat_media_backup_schedule': '1 1 1 1 1',
        'postgres_backup_schedule': '2 2 2 2 2',
        'mongo_backup_schedule': '3 3 3 3 3',
        'redis_backup_schedule': '4 4 4 4 4',
    }
    for option, cron_expr in updated_schedules.items():
        assert config._Config__dict[option] == cron_expr
813 |
814 |
def test_backup_schedules_from_frontend_instance():
    """
    A frontend server is asked only about the KoBoCAT media backup schedule.
    """
    config = read_config()
    # Advanced options are required for backup questions.
    config._Config__dict['advanced'] = True

    assert config._Config__dict['kobocat_media_backup_schedule'] == '0 0 * * 0'

    # Put the config in multi-server mode with the 'frontend' role.
    with patch('helpers.cli.CLI.colored_input') as mock_input:
        mock_input.side_effect = iter([CHOICE_YES, 'frontend'])
        config._Config__questions_multi_servers()
        config._Config__questions_roles()

    assert config.frontend

    with patch('helpers.cli.CLI.colored_input') as mock_input:
        mock_input.side_effect = iter([
            CHOICE_YES,   # Activate backup
            '1 1 1 1 1',  # KoBoCAT media
        ])
        config._Config__questions_backup()

    assert config._Config__dict['kobocat_media_backup_schedule'] == '1 1 1 1 1'
838 |
839 |
def test_backup_schedules_from_backend():
    """
    A backend server is asked about the PostgreSQL, MongoDB and Redis backup
    schedules (no KoBoCAT media question on a backend).
    """
    config = read_config()
    # Advanced options are required for backup questions.
    config._Config__dict['advanced'] = True

    # Defaults shipped with the config before any question is answered.
    default_schedules = {
        'mongo_backup_schedule': '0 1 * * 0',
        'postgres_backup_schedule': '0 2 * * 0',
        'redis_backup_schedule': '0 3 * * 0',
    }
    for option, cron_expr in default_schedules.items():
        assert config._Config__dict[option] == cron_expr

    # Put the config in multi-server mode with the 'backend' role.
    with patch('helpers.cli.CLI.colored_input') as mock_input:
        mock_input.side_effect = iter([CHOICE_YES, 'backend'])
        config._Config__questions_multi_servers()
        config._Config__questions_roles()
    assert config.backend

    with patch('helpers.cli.CLI.colored_input') as mock_input:
        mock_input.side_effect = iter([
            CHOICE_YES,   # Activate backup
            CHOICE_NO,    # Choose AWS
            '1 1 1 1 1',  # PostgreSQL
            '3 3 3 3 3',  # Mongo
            '4 4 4 4 4',  # Redis
        ])
        config._Config__questions_backup()

    updated_schedules = {
        'postgres_backup_schedule': '1 1 1 1 1',
        'mongo_backup_schedule': '3 3 3 3 3',
        'redis_backup_schedule': '4 4 4 4 4',
    }
    for option, cron_expr in updated_schedules.items():
        assert config._Config__dict[option] == cron_expr
868 |
869 |
def test_activate_only_postgres_backup():
    """
    Answering `-` deactivates a backup schedule (empty string) while a cron
    expression keeps that backup active; only PostgreSQL stays enabled here.
    """
    config = read_config()
    # Force advanced options and single instance
    config._Config__dict['advanced'] = True
    config._Config__dict['multi'] = False
    # Force `False` to validate it becomes `True` at the end
    config._Config__dict['backup_from_primary'] = False

    assert config._Config__dict['kobocat_media_backup_schedule'] == '0 0 * * 0'
    assert config._Config__dict['mongo_backup_schedule'] == '0 1 * * 0'
    assert config._Config__dict['postgres_backup_schedule'] == '0 2 * * 0'
    assert config._Config__dict['redis_backup_schedule'] == '0 3 * * 0'

    # NOTE(review): unlike sibling tests, this patches `builtins.input`
    # instead of `helpers.cli.CLI.colored_input` — presumably so the bare
    # `-` answers hit the raw input path; confirm against CLI.colored_input.
    with patch('builtins.input') as mock_input:
        mock_input.side_effect = iter([
            CHOICE_YES, # Activate backup
            '-', # Deactivate KoBoCAT media
            '2 2 2 2 2', # Modify PostgreSQL
            '-', # Deactivate Mongo
            '-', # Deactivate Redis
        ])
        config._Config__questions_backup()
        assert config._Config__dict['kobocat_media_backup_schedule'] == ''
        assert config._Config__dict['postgres_backup_schedule'] == '2 2 2 2 2'
        assert config._Config__dict['mongo_backup_schedule'] == ''
        assert config._Config__dict['redis_backup_schedule'] == ''
        # Fix: the comment above promised to validate `backup_from_primary`
        # becomes `True`, but the assertion was missing.
        assert config._Config__dict['backup_from_primary'] is True
896 |
--------------------------------------------------------------------------------