├── .circleci └── config.yml ├── .dockerignore ├── .flake8 ├── .github └── dependabot.yml ├── .gitignore ├── .readthedocs.yaml ├── CHANGELOG.rst ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── Testing.Dockerfile ├── Testing.Dockerfile.dockerignore ├── VERSION ├── app.wsgi ├── autograph └── autograph.yaml ├── bin ├── build-images.sh ├── deploy-dockerhub.sh └── run.sh ├── config └── example.ini ├── docker-compose.yml ├── docs ├── Makefile ├── README.md ├── case-studies.rst ├── conf.py ├── getting-started.rst ├── index.rst ├── introduction.rst ├── requirements.txt ├── screencasts.rst ├── screencasts │ ├── approve-review.vtt │ ├── approve-review.webm │ ├── fetch-local-settings.vtt │ ├── fetch-local-settings.webm │ ├── modify-request-review.vtt │ └── modify-request-review.webm ├── support.rst ├── target-filters.rst ├── tutorial-attachments.rst ├── tutorial-dev-kinto-admin.rst ├── tutorial-dev-server.rst ├── tutorial-local-server.rst ├── tutorial-multi-signoff.rst └── tutorial-normandy-integration.rst ├── kinto-remote-settings ├── README.rst ├── pyproject.toml ├── setup.cfg ├── setup.py ├── src │ └── kinto_remote_settings │ │ ├── __init__.py │ │ ├── changes │ │ ├── __init__.py │ │ ├── utils.py │ │ └── views.py │ │ ├── signer │ │ ├── __init__.py │ │ ├── backends │ │ │ ├── __init__.py │ │ │ ├── autograph.py │ │ │ ├── base.py │ │ │ ├── exceptions.py │ │ │ └── local_ecdsa.py │ │ ├── events.py │ │ ├── generate_keypair.py │ │ ├── listeners.py │ │ ├── serializer.py │ │ ├── updater.py │ │ └── utils.py │ │ └── testing │ │ ├── __init__.py │ │ └── mock_listener.py └── tests │ ├── changes │ ├── __init__.py │ ├── config.ini │ ├── test_changes.py │ ├── test_changeset.py │ └── test_utils.py │ └── signer │ ├── __init__.py │ ├── config │ ├── autocreate.ini │ ├── autograph.yaml │ ├── bob.ecdsa.private.pem │ ├── ecdsa.private.pem │ ├── ecdsa.public.pem │ └── signer.ini │ ├── support.py │ ├── test_autocreate_resources.py │ ├── test_events.py │ ├── test_generate_keypair.py │ ├── test_plugin_setup.py │ ├── test_serializer.py │ ├── test_signer.py │ ├── test_signer_attachments.py │ ├── test_signoff_flow.py │ ├── test_updater.py │ └── test_utils.py ├── pyproject.toml ├── renovate.json ├── requirements-dev.txt ├── requirements.in ├── requirements.txt └── tests ├── __init__.py ├── browser_test.py ├── conftest.py ├── integration_test.py ├── kinto-logo.svg └── run.sh /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | # These environment variables must be set in CircleCI UI 2 | # 3 | # DOCKERHUB_REPO - docker hub repo, format: / 4 | # DOCKER_USER 5 | # DOCKER_PASS 6 | # 7 | version: 2.1 8 | aliases: 9 | - &restore_deps_cache 10 | name: Restoring Python dependency cache 11 | key: v2-requirements-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt" }} 12 | 13 | - &save_deps_cache 14 | name: Saving Python dependency cache 15 | key: v2-requirements-{{ checksum "requirements.txt" }}-{{ checksum "requirements-dev.txt" }} 16 | paths: 17 | - /home/circleci/.cache/pip 18 | 19 | jobs: 20 | build_and_publish: 21 | machine: 22 | image: ubuntu-2004:202111-01 23 | docker_layer_caching: true 24 | 25 | working_directory: ~/kinto-dist 26 | 27 | environment: 28 | DOCKER_BUILDKIT: 1 29 | COMPOSE_DOCKER_CLI_BUILD: 1 30 | BUILDKIT_PROGRESS: plain 31 | steps: 32 | - run: 33 | name: Install essential packages 34 | command: | 35 | sudo apt-get update 36 | sudo apt-get install -y ca-certificates curl git openssh-client libpq-dev 
37 | 38 | - checkout 39 | 40 | - run: 41 | name: Create version.json 42 | command: | 43 | # create a version.json per https://github.com/mozilla-services/Dockerflow/blob/main/docs/version_object.md 44 | printf '{"commit":"%s","version":"%s","source":"https://github.com/%s/%s","build":"%s"}\n' \ 45 | "$CIRCLE_SHA1" \ 46 | $(cat VERSION) \ 47 | "$CIRCLE_PROJECT_USERNAME" \ 48 | "$CIRCLE_PROJECT_REPONAME" \ 49 | "$CIRCLE_BUILD_URL" > version.json 50 | 51 | - run: 52 | name: Build 53 | command: make build 54 | 55 | - run: 56 | name: Push to Dockerhub 57 | command: | 58 | if [ "${CIRCLE_BRANCH}" == "main" ]; then 59 | ./bin/deploy-dockerhub.sh latest 60 | fi 61 | if [ -n "${CIRCLE_TAG}" ]; then 62 | ./bin/deploy-dockerhub.sh "$CIRCLE_TAG" 63 | fi 64 | 65 | integration_test: 66 | machine: 67 | image: ubuntu-2004:202111-01 68 | docker_layer_caching: true 69 | working_directory: ~/kinto-dist 70 | environment: 71 | DOCKER_BUILDKIT: 1 72 | COMPOSE_DOCKER_CLI_BUILD: 1 73 | BUILDKIT_PROGRESS: plain 74 | steps: 75 | - run: 76 | name: Install essential packages 77 | command: | 78 | sudo apt-get update 79 | sudo apt-get install -y libpq-dev 80 | 81 | - checkout 82 | 83 | - run: 84 | name: Set Python Version 85 | command: | 86 | python3 --version 87 | pyenv global 3.9.7 88 | 89 | - run: 90 | name: Set hosts 91 | command: | 92 | echo 127.0.0.1 localhost | sudo tee -a /etc/hosts 93 | cat /etc/hosts 94 | 95 | - run: 96 | name: Build 97 | command: make build 98 | 99 | - run: 100 | name: Setup env 101 | command: | 102 | mkdir mail 103 | sudo chmod 777 mail 104 | 105 | - run: 106 | name: Integration Test 107 | command: make integration-test 108 | 109 | - store_artifacts: 110 | path: integration-test.html 111 | 112 | unit_test: 113 | docker: 114 | - image: cimg/python:3.9 115 | - image: circleci/postgres:12.8-bullseye-ram 116 | environment: 117 | POSTGRES_DB: testdb 118 | POSTGRES_USER: postgres 119 | POSTGRES_PASSWORD: postgres 120 | steps: 121 | - checkout 122 | - restore_cache: *restore_deps_cache 123 | - run: 124 | name: Run kinto_remote_settings plugin unit tests 125 | command: make test 126 | - save_cache: *save_deps_cache 127 | lint_format: 128 | docker: 129 | - image: cimg/python:3.9 130 | steps: 131 | - checkout 132 | - restore_cache: *restore_deps_cache 133 | - run: 134 | name: Check linting and formatting 135 | command: make lint 136 | - save_cache: *save_deps_cache 137 | 138 | workflows: 139 | version: 2 140 | main: 141 | jobs: 142 | - lint_format 143 | - integration_test: 144 | requires: 145 | - lint_format 146 | - unit_test: 147 | requires: 148 | - lint_format 149 | - build_and_publish: 150 | requires: 151 | - unit_test 152 | - integration_test 153 | filters: 154 | tags: 155 | only: /.*/ 156 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .venv 3 | .vscode 4 | .pytest_cache 5 | tests/ 6 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 99 3 | ignore = E203, E266, E501, W503 4 | exclude = .git,__pycache__,node_modules 5 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: pip 4 | directory: "/" 5 | schedule: 
6 | interval: weekly 7 | day: sunday 8 | timezone: UCT 9 | open-pull-requests-limit: 99 10 | reviewers: 11 | - leplatrem 12 | ignore: 13 | - dependency-name: sqlalchemy 14 | versions: 15 | - 1.4.2 16 | - 1.4.3 17 | - 1.4.5 18 | - dependency-name: pyramid 19 | versions: 20 | - "2.0" 21 | - dependency-name: idna 22 | versions: 23 | - "3.1" 24 | - dependency-name: importlib-metadata 25 | versions: 26 | - 3.7.2 27 | - 3.7.3 28 | - 3.8.1 29 | - package-ecosystem: docker 30 | directory: "/" 31 | schedule: 32 | interval: daily 33 | timezone: UCT 34 | open-pull-requests-limit: 99 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | .venv/ 6 | kinto-logo.png 7 | config/kinto.ini 8 | config/development.ini 9 | server.ini 10 | .pytest_cache/ 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | env/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *,cover 51 | .hypothesis/ 52 | integration-test.html 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | 61 | # Sphinx documentation 62 | docs/_build/ 63 | 64 | # PyBuilder 65 | target/ 66 | 67 | #Ipython Notebook 68 | .ipynb_checkpoints 69 | .bash_history 70 | .python_history 71 | 72 | # misc 73 | tests/pub 74 | tests/setup.cfg 75 | mail/*.eml 76 | attachments/ 77 | .DS_Store 78 | 79 | # Visual Studio Code 80 | .vscode/ 81 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | python: 4 | install: 5 | - requirements: docs/requirements.txt 6 | 7 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Community Participation Guidelines 2 | 3 | This repository is governed by Mozilla's code of conduct and etiquette guidelines. 4 | For more details, please read the 5 | [Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). 6 | 7 | ## How to Report 8 | For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. 
9 | 10 | 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.3 2 | 3 | FROM python:3.9-slim-bullseye@sha256:daf74cd7c4a6d420c2979b1fc74a3000489b69a330cbc15d0ab7b4721697945a as compile 4 | 5 | RUN apt-get update && apt-get install -y --no-install-recommends \ 6 | # Needed to download Rust 7 | curl \ 8 | # Needed to build psycopg and uWSGI 9 | build-essential \ 10 | python-dev \ 11 | libpq-dev \ 12 | # Needed to build uwsgi-dogstatsd plugin 13 | git 14 | 15 | # Get rustup https://rustup.rs/ for canonicaljson-rs 16 | # minimal profile https://rust-lang.github.io/rustup/concepts/profiles.html 17 | RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --profile minimal -y 18 | # Add cargo to PATH 19 | ENV PATH="/root/.cargo/bin:$PATH" 20 | 21 | RUN python -m venv /opt/venv 22 | ENV PATH="/opt/venv/bin:$PATH" 23 | 24 | RUN pip install --upgrade pip 25 | 26 | COPY requirements.txt . 27 | 28 | # Python packages 29 | RUN pip install --no-cache-dir -r requirements.txt 30 | RUN uwsgi --build-plugin https://github.com/Datadog/uwsgi-dogstatsd 31 | 32 | 33 | FROM python:3.9-slim-bullseye@sha256:daf74cd7c4a6d420c2979b1fc74a3000489b69a330cbc15d0ab7b4721697945a AS build 34 | 35 | RUN apt-get update && apt-get install -y --no-install-recommends \ 36 | # Needed for UWSGI 37 | libxml2-dev \ 38 | # Needed for psycopg2 39 | libpq-dev 40 | 41 | WORKDIR /app 42 | 43 | COPY --from=compile /opt/venv /opt/venv 44 | COPY --from=compile /dogstatsd_plugin.so . 45 | 46 | ENV PYTHONUNBUFFERED=1 \ 47 | PORT=8888 \ 48 | PATH="/opt/venv/bin:$PATH" 49 | 50 | # add a non-privileged user for installing and running 51 | # the application 52 | RUN chown 10001:10001 /app && \ 53 | groupadd --gid 10001 app && \ 54 | useradd --no-create-home --uid 10001 --gid 10001 --home-dir /app app 55 | 56 | COPY . . 57 | RUN pip install ./kinto-remote-settings 58 | 59 | # Drop down to unprivileged user 60 | RUN chown -R 10001:10001 /app 61 | 62 | USER 10001 63 | 64 | EXPOSE $PORT 65 | 66 | # Run uwsgi by default 67 | ENTRYPOINT ["/bin/bash", "/app/bin/run.sh"] 68 | CMD ["uwsgi", "--ini", "/etc/kinto.ini"] 69 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.rst LICENSE Makefile requirements.txt 2 | include app.wsgi 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VENV := $(shell echo $${VIRTUAL_ENV-.venv}) 2 | INSTALL_STAMP := $(VENV)/.install.stamp 3 | PSQL_INSTALLED := $(shell psql --version 2>/dev/null) 4 | 5 | clean: 6 | find . -name '*.pyc' -delete 7 | find . -name '__pycache__' -type d | xargs rm -rf 8 | 9 | distclean: clean 10 | rm -rf *.egg *.egg-info/ dist/ build/ 11 | 12 | maintainer-clean: distclean 13 | rm -rf .venv/ 14 | 15 | $(VENV)/bin/python: 16 | virtualenv $(VENV) --python=python3 17 | 18 | $(INSTALL_STAMP): $(VENV)/bin/python requirements.txt requirements-dev.txt 19 | $(VENV)/bin/python -m pip install --upgrade pip 20 | $(VENV)/bin/pip install -r requirements.txt 21 | $(VENV)/bin/pip install -e kinto-remote-settings 22 | $(VENV)/bin/pip install -r requirements-dev.txt 23 | touch $(INSTALL_STAMP) 24 | 25 | format: $(INSTALL_STAMP) 26 | $(VENV)/bin/isort . 
--virtual-env=$(VENV) 27 | $(VENV)/bin/black kinto-remote-settings tests 28 | 29 | lint: $(INSTALL_STAMP) 30 | $(VENV)/bin/isort . --check-only --virtual-env=$(VENV) 31 | $(VENV)/bin/black --check kinto-remote-settings tests --diff 32 | $(VENV)/bin/flake8 kinto-remote-settings tests 33 | 34 | test: $(INSTALL_STAMP) 35 | PYTHONPATH=. $(VENV)/bin/pytest kinto-remote-settings 36 | 37 | integration-test: 38 | docker-compose run web migrate 39 | docker-compose run tests 40 | 41 | build: 42 | ./bin/build-images.sh 43 | docker-compose build 44 | 45 | build-db: 46 | ifdef PSQL_INSTALLED 47 | @pg_isready 2>/dev/null 1>&2 || (echo Run PostgreSQL before starting tests. && exit 1) 48 | @echo Creating db... 49 | @psql -tc "SELECT 1 FROM pg_database WHERE datname = 'testdb'" -U postgres -h localhost | grep -q 1 || psql -c "CREATE DATABASE testdb ENCODING 'UTF8' TEMPLATE template0;" -U postgres -h localhost 50 | @psql -c "ALTER DATABASE testdb SET TIMEZONE TO UTC;" 51 | @echo Done! 52 | else 53 | @echo PostgreSQL not installed. Please install PostgreSQL to use this command. 54 | endif 55 | 56 | stop: 57 | docker-compose stop 58 | 59 | down: 60 | docker-compose down 61 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Kinto Distribution 2 | ================== 3 | 4 | This project **is deprecated** and has become https://github.com/mozilla/remote-settings/ 5 | -------------------------------------------------------------------------------- /Testing.Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1.3 2 | 3 | # This name comes from the docker-compose yml file that defines a name 4 | # for the "web" container's image. 5 | FROM kinto:build 6 | 7 | WORKDIR /app 8 | 9 | ENV PYTHONUNBUFFERED=1 \ 10 | PYTHONPATH="/app:$PYTHONPATH" 11 | 12 | USER root 13 | 14 | RUN apt-get update && \ 15 | apt-get install -y --no-install-recommends \ 16 | curl wget 17 | 18 | RUN pip install --upgrade pip 19 | COPY requirements-dev.txt . 20 | RUN pip install --no-cache-dir -r requirements-dev.txt 21 | 22 | COPY tests . 
23 | COPY kinto-remote-settings/tests ./kinto-remote-settings/tests 24 | 25 | ENTRYPOINT ["/bin/bash", "/app/run.sh"] 26 | -------------------------------------------------------------------------------- /Testing.Dockerfile.dockerignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .venv 3 | .vscode 4 | .pytest_cache 5 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 28.0.0.dev0 2 | -------------------------------------------------------------------------------- /app.wsgi: -------------------------------------------------------------------------------- 1 | try: 2 | import ConfigParser as configparser 3 | except ImportError: 4 | import configparser 5 | import logging.config 6 | import os 7 | 8 | from kinto import main 9 | 10 | here = os.path.dirname(__file__) 11 | 12 | ini_path = os.environ.get('KINTO_INI') 13 | if ini_path is None: 14 | ini_path = os.path.join(here, 'config', 'kinto.ini') 15 | 16 | # If, for some reason you accidentally get the config file path wrong 17 | # you'll get really cryptic errors from `logging.config.fileConfig(ini_path)` 18 | # so to save yourself the pain of debugging, make sure the file definitely 19 | # does exist and can be read. 20 | # Actually opening it to read will check permissions *and* presence. 21 | with open(ini_path) as f: 22 | assert f.read(), '{} empty'.format(ini_path) 23 | 24 | # Set up logging 25 | logging.config.fileConfig(ini_path) 26 | 27 | # Parse config and create WSGI app 28 | config = configparser.ConfigParser() 29 | config.read(ini_path) 30 | 31 | application = main(config.items('DEFAULT'), **dict(config.items('app:main'))) 32 | -------------------------------------------------------------------------------- /autograph/autograph.yaml: -------------------------------------------------------------------------------- 1 | server: 2 | listen: "0.0.0.0:8000" 3 | # cache 500k nonces to protect from authorization replay attacks 4 | noncecachesize: 524288 5 | 6 | # The keys below are testing keys that do not grant any power 7 | signers: 8 | # a p384 key, the standard 9 | - id: appkey1 10 | type: contentsignature 11 | x5u: https://bucket.example.net/appkey1.pem 12 | privatekey: | 13 | -----BEGIN EC PARAMETERS----- 14 | BgUrgQQAIg== 15 | -----END EC PARAMETERS----- 16 | -----BEGIN EC PRIVATE KEY----- 17 | MIGkAgEBBDAzX2TrGOr0WE92AbAl+nqnpqh25pKCLYNMTV2hJHztrkVPWOp8w0mh 18 | scIodK8RMpagBwYFK4EEACKhZANiAATiTcWYbt0Wg63dO7OXvpptNG0ryxv+v+Js 19 | JJ5Upr3pFus5fZyKxzP9NPzB+oFhL/xw3jMx7X5/vBGaQ2sJSiNlHVkqZgzYF6JQ 20 | 4yUyiqTY7v67CyfUPA1BJg/nxOS9m3o= 21 | -----END EC PRIVATE KEY----- 22 | 23 | authorizations: 24 | - id: kintodev 25 | key: 3isey64n25fim18chqgewirm6z2gwva1mas0eu71e9jtisdwv6bd 26 | signers: 27 | - appkey1 28 | 29 | monitoring: 30 | key: 19zd4w3xirb5syjgdx8atq6g91m03bdsmzjifs2oddivswlu9qs 31 | -------------------------------------------------------------------------------- /bin/build-images.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | docker build . -t kinto:build 5 | docker build . 
--file Testing.Dockerfile -t kinto:tests 6 | -------------------------------------------------------------------------------- /bin/deploy-dockerhub.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # THIS IS MEANT TO BE RUN BY CI ONLY. 4 | 5 | set -e 6 | 7 | # Usage: retry MAX CMD... 8 | # Retry CMD up to MAX times. If it fails MAX times, returns failure. 9 | # Example: retry 3 docker push "$DOCKERHUB_REPO:$TAG" 10 | function retry() { 11 | max=$1 12 | shift 13 | count=1 14 | until "$@"; do 15 | count=$((count + 1)) 16 | if [[ $count -gt $max ]]; then 17 | return 1 18 | fi 19 | echo "$count / $max" 20 | done 21 | return 0 22 | } 23 | 24 | # configure docker creds 25 | echo "$DOCKER_PASS" | docker login -u="$DOCKER_USER" --password-stdin 26 | 27 | docker images 28 | 29 | # docker tag and push git branch to dockerhub 30 | if [ -n "$1" ]; then 31 | TAG="$1" 32 | echo "Tag and push ${DOCKERHUB_REPO}:${TAG} to Dockerhub" 33 | docker tag kinto:build "$DOCKERHUB_REPO:$TAG" || 34 | (echo "Couldn't tag kinto-dist as $DOCKERHUB_REPO:$TAG" && false) 35 | retry 3 docker push "$DOCKERHUB_REPO:$TAG" || 36 | (echo "Couldn't push $DOCKERHUB_REPO:$TAG" && false) 37 | echo "Done." 38 | fi 39 | -------------------------------------------------------------------------------- /bin/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | : "${KINTO_INI:=config/example.ini}" 5 | 6 | usage() { 7 | echo "usage: ./bin/run.sh migrate|start|uwsgistart|bash|whatevercommandyouwant" 8 | exit 1 9 | } 10 | 11 | [ $# -lt 1 ] && usage 12 | 13 | case $1 in 14 | migrate) 15 | kinto migrate --ini $KINTO_INI 16 | ;; 17 | start) 18 | kinto start --ini $KINTO_INI 19 | ;; 20 | uwsgistart) 21 | KINTO_INI=$KINTO_INI uwsgi --http :8888 --ini $KINTO_INI 22 | ;; 23 | *) 24 | exec "$@" 25 | ;; 26 | esac 27 | -------------------------------------------------------------------------------- /config/example.ini: -------------------------------------------------------------------------------- 1 | [server:main] 2 | use = egg:waitress#main 3 | host = 0.0.0.0 4 | port = 8888 5 | 6 | 7 | [app:main] 8 | use = egg:kinto 9 | 10 | # 11 | # Features 12 | # 13 | kinto.experimental_permissions_endpoint = true 14 | # kinto.readonly = false 15 | # kinto.paginate_by = 0 16 | kinto.experimental_collection_schema_validation = true 17 | # kinto.trailing_slash_redirect_enabled = true 18 | # kinto.batch_max_requests = 25 19 | # kinto.version_json_path = ./version.json 20 | 21 | # 22 | # Production settings 23 | # 24 | # http://kinto.readthedocs.io/en/latest/configuration/production.html 25 | # 26 | # kinto.http_scheme = https 27 | # kinto.http_host = kinto.services.mozilla.com 28 | 29 | kinto.statsd_url = udp://localhost:8125 30 | # kinto.statsd_prefix = kinto-prod 31 | 32 | # kinto.retry_after_seconds = 5 33 | # kinto.backoff = 10 34 | # kinto.eos = 35 | 36 | # Always return CORS headers 37 | cornice.always_cors = true 38 | 39 | # 40 | # Backends. 
41 | # 42 | # http://kinto.readthedocs.io/en/latest/configuration/settings.html#storage 43 | # 44 | kinto.storage_backend = kinto.core.storage.postgresql 45 | kinto.storage_url = postgresql://admin:pass@localhost/dbname 46 | # kinto.storage_pool_size = 25 47 | # kinto.storage_max_overflow = 5 48 | # kinto.storage_pool_recycle = -1 49 | # kinto.storage_pool_timeout = 30 50 | # kinto.storage_max_backlog = -1 51 | 52 | kinto.cache_backend = kinto.core.cache.postgresql 53 | kinto.cache_url = postgresql://admin:pass@localhost/dbname 54 | # kinto.cache_pool_size = 25 55 | # kinto.cache_max_overflow = 5 56 | # kinto.cache_pool_recycle = -1 57 | # kinto.cache_pool_timeout = 30 58 | # kinto.cache_max_backlog = -1 59 | 60 | kinto.permission_backend = kinto.core.permission.postgresql 61 | kinto.permission_url = postgresql://admin:pass@localhost/dbname 62 | # kinto.permission_pool_size = 25 63 | # kinto.permission_max_overflow = 5 64 | # kinto.permission_pool_recycle = -1 65 | # kinto.permission_pool_timeout = 30 66 | # kinto.permission_max_backlog = -1 67 | 68 | 69 | # 70 | # Auth configuration. 71 | # 72 | # http://kinto.readthedocs.io/en/latest/configuration/settings.html#authentication 73 | # 74 | kinto.userid_hmac_secret = 58de20af8811f56f52d24a6862d13c163b0ea353b0e2f23ea34aaccbef174ec8 75 | multiauth.policies = account 76 | multiauth.policy.account.use = kinto.plugins.accounts.authentication.AccountsAuthenticationPolicy 77 | 78 | kinto.account_create_principals = system.Everyone 79 | kinto.account_write_principals = account:admin 80 | 81 | 82 | # 83 | # Root permissions 84 | # 85 | # kinto.bucket_create_principals = system.Authenticated 86 | # kinto.bucket_read_principals = system.Authenticated 87 | 88 | 89 | # 90 | # Client cache headers 91 | # 92 | # http://kinto.readthedocs.io/en/latest/configuration/settings.html#client-caching 93 | # 94 | # Every bucket objects objects and list 95 | # kinto.bucket_cache_expires_seconds = 3600 96 | # 97 | # Every collection objects and list of every buckets 98 | # kinto.collection_cache_expires_seconds = 3600 99 | # 100 | # Every group objects and list of every buckets 101 | # kinto.group_cache_expires_seconds = 3600 102 | # 103 | # Every records objects and list of every collections 104 | # kinto.record_cache_expires_seconds = 3600 105 | # 106 | # Records in a specific bucket 107 | # kinto.blog_record_cache_expires_seconds = 3600 108 | # 109 | # Records in a specific collection in a specific bucket 110 | # kinto.blog_article_record_cache_expires_seconds = 3600 111 | 112 | kinto.project_name = Remote Settings 113 | 114 | # 115 | # Plugins 116 | # 117 | kinto.includes = kinto.plugins.default_bucket 118 | kinto.plugins.history 119 | kinto.plugins.admin 120 | kinto.plugins.accounts 121 | kinto.plugins.flush 122 | kinto_remote_settings 123 | kinto_emailer 124 | kinto_attachment 125 | # kinto.plugins.quotas 126 | 127 | # 128 | # Kinto history 129 | # 130 | kinto.history.exclude_resources = /buckets/main-preview 131 | /buckets/main 132 | 133 | # 134 | # Kinto changes 135 | # 136 | kinto.changes.resources = /buckets/main-workspace 137 | kinto.monitor.changes.record_cache_expires_seconds = 60 138 | 139 | # 140 | # Kinto attachment 141 | # 142 | kinto.attachment.base_url = http://localhost:8000 143 | kinto.attachment.folder = {bucket_id}/{collection_id} 144 | # Either local filesystem: 145 | kinto.attachment.base_path = /tmp 146 | # or Amazon S3: 147 | # kinto.attachment.aws.access_key = 148 | # kinto.attachment.aws.secret_key = 149 | # 
kinto.attachment.aws.bucket_name = 150 | # kinto.attachment.aws.acl = public-read 151 | 152 | # 153 | # Kinto signer 154 | # 155 | kinto.signer.resources = /buckets/main-workspace -> /buckets/main-preview -> /buckets/main 156 | kinto.signer.editors_group = {collection_id}-editors 157 | kinto.signer.reviewers_group = {collection_id}-reviewers 158 | 159 | kinto.signer.to_review_enabled = true 160 | kinto.signer.signer_backend = kinto_remote_settings.signer.backends.autograph 161 | 162 | # Autograph credentials 163 | # https://github.com/mozilla-services/autograph/blob/29a206fb6/autograph.yaml#L1218-L1221 164 | kinto.signer.autograph.server_url = http://autograph:8000 165 | kinto.signer.autograph.hawk_id = kintodev 166 | kinto.signer.autograph.hawk_secret = 3isey64n25fim18chqgewirm6z2gwva1mas0eu71e9jtisdwv6bd 167 | 168 | # 169 | # Kinto emailer 170 | # 171 | mail.default_sender = kinto@restmail.net 172 | mail.debug_mailer = true 173 | # mail.host = localhost 174 | # mail.port = 25 175 | # mail.username = None 176 | # mail.password = None 177 | # mail.tls = False 178 | # mail.ssl = False 179 | # mail.keyfile = None 180 | # mail.certfile = None 181 | # mail.queue_path = None 182 | # mail.debug = 0 183 | # mail.sendmail_app = /usr/sbin/sendmail 184 | # mail.sendmail_template = {sendmail_app} -t -i -f {sender} 185 | 186 | 187 | [uwsgi] 188 | wsgi-file = app.wsgi 189 | enable-threads = true 190 | # socket = /var/run/uwsgi/kinto.sock 191 | socket = /tmp/kinto.sock 192 | chmod-socket = 666 193 | processes = 1 194 | master = true 195 | module = kinto 196 | harakiri = 120 197 | uid = 10001 198 | gid = 10001 199 | lazy = true 200 | lazy-apps = true 201 | single-interpreter = true 202 | buffer-size = 65535 203 | post-buffering = 65535 204 | enable-metrics = true 205 | plugin = dogstatsd 206 | 207 | # 208 | # Logging configuration 209 | # 210 | 211 | [loggers] 212 | keys = root 213 | 214 | [handlers] 215 | keys = console, sentry 216 | 217 | [formatters] 218 | keys = generic, json 219 | 220 | [logger_root] 221 | handlers = console, sentry 222 | 223 | [handler_console] 224 | class = StreamHandler 225 | args = (sys.stdout,) 226 | level = INFO 227 | formatter = json 228 | 229 | [handler_sentry] 230 | class = raven.handlers.logging.SentryHandler 231 | args = ('https://:@app.getsentry.com/',) 232 | level = ERROR 233 | formatter = generic 234 | 235 | [formatter_json] 236 | class = kinto.core.JsonLogFormatter 237 | 238 | [formatter_generic] 239 | format = %(asctime)s,%(msecs)03d %(levelname)-5.5s [%(name)s] %(message)s 240 | datefmt = %H:%M:%S 241 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | db: 5 | image: postgres:12 6 | environment: 7 | POSTGRES_HOST_AUTH_METHOD: trust 8 | healthcheck: 9 | test: [ "CMD", "pg_isready" ] 10 | interval: 1s 11 | timeout: 3s 12 | retries: 30 13 | 14 | memcached: 15 | image: memcached:1 16 | 17 | autograph: 18 | # Autograph is stuck at 2.7.0 because of mozilla-services/autograph#344 19 | image: mozilla/autograph:2.7.0 20 | volumes: 21 | - ${PWD}/autograph/autograph.yaml:/app/autograph.yaml 22 | 23 | selenium: 24 | image: selenium/standalone-firefox 25 | volumes: 26 | - /dev/shm:/dev/shm 27 | ports: 28 | - 4444:4444 29 | shm_size: 2g 30 | 31 | web: 32 | build: 33 | context: . 
34 | dockerfile: Dockerfile 35 | image: kinto:build 36 | depends_on: 37 | - db 38 | - memcached 39 | - autograph 40 | environment: 41 | - KINTO_CACHE_BACKEND=kinto.core.cache.memcached 42 | - KINTO_CACHE_HOSTS=memcached:11211 memcached:11212 43 | - KINTO_STORAGE_URL=postgresql://postgres@db/postgres 44 | - KINTO_PERMISSION_URL=postgresql://postgres@db/postgres 45 | - KINTO_SIGNER_AUTOGRAPH_SERVER_URL=http://autograph:8000 46 | ports: 47 | - 8888:8888 48 | volumes: 49 | - $PWD:/app 50 | command: uwsgistart 51 | 52 | tests: 53 | build: 54 | context: . 55 | dockerfile: Testing.Dockerfile 56 | image: kinto:tests 57 | depends_on: 58 | - web 59 | - selenium 60 | environment: 61 | - SERVER=http://web:8888/v1 62 | - SELENIUM_HOST=selenium 63 | - SELENIUM_PORT=4444 64 | volumes: 65 | - $PWD/tests:/app 66 | - $PWD/mail:/app/mail/ 67 | - $PWD/kinto-remote-settings:/app/kinto-remote-settings 68 | - $PWD/setup.cfg:/app/setup.cfg 69 | command: start 70 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = RemoteSettings 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | ## Remote Settings 2 | 3 | Remote Settings is a Mozilla service that makes it easy to manage evergreen settings data in Firefox. A simple API is available in Firefox for accessing the synchronized data. 4 | 5 | # https://remote-settings.readthedocs.io 6 | -------------------------------------------------------------------------------- /docs/case-studies.rst: -------------------------------------------------------------------------------- 1 | .. _case-studies: 2 | 3 | Case Studies 4 | ============ 5 | 6 | This page (*under construction*) contains some pointers to existing use cases and implementations. 7 | 8 | 9 | Search configuration 10 | -------------------- 11 | 12 | The list of search engines is managed via the ``search-config`` collection. 13 | 14 | 15 | Configuration 16 | ''''''''''''' 17 | 18 | * A complex JSON schema validates entries (`source `__) 19 | 20 | * On the server, the group of editors is different from the group of users allowed to approve changes (`permissions `_) 21 | 22 | 23 | Implementation 24 | '''''''''''''' 25 | 26 | * Client is initialized from the Search service (`source `__) 27 | 28 | * Initial data is packaged in release, and is loaded on first startup from a call to ``.get()`` (`source `__) 29 | 30 | * A hijack blocklist is managed separately, and the integrity/signature of the local records is verified on each read (`source `__). A certificate might have to be downloaded if missing/outdated. 
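For illustration, here is a minimal sketch of the read-on-startup and refresh-on-sync pattern described in the bullets above. The import and the ``search-config`` collection name come from this documentation; the rest is a simplified, hypothetical consumer rather than the actual Search service code.

.. code-block:: javascript

    const { RemoteSettings } = ChromeUtils.import("resource://services-settings/remote-settings.js", {});

    const client = RemoteSettings("search-config");

    // First read: falls back to the data packaged in the release if the
    // local database has not been synchronized yet.
    const engines = await client.get();

    // Refresh the in-memory configuration whenever a new version is synced.
    client.on("sync", async () => {
      const updated = await client.get();
      // ... rebuild the search engine configuration from `updated`
    });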
31 | 32 | 33 | Misc 34 | '''' 35 | 36 | * `searchengine devtools `_ 37 | 38 | 39 | Normandy Recipes 40 | ---------------- 41 | 42 | The list of ongoing experiments is managed via the ``normandy-recipes-capabilities`` collection. 43 | 44 | 45 | Implementation 46 | '''''''''''''' 47 | 48 | * Synchronization happens on first startup because no initial data is packaged: 49 | 50 | - ``Normandy.init()`` in BrowserGlue (`source `_) 51 | - Calling ``.get()`` with the implicit ``syncIfEmpty: true`` option will initialize the local DB by synchronizing the collection for Normandy (`source `__) 52 | 53 | * In order to guarantee that the records are published from Normandy, each record is signed individually on the server side (`source `__). Records are published from Django using ``kinto-http.py``. 54 | 55 | * Signature verification for the whole collection is done as usual, and the per-record one is verified on read when recipe eligibility is checked (`source `__) 56 | 57 | 58 | Configuration 59 | ''''''''''''' 60 | 61 | * Multi signoff is disabled (`config `__) 62 | * A scheduled task backports certain recipes into a legacy collection for old clients (`source `__, `config `__) 63 | 64 | 65 | Misc 66 | '''' 67 | 68 | * `Poucave checks for Normandy `_ 69 | 70 | 71 | HIBP Monitor Breaches 72 | --------------------- 73 | 74 | The list of websites whose credentials database was leaked is managed via the ``fxmonitor-breaches`` collection. 75 | 76 | Automation 77 | '''''''''' 78 | 79 | * A script pulls from the `Have I Been Pwned `_ API, creates the missing records using a Kinto Account, and then requests review (`source `__) 80 | * This script is run by Ops as a cron job (`source `__, `request ticket `_) 81 | * A human approves the changes manually 82 | 83 | 84 | Blocklist 85 | --------- 86 | 87 | The list of blocked addons is managed via the ``blocklists/addons-bloomfilters`` collection. 88 | 89 | 90 | Implementation 91 | '''''''''''''' 92 | 93 | The addons blocklist is implemented using a bloomfilter (`docs `_) 94 | 95 | * Bloomfilters are published from a cron job on the addons-server, implemented using raw Python requests (`source `__) 96 | 97 | * Incremental updates of bloomfilters are downloaded as binary attachments, full or base + stashes (`source `__) 98 | 99 | * Attachments are stored in IndexedDB thanks to the ``useCache: true`` option (`source `__) 100 | 101 | * When using the attachment IndexedDB cache, attachments can be packaged in release in order to avoid downloading them on new profile initialization. The bloomfilter base attachment is shipped in release along with its record metadata (`source `__) 102 | 103 | * Attachments are updated in-tree regularly using custom code in ``periodic_file_updates.sh`` (`source `__) 104 | 105 | 106 | Misc 107 | '''' 108 | 109 | * `Remote Settings authentication from CLI in Javascript `_ (See `Bug 1630651 `_) 110 | 111 | 112 | User Journey 113 | ------------ 114 | 115 | Contextual feature recommendations are managed via the ``cfr`` collection.
116 | 117 | 118 | Localization 119 | '''''''''''' 120 | 121 | * Contextual recommandations are published using translatable placeholders or string IDs 122 | 123 | :: 124 | 125 | "content": { 126 | "icon": "chrome://browser/skin/notification-icons/block-fingerprinter.svg", 127 | "text": { 128 | "string_id": "cfr-doorhanger-fingerprinters-description" 129 | }, 130 | "layout": "icon_and_message", 131 | "buttons": { 132 | "primary": { 133 | "event": "PROTECTION", 134 | "label": { 135 | "string_id": "cfr-doorhanger-socialtracking-ok-button" 136 | }, 137 | "action": { 138 | "type": "OPEN_PROTECTION_PANEL" 139 | } 140 | }, 141 | ... 142 | 143 | 144 | * In parallel, localizations are published in a separate collection 145 | * Each locale has its own record, with its ID in the following format `` `cfr-v1-${locale}` `` and a Fluent file attached. 146 | * A specificly instantiated downloader fetches the relevant one and reloads l10n (`source `__) 147 | * This specific record is checked on each load, attachment is downloaded only if updated/missing/corrupted (built-in feature of attachment downloader) 148 | 149 | 150 | Security State 151 | -------------- 152 | 153 | Several security related collections are managed in the dedicated ``security-state`` bucket. 154 | 155 | Configuration 156 | ''''''''''''' 157 | 158 | * Dedicated bucket in order to have specific content signature certificates 159 | 160 | .. code:: javascript 161 | 162 | const OneCRLBlocklistClient = RemoteSettings( 163 | Services.prefs.getCharPref(ONECRL_COLLECTION_PREF), 164 | { 165 | bucketNamePref: ONECRL_BUCKET_PREF, 166 | lastCheckTimePref: ONECRL_CHECKED_PREF, 167 | signerName: Services.prefs.getCharPref(ONECRL_SIGNER_PREF), 168 | } 169 | ); 170 | 171 | `source `__ 172 | 173 | 174 | Cert Revocations (CRLite) 175 | ''''''''''''''''''''''''' 176 | 177 | Certificates revocation list using a bloomfilter. 178 | 179 | * Sysops run a scheduled job that pulls data from a Git repo, authenticates using a Kinto account to publish (``account:crlite_publisher``), and approves changes with another one (``account:crlite_reviewer``) (`source `__) 180 | 181 | * Download of attachments happens sequentially at the end of first sync (*caution*) 182 | 183 | * Incremental updates of bloomfilters are downloaded as binary attachments in profile folder (`source `__) 184 | 185 | * Poucave check for age of revocations (`source `__). 186 | 187 | 188 | Intermediates 189 | ''''''''''''' 190 | 191 | * Download of attachments sequentially at the end of first sync (*caution*) 192 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
14 | # 15 | # import os 16 | # import sys 17 | # sys.path.insert(0, os.path.abspath('.')) 18 | import datetime 19 | 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = 'Remote Settings' 24 | copyright = "2015-%s, Mozilla Services" % datetime.datetime.now().year 25 | author = 'Mozilla' 26 | 27 | # The short X.Y version 28 | version = '' 29 | # The full version, including alpha/beta/rc tags 30 | release = '' 31 | 32 | 33 | # -- General configuration --------------------------------------------------- 34 | 35 | # If your documentation needs a minimal Sphinx version, state it here. 36 | # 37 | # needs_sphinx = '1.0' 38 | 39 | # Add any Sphinx extension module names here, as strings. They can be 40 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 | # ones. 42 | extensions = [ 43 | ] 44 | 45 | # Add any paths that contain templates here, relative to this directory. 46 | templates_path = ['_templates'] 47 | 48 | # The suffix(es) of source filenames. 49 | # You can specify multiple suffix as a list of string: 50 | # 51 | # source_suffix = ['.rst', '.md'] 52 | source_suffix = '.rst' 53 | 54 | # The master toctree document. 55 | master_doc = 'index' 56 | 57 | # The language for content autogenerated by Sphinx. Refer to documentation 58 | # for a list of supported languages. 59 | # 60 | # This is also used if you do content translation via gettext catalogs. 61 | # Usually you set "language" from the command line for these cases. 62 | language = None 63 | 64 | # List of patterns, relative to source directory, that match files and 65 | # directories to ignore when looking for source files. 66 | # This pattern also affects html_static_path and html_extra_path . 67 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 68 | 69 | # The name of the Pygments (syntax highlighting) style to use. 70 | pygments_style = 'sphinx' 71 | 72 | 73 | # -- Options for HTML output ------------------------------------------------- 74 | 75 | # The theme to use for HTML and HTML Help pages. See the documentation for 76 | # a list of builtin themes. 77 | # 78 | html_theme = 'alabaster' 79 | 80 | # Theme options are theme-specific and customize the look and feel of a theme 81 | # further. For a list of options available for each theme, see the 82 | # documentation. 83 | # 84 | # html_theme_options = {} 85 | 86 | # Add any paths that contain custom static files (such as style sheets) here, 87 | # relative to this directory. They are copied after the builtin static files, 88 | # so a file named "default.css" will overwrite the builtin "default.css". 89 | html_static_path = ['_static', 'screencasts'] 90 | 91 | # Custom sidebar templates, must be a dictionary that maps document names 92 | # to template names. 93 | # 94 | # The default sidebars (for documents that don't match any pattern) are 95 | # defined by theme itself. Builtin themes are using these templates by 96 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 97 | # 'searchbox.html']``. 98 | # 99 | # html_sidebars = {} 100 | 101 | 102 | # -- Options for HTMLHelp output --------------------------------------------- 103 | 104 | # Output file base name for HTML help builder. 105 | htmlhelp_basename = 'RemoteSettingsdoc' 106 | 107 | 108 | # -- Options for LaTeX output ------------------------------------------------ 109 | 110 | latex_elements = { 111 | # The paper size ('letterpaper' or 'a4paper'). 
112 | # 113 | # 'papersize': 'letterpaper', 114 | 115 | # The font size ('10pt', '11pt' or '12pt'). 116 | # 117 | # 'pointsize': '10pt', 118 | 119 | # Additional stuff for the LaTeX preamble. 120 | # 121 | # 'preamble': '', 122 | 123 | # Latex figure (float) alignment 124 | # 125 | # 'figure_align': 'htbp', 126 | } 127 | 128 | # Grouping the document tree into LaTeX files. List of tuples 129 | # (source start file, target name, title, 130 | # author, documentclass [howto, manual, or own class]). 131 | latex_documents = [ 132 | (master_doc, 'RemoteSettings.tex', 'Remote Settings Documentation', 133 | 'Mozilla', 'manual'), 134 | ] 135 | 136 | 137 | # -- Options for manual page output ------------------------------------------ 138 | 139 | # One entry per manual page. List of tuples 140 | # (source start file, name, description, authors, manual section). 141 | man_pages = [ 142 | (master_doc, 'remotesettings', 'Remote Settings Documentation', 143 | [author], 1) 144 | ] 145 | 146 | 147 | # -- Options for Texinfo output ---------------------------------------------- 148 | 149 | # Grouping the document tree into Texinfo files. List of tuples 150 | # (source start file, target name, title, author, 151 | # dir menu entry, description, category) 152 | texinfo_documents = [ 153 | (master_doc, 'RemoteSettings', 'Remote Settings Documentation', 154 | author, 'RemoteSettings', 'One line description of project.', 155 | 'Miscellaneous'), 156 | ] 157 | -------------------------------------------------------------------------------- /docs/getting-started.rst: -------------------------------------------------------------------------------- 1 | .. _getting-started: 2 | 3 | Getting Started 4 | =============== 5 | 6 | We will help you to use Remote Settings in your application! 7 | 8 | .. _go-to-prod: 9 | 10 | Create a new official type of Remote Settings 11 | --------------------------------------------- 12 | 13 | Basically, you will have to go through these 3 steps: 14 | 15 | 1. Setup the `Mozilla VPN `_ 16 | 2. Design your data model (see below) and prepare the list of colleagues that will be allowed to review your data 17 | 3. Request the creation of your collection using `this Bugzilla ticket template `_ 18 | 19 | Once done, you will be able to login and edit your records on the Admin UIs: 20 | 21 | - https://settings-writer.prod.mozaws.net/v1/admin/ 22 | 23 | The records will then be publicly visible at ``__ 24 | 25 | Don't hesitate to contact us (``#delivery`` on Slack) if you're stuck or have questions about the process! 26 | 27 | Check out the :ref:`screencast to create, request review and approve changes `, or :ref:`our FAQ `! 28 | 29 | .. note:: 30 | 31 | In order to **try out changes in a real environment**, you can use the **STAGE** instance: 32 | 33 | - https://settings-writer.stage.mozaws.net/v1/admin/ (*Admin UI*) 34 | 35 | In order to switch Firefox from PROD to STAGE, use the `Remote Settings DevTools `_! 36 | 37 | The records will be publicly visible at ``__ 38 | 39 | .. note:: 40 | 41 | If you simply **want to play** with the stack or the API, the best way to get started is probably to use our :ref:`DEV server `, 42 | since it's accessible without VPN access. 43 | 44 | 45 | About your data 46 | --------------- 47 | 48 | Name your collection in lowercase with dashes (eg. ``public-list-suffix``, `examples `_). 49 | 50 | The Admin UI automatically builds forms based on some metadata for your collection, namely: 51 | 52 | - the list of fields to be displayed as the list columns (eg. 
``title``, ``comment.author``) 53 | - a JSON schema that will be render as a form to create and edit your records (`see example `_) 54 | - whether you want to control the ID field or let the server assign it automatically 55 | - whether you want to be able to attach files on records 56 | 57 | .. note:: 58 | 59 | If your client code expects to find 0 or 1 record by looking up on a specific field, you should probably use that field as the record ID. ``RemoteSettings("cid").get({filters: {id: "a-value"}})`` will be instantaneous. 60 | 61 | By default, all records are made available to all users. If you want to control which users should have a particular entry, you can add a ``filter_expression`` field (see :ref:`target filters `). 62 | 63 | 64 | Records vs. Attachments? 65 | '''''''''''''''''''''''' 66 | 67 | Since the diff-based synchronization happens at the record level, it is recommended to keep your Remote Settings records small, especially if you update them often. 68 | 69 | It is important to design your data layout carefully, especially if: 70 | 71 | * you have too many records (eg. > 2000) 72 | * you have big amounts of data (eg. > 1MB) 73 | * your data cannot be easily broken into pieces 74 | * your updates are likely to overwrite most of the collection content 75 | 76 | Consider the following summary table: 77 | 78 | +-------------------------------------+--------------------------------------+-------------------------------------+ 79 | | Strategy | Pros | Cons | 80 | +-------------------------------------+--------------------------------------+-------------------------------------+ 81 | | Many small records | - Efficient sync | - Costly lookups in client | 82 | | | - Easier to review changes in Admin | - Updates potentially harder to | 83 | | | UI | automate | 84 | | | | | 85 | +-------------------------------------+--------------------------------------+-------------------------------------+ 86 | | Few big records | - Efficient lookups in client | - Harder to review changes within | 87 | | | | records in Admin UI | 88 | | | | - Memory usage in client | 89 | | | | | 90 | +-------------------------------------+--------------------------------------+-------------------------------------+ 91 | | Attachments | - No limit in size & format | - No partial update | 92 | | | | - Packaging attachments in release | 93 | | | | binary is feasible but tedious | 94 | | | | (source_) | 95 | | | | | 96 | +-------------------------------------+--------------------------------------+-------------------------------------+ 97 | 98 | .. _source: https://searchfox.org/mozilla-central/rev/dd042f25a8da58d565d199dcfebe4f34db64863c/taskcluster/docker/periodic-updates/scripts/periodic_file_updates.sh#309-324 99 | 100 | - See our :ref:`tutorial for file attachments ` 101 | 102 | 103 | .. _collection-manifests: 104 | 105 | Collection manifests 106 | -------------------- 107 | 108 | Both STAGE and PROD collections attributes and permissions are managed via YAML files in the `remote-settings-permissions `_ Github repository. 109 | 110 | If you want to accelerate the process of getting your collection deployed or adjust its schema, in DEV, STAGE or PROD, you can open a pull-request with the collection, and the definition of ``{collection}-editors`` and ``{collection}-reviewers`` groups. Check out the existing ones that were merged. 
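To recap the data-model notes from *About your data* above, here is a small, illustrative sketch of how a client consumes such a collection. The collection name ``cid`` and the record values are placeholders; only the API calls shown are taken from this page and the client API reference.

.. code-block:: javascript

    const { RemoteSettings } = ChromeUtils.import("resource://services-settings/remote-settings.js", {});

    const client = RemoteSettings("cid");

    // Plain read of the local records. Records whose ``filter_expression``
    // does not match the current profile are filtered out (see target filters).
    const records = await client.get();

    // Instantaneous lookup by record ID, as recommended above when the code
    // expects to find 0 or 1 record for a given key.
    const [match] = await client.get({ filters: { id: "a-value" } });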
111 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Remote Settings documentation master file, created by 2 | sphinx-quickstart on Mon Jul 2 22:53:10 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | .. _home: 7 | 8 | Welcome to Remote Settings documentation! 9 | ========================================= 10 | 11 | Remote Settings is a Mozilla service that makes it easy to manage evergreen settings data in Firefox. A simple API is available in Firefox for accessing the synchronized data. 12 | 13 | .. toctree:: 14 | :maxdepth: 2 15 | 16 | introduction 17 | getting-started 18 | case-studies 19 | support 20 | target-filters 21 | screencasts 22 | tutorial-dev-server 23 | tutorial-local-server 24 | tutorial-multi-signoff 25 | tutorial-attachments 26 | tutorial-normandy-integration 27 | tutorial-dev-kinto-admin 28 | 29 | Indices and tables 30 | ================== 31 | 32 | * :ref:`genindex` 33 | * :ref:`modindex` 34 | * :ref:`search` 35 | -------------------------------------------------------------------------------- /docs/introduction.rst: -------------------------------------------------------------------------------- 1 | .. _introduction: 2 | 3 | What is Remote Settings? 4 | ======================== 5 | 6 | Basically, Remote Settings consists of two components: a remote server (REST API) powered by `Kinto `_ and a client (Gecko API). 7 | 8 | Everything is done via a collection of records that is kept in sync between the client local database and the remote data. 9 | 10 | .. note:: 11 | 12 | See also `The History of Remote Settings `_ 13 | 14 | 15 | Why is it better than building my own? 16 | -------------------------------------- 17 | 18 | Out of the box you get for free: 19 | 20 | - Multi-signoff 21 | - Syncing of data - real time push based updates 22 | - Content signing - your data is signed server side and verified on the client side transparently 23 | - File attachment support 24 | - Target filtering (JEXL a-la Normandy) 25 | - Telemetry 26 | 27 | 28 | What does the workflow look like? 29 | --------------------------------- 30 | 31 | Once your collection is setup, a typical workflow would be: 32 | 33 | 1. Connect to the UI on the VPN 34 | 2. Make some changes 35 | 3. Request a review (with an optional comment) 36 | 37 | The people designated as reviewers will receive a notification email. 38 | 39 | 4. As a reviewer, you can preview the changes in a real browser 40 | 5. Once you verified that the changes have the expected effects, you can approve (or reject) the changes from the Admin UI 41 | 6. Changes will then be pulled by every Firefox clients on their next synchronization 42 | 43 | 44 | What does the client API look like? 45 | ----------------------------------- 46 | 47 | On the client side, listen for changes via an event listener: 48 | 49 | .. 
code-block:: javascript 50 | 51 | const { RemoteSettings } = ChromeUtils.import("resource://services-settings/remote-settings.js", {}); 52 | 53 | RemoteSettings("my-collection") 54 | .on("sync", (e) => { 55 | const { created, updated, deleted } = e.data; 56 | /* 57 | updated == [ 58 | 59 | { 60 | old: {label: "Yahoo", enabled: true, weight: 10, id: "d0782d8d", last_modified: 1522764475905}, 61 | new: {label: "Google", enabled: true, weight: 20, id: "8883955f", last_modified: 1521539068414}, 62 | }, 63 | ] 64 | */ 65 | }); 66 | 67 | 68 | Or get the current list of local records: 69 | 70 | .. code-block:: javascript 71 | 72 | const records = await RemoteSettings("my-collection").get(); 73 | /* 74 | records == [ 75 | {label: "Yahoo", enabled: true, weight: 10, id: "d0782d8d", last_modified: 1522764475905}, 76 | {label: "Google", enabled: true, weight: 20, id: "8883955f", last_modified: 1521539068414}, 77 | {label: "Ecosia", enabled: false, weight: 5, id: "337c865d", last_modified: 1520527480321}, 78 | ] 79 | */ 80 | 81 | .. note:: 82 | 83 | * `Client API full reference `_ 84 | 85 | 86 | What does the server side API look like? 87 | ---------------------------------------- 88 | 89 | If you want, like our `Web UI `_, to rely on the REST API for your integration, the :ref:`multi-signoff tutorial ` gives a good overview. 90 | 91 | Basically, creating a record would look like this: 92 | 93 | .. code-block:: bash 94 | 95 | curl -X POST ${SERVER}/buckets/main-workspace/collections/${COLLECTION}/records \ 96 | -H 'Content-Type:application/json' \ 97 | -d "{\"data\": {\"property\": $i}}" \ 98 | -u us3r:p455w0rd 99 | 100 | Requesting review: 101 | 102 | .. code-block:: bash 103 | 104 | curl -X PATCH ${SERVER}/buckets/main-workspace/collections/${COLLECTION} \ 105 | -H 'Content-Type:application/json' \ 106 | -d '{"data": {"status": "to-review"}}' \ 107 | -u us3r:p455w0rd 108 | 109 | Approving changes (different user): 110 | 111 | .. code-block:: bash 112 | 113 | curl -X PATCH ${SERVER}/buckets/main-workspace/collections/${COLLECTION} \ 114 | -H 'Content-Type:application/json' \ 115 | -d '{"data": {"status": "to-sign"}}' \ 116 | -u another:p455w0rd 117 | 118 | And the record is now published: 119 | 120 | .. code-block:: bash 121 | 122 | curl ${SERVER}/buckets/main/collections/${COLLECTION}/records 123 | 124 | .. note:: 125 | 126 | * `Kinto REST API reference `_ 127 | * `Python client `_ 128 | * `JavaScript client `_ 129 | 130 | 131 | Awesome! How do I get started? 132 | ------------------------------ 133 | 134 | You'll find out :ref:`in the next chapter `! 135 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | docutils<0.18 2 | -------------------------------------------------------------------------------- /docs/screencasts.rst: -------------------------------------------------------------------------------- 1 | .. _screencasts: 2 | 3 | Screencasts 4 | =========== 5 | 6 | .. _screencasts-modify-request-review: 7 | 8 | Modify records and request review 9 | --------------------------------- 10 | 11 | #. Login as an editor 12 | #. Make some changes in the collection 13 | #. Request review 14 | 15 | .. raw:: html 16 | 17 | 20 | 21 | 22 | .. _screencasts-approve-review: 23 | 24 | Approve changes 25 | --------------- 26 | 27 | #. Login as a review 28 | #. Review the changes 29 | #. Approve the review 30 | 31 | .. raw:: html 32 | 33 | 36 | 37 | 38 | .. 
_screencasts-fetch-local-settings: 39 | 40 | Fetch settings from local server 41 | -------------------------------- 42 | 43 | #. Change the necessary settings 44 | #. Register a callback for the ``sync`` event 45 | #. Trigger a synchronization manually 46 | 47 | .. raw:: html 48 | 49 | 52 | -------------------------------------------------------------------------------- /docs/screencasts/approve-review.vtt: -------------------------------------------------------------------------------- 1 | WEBVTT 2 | 3 | 00:00:00.000 --> 00:00:05.000 4 | Login on the Remote Settings admin 5 | 6 | 00:00:09.000 --> 00:00:12.000 7 | Go to the main-workspace bucket 8 | 9 | 00:00:13.000 --> 00:00:18.000 10 | And open your collection 11 | 12 | 00:00:19.000 --> 00:00:21.000 13 | You can inspect the changes 14 | 15 | 00:00:22.000 --> 00:00:26.000 16 | Each operation with its author 17 | 18 | 00:00:47.000 --> 00:00:50.000 19 | If you are happy with the changes 20 | 21 | 00:00:51.000 --> 00:00:55.000 22 | Click approve! 23 | 24 | 00:00:55.000 --> 00:01:01.000 25 | The changes are now published! 26 | -------------------------------------------------------------------------------- /docs/screencasts/approve-review.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla-services/kinto-dist/1511dbbb76ee69970bbd29eb56b7811974ce2ba1/docs/screencasts/approve-review.webm -------------------------------------------------------------------------------- /docs/screencasts/fetch-local-settings.vtt: -------------------------------------------------------------------------------- 1 | WEBVTT 2 | 3 | 00:00:02.000 --> 00:00:06.000 4 | Go into about:config 5 | 6 | 00:00:10.000 --> 00:00:15.000 7 | Put the URL of your server 8 | 9 | 00:00:16.000 --> 00:00:21.000 10 | It can be the dev server or a local one 11 | 12 | 00:00:22.000 --> 00:00:25.000 13 | Create the boolean preference 14 | 15 | 00:00:25.000 --> 00:00:30.000 16 | To disable signature verification 17 | 18 | 00:00:33.000 --> 00:00:38.000 19 | Now open the browser internal toolbox 20 | 21 | 00:00:39.000 --> 00:00:45.000 22 | To execute Javascript in the browser context 23 | 24 | 00:00:52.000 --> 00:00:57.000 25 | Import the RemoteSettings module 26 | 27 | 00:01:00.000 --> 00:01:05.000 28 | Register a callback on the "sync" event 29 | 30 | 00:01:06.000 --> 00:01:11.000 31 | And trigger a synchronization manually 32 | 33 | 00:01:12.000 --> 00:01:18.000 34 | The records are logged as instructed! 35 | 36 | 00:01:19.000 --> 00:01:26.000 37 | You can now play with the RemoteSettings API... 
38 | -------------------------------------------------------------------------------- /docs/screencasts/fetch-local-settings.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla-services/kinto-dist/1511dbbb76ee69970bbd29eb56b7811974ce2ba1/docs/screencasts/fetch-local-settings.webm -------------------------------------------------------------------------------- /docs/screencasts/modify-request-review.vtt: -------------------------------------------------------------------------------- 1 | WEBVTT 2 | 3 | 00:00:00.000 --> 00:00:05.000 4 | Login on the Remote Settings admin 5 | 6 | 00:00:09.000 --> 00:00:13.000 7 | Go to the main-workspace bucket 8 | 9 | 00:00:16.000 --> 00:00:20.000 10 | And open your collection 11 | 12 | 00:00:21.000 --> 00:00:24.000 13 | The three possible review states are shown on top 14 | 15 | 00:00:27.000 --> 00:00:30.000 16 | We will create a record 17 | 18 | 00:00:30.000 --> 00:00:35.000 19 | with arbritary data 20 | 21 | 00:00:40.000 --> 00:00:46.000 22 | We will delete another also 23 | 24 | 00:00:50.000 --> 00:00:54.000 25 | Now that we are happy with the changes 26 | 27 | 00:00:55.000 --> 00:00:57.000 28 | We will request a review 29 | 30 | 00:01:09.000 --> 00:01:10.000 31 | We're done! -------------------------------------------------------------------------------- /docs/screencasts/modify-request-review.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla-services/kinto-dist/1511dbbb76ee69970bbd29eb56b7811974ce2ba1/docs/screencasts/modify-request-review.webm -------------------------------------------------------------------------------- /docs/support.rst: -------------------------------------------------------------------------------- 1 | Support 2 | ======= 3 | 4 | .. _troubleshooting: 5 | 6 | Troubleshooting 7 | --------------- 8 | 9 | * Open a `Server Side ticket `_ (Admin, permissions etc.) 10 | * Open a `Client Side ticket `_ (Gecko API related) 11 | 12 | I cannot access my collection 13 | ''''''''''''''''''''''''''''' 14 | 15 | * Check that you can ping the server on the VPN 16 | - Make sure you were added in the appropriate VPN group (see :ref:`getting-started`) 17 | - Join ``#engops`` on Slack to troubleshoot. 18 | * Check that you can login on the Admin UI 19 | * In the ``main-workspace`` bucket, check that you can create records in your collection (eg. ``main-workspace/tippytop``) 20 | 21 | I approved the changes, but still don't see them 22 | '''''''''''''''''''''''''''''''''''''''''''''''' 23 | 24 | * A CDN serves as a cache, only push notifications bust the cache efficiently 25 | * Check that your data is visible on the source server: eg. https://settings.prod.mozaws.net/v1/buckets/main/collections/cfr/changeset?_expected=something-random-42 26 | 27 | 28 | .. _faq: 29 | 30 | Frequently Asked Questions 31 | -------------------------- 32 | 33 | How often the synchronization happens? 34 | '''''''''''''''''''''''''''''''''''''' 35 | 36 | Synchronizations can be within 10 minutes of the change or in 24 hours. 37 | 38 | There are two triggers for synchronization: a push notification and a polling check. Every five minutes a server side process checks for changes. If any changes are found a push notification will be sent and online clients will check in for updates. 
Clients that are offline or did not receive the push notification will either catch-up on next startup or automatically poll for changes every 24 hours. 39 | 40 | 41 | What is the lag on the CDN? 42 | ''''''''''''''''''''''''''' 43 | 44 | The client uses the ``/v1/buckets/main/collections/{cid}/changeset`` endpoint, which requires a ``?_expected={}`` query parameter. Since the Push notification contains the latest change timestamp, the first clients to pull the changes from the CDN will bust its cache. 45 | 46 | When using the ``/records`` endpoint manually, without any query parameters, the CDN lag can be much higher (typically 1H). 47 | 48 | 49 | How do I setup Firefox to pull data from STAGE? 50 | ''''''''''''''''''''''''''''''''''''''''''''''' 51 | 52 | The **recommended way** to setup Firefox to pull data from STAGE is to use the `Remote Settings DevTools `_ extension: switch the environment in the configuration section and click the *Sync* button. 53 | 54 | Alternatively, in order to point STAGE before on fresh profiles for example, you can set the `appropriate preferences `_ in a ``user.js`` file: 55 | 56 | :: 57 | 58 | user_pref("services.settings.server", "https://settings.stage.mozaws.net/v1"); 59 | user_pref("dom.push.serverURL", "https://autopush.stage.mozaws.net"); 60 | user_pref("security.content.signature.root_hash", "3C:01:44:6A:BE:90:36:CE:A9:A0:9A:CA:A3:A5:20:AC:62:8F:20:A7:AE:32:CE:86:1C:B2:EF:B7:0F:A0:C7:45"); 61 | user_pref("services.settings.load_dump", false); 62 | 63 | See `developer docs `_ to trigger a synchronization manually. 64 | 65 | 66 | How do I preview the changes before approving? 67 | '''''''''''''''''''''''''''''''''''''''''''''' 68 | 69 | The recommended way to setup Firefox to pull data from the preview collection is to use the `Remote Settings DevTools `_ extension: switch the environment to *Preview* and click the *Sync* button. 70 | 71 | Alternatively, you can change the ``services.settings.default_bucket`` preference to ``main-preview``, and trigger a synchronization manually. 72 | 73 | 74 | How do I preview the changes before requesting review? 75 | '''''''''''''''''''''''''''''''''''''''''''''''''''''' 76 | 77 | Currently, this is not possible. 78 | 79 | Possible workarounds: 80 | 81 | - use a :ref:`local server ` or the :ref:`DEV server ` 82 | - request review, preview changes, fix up, request review again 83 | 84 | 85 | How do I trigger a synchronization manually? 86 | '''''''''''''''''''''''''''''''''''''''''''' 87 | 88 | See `developer docs `_. 89 | 90 | 91 | How do I define default data for new profiles? 92 | '''''''''''''''''''''''''''''''''''''''''''''' 93 | 94 | See `developer docs about initial data `_. 95 | 96 | 97 | How do I automate the publication of records? (one shot) 98 | '''''''''''''''''''''''''''''''''''''''''''''''''''''''' 99 | 100 | The Remote Settings server is a REST API (namely a `Kinto instance `_). Records can be created in batches, and as seen in the :ref:`multi signoff tutorial ` reviews can be requested and approved using ``PATCH`` requests. 101 | 102 | If it is a one time run, then you can run the script as if it was you: 103 | 104 | 1. Authenticate on the Admin UI 105 | 2. On the top right corner, use the 📋 icon to copy the authentication string (eg. ``Bearer r43yt0956u0yj1``) 106 | 3. Use this header in your ``cURL`` commands (or Python/JS/Rust clients etc.) 107 | 108 | .. 
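code-block:: python

    # Illustrative sketch using the Python ``requests`` package (any HTTP client works).
    # The server URL and the Bearer token are placeholders, exactly as in the cURL example below.
    import requests

    resp = requests.get(
        "https://settings-writer.stage.mozaws.net/v1/",
        headers={"Authorization": "Bearer r43yt0956u0yj1"},
    )
    resp.raise_for_status()
    print(resp.json()["user"])  # the authenticated user info (eg. ``ldap:...``)

The equivalent request with ``cURL``:

..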
code-block:: bash 109 | 110 | curl 'https://settings-writer.stage.mozaws.net/v1/' \ 111 | -H 'Authorization: Bearer r43yt0956u0yj1' 112 | 113 | 114 | How do I automate the publication of records? (forever) 115 | ''''''''''''''''''''''''''''''''''''''''''''''''''''''' 116 | 117 | If the automation is meant to last (eg. cronjob, lambda, server to server) then the procedure would look like this: 118 | 119 | 1. `Request a dedicated Kinto internal account `_ to be created for you (eg. ``password-rules-publisher``). Secret password should remain in a vault and managed by OPs. 120 | 2. Write a script that: 121 | 122 | 1. takes the server and credentials as ENV variables (eg. ``SERVER=prod AUTH=password-rules-publisher:s3cr3t``); 123 | 2. compares your source of truth with the collection records. Exit early if no change; 124 | 3. performs all deletions/updates/creations; 125 | 4. patches the collection metadata in order to request review (see :ref:`multi-signoff tutorial `); 126 | 127 | 3. Request the OPs team to setup a cronjob in order to run your script (`request example `_) 128 | 129 | We recommend the use of `kinto-http.py `_ (`script exanple `_), but Node JS is also possible (`HIBP example `_). 130 | 131 | .. note:: 132 | 133 | Even if publication of records is done by a script, a human will have to approve the changes manually. 134 | Generally speaking, disabling dual sign-off is possible, but only in **very** specific cases. 135 | 136 | If you want to skip manual approval, request a review of your design by the cloud operations security team. 137 | 138 | 139 | .. _duplicate_data: 140 | 141 | Once data is ready in DEV or STAGE, how do we go live in PROD? 142 | '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' 143 | 144 | Stage and prod are aligned in terms of setup, features and versions. 145 | 146 | Hence, once done in DEV or STAGE there is nothing specific / additional to do: you should be able to redo the same in PROD! 147 | 148 | 149 | If you have a lot of data that you want to duplicate from one instance to another, you can use `kinto-wizard `_ to dump and load records! 150 | 151 | .. code-block:: bash 152 | 153 | pip install --user kinto-wizard 154 | 155 | Dump the main records: 156 | 157 | .. code-block:: bash 158 | 159 | kinto-wizard dump --records --server https://settings.stage.mozaws.net/v1 --bucket=main --collection=top-sites > top-sites.yaml 160 | 161 | Open the ``.yaml`` file and rename the bucket name on top to ``main-workspace``. 162 | 163 | Login in the Remote Settings Admin and copy the authentication header (icon in the top bar), in order to use it in the ``--auth`` parameter of the ``kinto-wizard load`` command. 164 | 165 | .. code-block:: bash 166 | 167 | kinto-wizard load --server https://settings.prod.mozaws.net/v1 --auth="Bearer uLdb-Yafefe....2Hyl5_w" top-sites.yaml 168 | 169 | Requesting review can be done via the UI, :ref:`or the command-line `. 170 | 171 | 172 | How many records does it support? 173 | ''''''''''''''''''''''''''''''''' 174 | 175 | We already have use-cases that contain several hundreds of records, and it's totally fine. 176 | 177 | Nevertheless, if you have thousands of records that change very often, we should talk! Mostly in order to investigate the impact in terms of payload, bandwidth, signature verification etc. 178 | 179 | 180 | Are there any size restrictions for a single record, or all records in a collection? 
181 | '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' 182 | 183 | Quotas were not enabled on the server. Therefore, technically you can create records with any size, and have as many as you want in the collection. 184 | 185 | **However**, beyond some reasonable size for the whole collection serialized as JSON, it is recommended using our :ref:`attachments feature `. 186 | 187 | Using attachments on records, you can publish data of any size (as JSON, gzipped, etc.). It gets published on S3 and the records only contain metadata about the remote file (including hash, useful for signature verification). 188 | 189 | 190 | Also does remote settings do any sort of compression for the records? 191 | ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' 192 | 193 | We are working on improving the handling of Gzip encoding for the attachments files (see `Bug 1339114 `_). 194 | 195 | But by default, Remote Settings does not try to be smart regarding compression. 196 | 197 | 198 | Is it possible to deliver remote settings to some users only? 199 | ''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' 200 | 201 | By default, settings are delivered to every user. 202 | 203 | You can add :ref:`JEXL filters on records ` to define targets. Every record will be downloaded but the list obtained with ``.get()`` will only contain entries that match. 204 | 205 | In order to limit the users that will download the records, you can check out our :ref:`dedicated tutorial `. 206 | 207 | 208 | How does the client choose the collections to synchronize? 209 | '''''''''''''''''''''''''''''''''''''''''''''''''''''''''' 210 | 211 | First, the client fetches the `list of published collections `_. 212 | 213 | Then, it synchronizes the collections that match one of the following: 214 | 215 | * it has an instantiated client — ie. a call to ``RemoteSettings("cid")`` was done earlier 216 | * some local data exists in the internal IndexedDB 217 | * a JSON dump was shipped in mozilla-central for this collection in ``services/settings/dumps/`` 218 | -------------------------------------------------------------------------------- /docs/tutorial-attachments.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial-attachments: 2 | 3 | Work with Attachments 4 | ===================== 5 | 6 | Goals 7 | ----- 8 | 9 | * Publish large or binary content 10 | 11 | Prerequisites 12 | ------------- 13 | 14 | This guide assumes that you have already installed the following commands: 15 | 16 | - cURL 17 | - uuidgen 18 | - `jq `_ (*optional*) 19 | 20 | And that you are familiar with the Remote Settings API, at least on the dev server. 21 | 22 | We'll refer the running instance as ``$SERVER`` (eg. ``https://kinto.dev.mozaws.net/v1``). 23 | 24 | 25 | Introduction 26 | ------------ 27 | 28 | Files can be attached to records. When a record has a file attached to it, it has an ``attachment`` attribute, which contains the file related information (url, hash, size, mimetype, etc.). 29 | 30 | The Remote Settings client API is **not in charge** of downloading the remote files during synchronization. 31 | 32 | However, a `helper is available `_ on the client instance. 33 | 34 | During synchronization, only the records that changed are fetched. Depending on your implementation, attachments may have to be redownloaded completely even if only a few bytes were changed. 
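For integrations outside of Firefox, fetching and verifying a published attachment only requires plain HTTP calls. The following sketch uses Python ``requests``; the bucket, collection and record identifiers are hypothetical, and it assumes the usual ``location``/``hash``/``size`` fields of the ``attachment`` attribute and the ``attachments`` capability advertised on the server root:

.. code-block:: python

    import hashlib

    import requests

    SERVER = "https://kinto.dev.mozaws.net/v1"
    RECORD = f"{SERVER}/buckets/main/collections/my-collection/records/d0782d8d"

    # The attachments base URL is advertised in the server root capabilities.
    base_url = requests.get(f"{SERVER}/").json()["capabilities"]["attachments"]["base_url"]

    attachment = requests.get(RECORD).json()["data"]["attachment"]
    content = requests.get(base_url + attachment["location"]).content

    # Compare the downloaded file with the published metadata.
    assert len(content) == attachment["size"]
    assert hashlib.sha256(content).hexdigest() == attachment["hash"]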
35 | 36 | 37 | Publish records with attachments 38 | -------------------------------- 39 | 40 | Files can be attached to existing records or records can be created when uploading the attachment. 41 | 42 | Suppose that we want to attach a file (``/home/mathieu/DAFSA.bin``) to the existing record ``bec3e95c-4d28-40c1-b486-76682962861f``: 43 | 44 | .. code-block:: bash 45 | 46 | BUCKET=main-workspace # (or just ``main`` in Dev) 47 | COLLECTION=public-suffix-list 48 | RECORD=bec3e95c-4d28-40c1-b486-76682962861f 49 | FILEPATH=/home/mathieu/DAFSA.bin 50 | 51 | curl -X POST ${SERVER}/buckets/${BUCKET}/collections/${COLLECTION}/records/${RECORD}/attachment \ 52 | -H 'Content-Type:multipart/form-data' \ 53 | -F attachment=@$FILEPATH \ 54 | -u user:pass 55 | 56 | And in order to create a record with both attributes and attachment, you'll have a generate a record id yourself. 57 | 58 | .. code-block:: bash 59 | 60 | RECORD=`uuidgen` 61 | 62 | curl -X POST ${SERVER}/buckets/${BUCKET}/collections/${COLLECTION}/records/${RECORD}/attachment \ 63 | -H 'Content-Type:multipart/form-data' \ 64 | -F attachment=@$FILEPATH \ 65 | -F 'data={"name": "Mac Fly", "age": 42}' \ 66 | -u user:pass 67 | 68 | .. note:: 69 | 70 | Since the dev server is open to anyone and runs on ``.mozaws.net``, we only allow certain types of files (images, audio, video, archives, ``.bin``, ``.json``, ``.gz``). 71 | 72 | If you need to upload files with a specific extension, let us know and we will add it to the whitelist (except ``.html``, ``.js``). 73 | 74 | 75 | Synchronize attachments 76 | ----------------------- 77 | 78 | Attachments can be downloaded when the ``"sync"`` event is received. 79 | 80 | .. code-block:: bash 81 | 82 | const client = RemoteSettings("a-key"); 83 | 84 | client.on("sync", async ({ data: { created, updated, deleted } }) => { 85 | const toDownload = created 86 | .concat(updated.map(u => u.new)) 87 | .filter(d => d.attachment); 88 | 89 | // Download attachments 90 | const fileURLs = await Promise.all( 91 | toDownload.map(entry => client.attachments.download(entry, { retries: 2 })) 92 | ); 93 | 94 | // Open downloaded files... 95 | const fileContents = await Promise.all( 96 | fileURLs.map(async url => { 97 | const r = await fetch(url); 98 | return r.blob(); 99 | }) 100 | ); 101 | }); 102 | 103 | See more details in `client documentation `_. 104 | 105 | 106 | About compression 107 | ----------------- 108 | 109 | The server does not compress the files. 110 | 111 | We plan to enable compression at the HTTP level (`Bug 1339114 `_) for when clients fetch the attachment using the ``Accept-Encoding: gzip`` request header. 112 | 113 | 114 | In the admin tool 115 | ----------------- 116 | 117 | The Remote Settings administration tool supports attachments as well. If a collection has a record schema and attachments are "enabled" for that collection, then editors will be able to upload attachments as part of editing records. 118 | 119 | The controls for attachments in a given collection are in the ``attachment`` field in the collection metadata (probably located in the `remote-settings-permissions `_ repo). 
The ``attachment`` attribute should be an object and it can have the following properties: 120 | 121 | - ``enabled``: boolean, true to enable attachments for this collection 122 | - ``required``: boolean, true if records in this collection must have an attachment 123 | -------------------------------------------------------------------------------- /docs/tutorial-dev-kinto-admin.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial-dev-kinto-admin: 2 | 3 | Kinto Admin Development 4 | ======================= 5 | 6 | Goals 7 | ----- 8 | 9 | * Development environment for Kinto Admin 10 | * Connect to a local Remote Settings server 11 | * Contribute patches 12 | 13 | Prerequisites 14 | ------------- 15 | 16 | This guide assumes you have already installed and set up the following: 17 | 18 | - :ref:`tutorial-local-server` with multi-signoff 19 | 20 | Kinto Admin 21 | ----------- 22 | 23 | This part is very classic. We recommend `using NVM `_ in order to have recent versions of Node and NPM. 24 | 25 | .. code-block:: 26 | 27 | git clone git@github.com:Kinto/kinto-admin.git 28 | 29 | cd kinto-admin/ 30 | 31 | npm install 32 | npm start 33 | 34 | The UI should be accessible at http://0.0.0.0:3000 35 | 36 | Initialization script 37 | --------------------- 38 | 39 | Since the container is not configured with a real database by default, the content is flushed on each restart. 40 | 41 | This means you will have to populate data regularly. 42 | 43 | We'll create a small bash script ``init.sh`` with the following commands: 44 | 45 | .. code-block:: bash 46 | 47 | set -e 48 | set -o pipefail 49 | 50 | SERVER=http://localhost:8888/v1 51 | 52 | In the :ref:`prerequisite tutorial `, an ``admin`` user was created, as well as the basic buckets. Let's add that to our script: 53 | 54 | .. code-block:: bash 55 | 56 | curl -X PUT --fail --verbose ${SERVER}/accounts/admin \ 57 | -d '{"data": {"password": "s3cr3t"}}' \ 58 | -H 'Content-Type:application/json' 59 | 60 | BASIC_AUTH=admin:s3cr3t 61 | 62 | curl -X PUT --fail --verbose ${SERVER}/buckets/main-workspace \ 63 | -d '{"permissions": {"collection:create": ["system.Authenticated"], "group:create": ["system.Authenticated"]}}' \ 64 | -H 'Content-Type:application/json' \ 65 | -u $BASIC_AUTH 66 | 67 | curl -X PUT --fail --verbose ${SERVER}/buckets/main-preview \ 68 | -d '{"permissions": {"read": ["system.Everyone"]}}' \ 69 | -H 'Content-Type:application/json' \ 70 | -u $BASIC_AUTH 71 | 72 | curl -X PUT --fail --verbose ${SERVER}/buckets/main \ 73 | -d '{"permissions": {"read": ["system.Everyone"]}}' \ 74 | -H 'Content-Type:application/json' \ 75 | -u $BASIC_AUTH 76 | 77 | In order to play with multi-signoff, we'll create an ``editor`` and a ``reviewer`` accounts, put these lines in the ``init.sh`` script. 78 | 79 | .. code-block:: bash 80 | 81 | curl -X PUT --fail --verbose ${SERVER}/accounts/editor \ 82 | -d '{"data": {"password": "3d1t0r"}}' \ 83 | -H 'Content-Type:application/json' 84 | 85 | curl -X PUT --fail --verbose ${SERVER}/accounts/reviewer \ 86 | -d '{"data": {"password": "r3v13w3r"}}' \ 87 | -H 'Content-Type:application/json' 88 | 89 | 90 | Now create a collection, with a dedicated reviewer group: 91 | 92 | .. 
code-block:: bash 93 | 94 | curl -X PUT --fail --verbose ${SERVER}/buckets/main-workspace/collections/password-recipes \ 95 | -H 'Content-Type:application/json' \ 96 | -u editor:3d1t0r 97 | 98 | 99 | curl -X PATCH --fail --verbose $SERVER/buckets/main-workspace/groups/password-recipes-reviewers \ 100 | -H 'Content-Type:application/json-patch+json' \ 101 | -d '[{ "op": "add", "path": "/data/members/0", "value": "account:reviewer" }]' \ 102 | -u $BASIC_AUTH 103 | 104 | And at last, create some records, request review and approve changes. 105 | 106 | .. code-block:: bash 107 | 108 | for i in `seq 1 10`; do 109 | curl -X POST --fail --verbose ${SERVER}/buckets/main-workspace/collections/password-recipes/records \ 110 | -H 'Content-Type:application/json' \ 111 | -d "{\"data\": {\"property\": $i}}" \ 112 | -u editor:3d1t0r 113 | done 114 | 115 | curl -X PATCH --fail --verbose ${SERVER}/buckets/main-workspace/collections/password-recipes \ 116 | -H 'Content-Type:application/json' \ 117 | -d '{"data": {"status": "to-review"}}' \ 118 | -u editor:3d1t0r 119 | 120 | curl -X PATCH --fail --verbose ${SERVER}/buckets/main-workspace/collections/password-recipes \ 121 | -H 'Content-Type:application/json' \ 122 | -d '{"data": {"status": "to-sign"}}' \ 123 | -u reviewer:r3v13w3r 124 | 125 | echo "" 126 | echo "Done." 127 | 128 | 129 | With the service running locally, populating it should now just consist in running: 130 | 131 | .. code-block:: bash 132 | 133 | bash init.sh 134 | 135 | 136 | Connect Admin UI 137 | ---------------- 138 | 139 | On http://0.0.0.0:3000, when specifying http://0.0.0.0:8888/v1 in the *Server URL*, the option to login with *Kinto Account* should be shown. 140 | 141 | Using Container Tabs in Firefox, you can have one tab logged as ``editor`` with password ``3d1t0r`` and another one with ``reviewer`` and ``r3v13w3r``. 142 | 143 | 144 | Submit Patches 145 | -------------- 146 | 147 | Development `happens on Github `_. 148 | 149 | The process for a patch to reach Remote Settings is the following: 150 | 151 | * Get the patch merged on Kinto/kinto-admin 152 | * Release a new version of `kinto-admin on NPM `_ 153 | * Upgrade the kinto-admin plugin in Kinto (`example PR `_) 154 | * Release a new version of Kinto 155 | * Upgrade Kinto in `kinto-dist `_ 156 | * Release a new version of kinto-dist 157 | * STAGE is deployed automatically when a new tag of kinto-dist is published 158 | * Request a deployment in PROD on Bugzilla (`example `_) 159 | -------------------------------------------------------------------------------- /docs/tutorial-dev-server.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial-dev-server: 2 | 3 | Use the Dev Server 4 | ================== 5 | 6 | Goals 7 | ----- 8 | 9 | * Discover Remote Settings without VPN 10 | * Create remote records 11 | * Pull them from the server 12 | 13 | 14 | Prerequisites 15 | ------------- 16 | 17 | This guide assumes you have already installed and set up the following: 18 | 19 | - cURL 20 | - `jq `_ (*optional*) 21 | 22 | 23 | Introduction 24 | ------------ 25 | 26 | The DEV server is different from STAGE and PROD: 27 | 28 | - it runs the latest version of `kinto-dist `_ 29 | - it is accessible without VPN 30 | - authenticated users are allowed to create collections 31 | - it does not support push notifications 32 | 33 | .. note:: 34 | 35 | Until November 2021, we were using the Kinto demo server, which had 36 | no sign-off and was flushed everyday. 
This DEV instance is now running 37 | the same configuration as STAGE/PROD. 38 | 39 | 40 | Obtain credentials 41 | ------------------ 42 | 43 | Until `Bug 1630651 `_ happens, the easiest way to obtain your OpenID credentials is to use the admin interface. 44 | 45 | 1. `Login on the Admin UI `_ using your LDAP identity 46 | 2. Copy the authentication header (📋 icon in the top bar) 47 | 3. Test your credentials with ``curl``. When reaching out the server root URL with this bearer token you should see a ``user`` entry whose ``id`` field is ``ldap:@mozilla.com``. 48 | 49 | .. code-block:: bash 50 | 51 | SERVER=https://settings.dev.mozaws.net/v1 52 | BEARER_TOKEN="Bearer uLdb-Yafefe....2Hyl5_w" 53 | 54 | curl -s ${SERVER}/ -H "Authorization:${BEARER_TOKEN}" | jq .user 55 | 56 | 57 | (optional) Create a collection 58 | ------------------------------ 59 | 60 | All PROD collections will be available, with the same permissions and groups memberships. 61 | 62 | Choose a name for your settings that makes sense for your use-case and is specific enough (eg. ``focus-search-engines``, not ``search``). 63 | 64 | .. code-block:: bash 65 | 66 | CID=focus-search-engines 67 | 68 | Using the REST API, we create a collection: 69 | 70 | .. code-block:: bash 71 | 72 | curl -X PUT ${SERVER}/buckets/main-workspace/collections/${CID} \ 73 | -H 'Content-Type:application/json' \ 74 | -H "Authorization:${BEARER_TOKEN}" 75 | 76 | Now that we created this collection, two groups should have been created automatically. Check their presence and content with: 77 | 78 | .. code-block:: bash 79 | 80 | curl -s ${SERVER}/buckets/main-workspace/groups/${CID}-editors | jq 81 | curl -s ${SERVER}/buckets/main-workspace/groups/${CID}-reviewers | jq 82 | 83 | We create a simple record for testing purposes: 84 | 85 | .. code-block:: bash 86 | 87 | curl -X POST ${SERVER}/buckets/main-workspace/collections/${CID}/records \ 88 | -d '{"data": {"title": "example"}}' \ 89 | -H 'Content-Type:application/json' \ 90 | -H "Authorization:${BEARER_TOKEN}" 91 | 92 | And request a review in order to trigger content signatures: 93 | 94 | .. code-block:: bash 95 | 96 | curl -X PATCH ${SERVER}/buckets/main-workspace/collections/${CID} \ 97 | -H 'Content-Type:application/json' \ 98 | -d '{"data": {"status": "to-review"}}' \ 99 | -H "Authorization:${BEARER_TOKEN}" 100 | 101 | At this point, the server part is ready: it contains a public **preview** collection with one record. You can fetch its content (records and signature) with: 102 | 103 | .. code-block:: bash 104 | 105 | curl ${SERVER}/buckets/main-preview/collections/${CID}/changeset?_expected=0 # arbitrary cache-bust value 106 | 107 | And it should be listed in the special endpoint that provides all collections timestamps: 108 | 109 | .. code-block:: bash 110 | 111 | curl ${SERVER}/buckets/monitor/collections/changes/records 112 | 113 | 114 | Prepare the client 115 | ------------------ 116 | 117 | Until `support for the DEV environment `_ is added to the `Remote Settings dev tools 118 | `_, we'll change the preferences manually. 119 | 120 | .. important:: 121 | 122 | This is a critical preference, you should use a dedicated Firefox profile for development. 123 | 124 | .. 
code-block:: javascript 125 | 126 | Services.prefs.setCharPref("services.settings.loglevel", "debug"); 127 | Services.prefs.setCharPref("services.settings.server", "https://settings.dev.mozaws.net/v1"); 128 | // Dev collections are signed with the STAGE infrastructure, use STAGE's hash: 129 | Services.prefs.setCharPref("security.content.signature.root_hash", "3C:01:44:6A:BE:90:36:CE:A9:A0:9A:CA:A3:A5:20:AC:62:8F:20:A7:AE:32:CE:86:1C:B2:EF:B7:0F:A0:C7:45"); 130 | // Prevent packaged dumps to interfere. 131 | Services.prefs.setBoolPref("services.settings.load_dump", false); 132 | // The changes are not approved yet, point the client to «preview» 133 | Services.prefs.setCharPref("services.settings.default_bucket", "main-preview"); 134 | 135 | From your code, or the browser console, register the new collection by listening to the ``sync`` event: 136 | 137 | .. code-block:: bash 138 | 139 | const { RemoteSettings } = ChromeUtils.import("resource://services-settings/remote-settings.js", {}); 140 | 141 | const client = RemoteSettings("your-collection-id"); 142 | 143 | client.on("sync", ({ data }) => { 144 | // Dump records titles to stdout 145 | data.current.forEach(r => dump(`${r.title}\n`)); 146 | }); 147 | 148 | 149 | Synchronize manually 150 | -------------------- 151 | 152 | Then force a synchronization manually with: 153 | 154 | .. code-block:: javascript 155 | 156 | await RemoteSettings.pollChanges(); 157 | 158 | .. seealso:: 159 | 160 | Check out :ref:`the dedicated screencast ` for this operation! 161 | 162 | 163 | Going further 164 | ------------- 165 | 166 | Now that your client can pull data from the server, you can proceed with more advanced stuff like: 167 | 168 | * `Login on the Admin UI `_ and browse your data 169 | * Create, modify, delete remote records on the server and check out the different ``sync`` event data attributes 170 | * Define a `JSON schema on your collection `_ to validate records and have forms in the Admin UI 171 | * Attach files to your records (see :ref:`tutorial `) 172 | * Read the multi signoff tutorial (see :ref:`tutorial `), to add a reviewer to your collection 173 | * Import the data from the STAGE/PROD collection into your DEV (see :ref:`usage of kinto-wizard `.) 174 | * If you feel ready, try out the STAGE environment with VPN access, running a :ref:`local server ` etc. 175 | 176 | 177 | Delete your collection 178 | ---------------------- 179 | 180 | .. code-block:: bash 181 | 182 | curl -X DELETE ${SERVER}/buckets/main-workspace/groups/${CID}-editors -H "Authorization:${BEARER_TOKEN}" 183 | curl -X DELETE ${SERVER}/buckets/main-workspace/groups/${CID}-reviewers -H "Authorization:${BEARER_TOKEN}" 184 | curl -X DELETE ${SERVER}/buckets/main-workspace/collections/${CID} -H "Authorization:${BEARER_TOKEN}" 185 | curl -X DELETE ${SERVER}/buckets/main-preview/collections/${CID} -H "Authorization:${BEARER_TOKEN}" 186 | curl -X DELETE ${SERVER}/buckets/main/collections/${CID} -H "Authorization:${BEARER_TOKEN}" 187 | -------------------------------------------------------------------------------- /docs/tutorial-local-server.rst: -------------------------------------------------------------------------------- 1 | .. 
_tutorial-local-server: 2 | 3 | Setup a Local Server 4 | ==================== 5 | 6 | Goals 7 | ----- 8 | 9 | * Run a local server 10 | * Pull data from it 11 | * Setup advanced features like multi signoff and signatures 12 | 13 | Prerequisites 14 | ------------- 15 | 16 | This guide assumes you have already installed and set up the following: 17 | 18 | - cURL 19 | - Docker 20 | 21 | Introduction 22 | ------------ 23 | 24 | There are several ways to run a local instance of Kinto, the underlying software of Remote Settings. 25 | 26 | We will use Docker for the sake of simplicity, but you may find more convenient to `install the Python package for example `_. 27 | 28 | 29 | Quick start 30 | ----------- 31 | 32 | We will run a local container with the minimal configuration. It should be enough to hack on your Remote Settings integration in the plane. 33 | However if your goal is to setup a local server that has the same signoff features as STAGE and PROD, you can continue into the configuration of the next section. 34 | 35 | Pull the Docker container: 36 | 37 | .. code-block:: bash 38 | 39 | docker pull mozilla/kinto-dist 40 | 41 | Create a configuration file ``server.ini`` with the following content: 42 | 43 | .. code-block:: ini 44 | 45 | [app:main] 46 | use = egg:kinto 47 | kinto.includes = kinto.plugins.admin 48 | kinto.plugins.accounts 49 | kinto.plugins.history 50 | kinto_changes 51 | kinto_attachment 52 | kinto_signer 53 | 54 | kinto.storage_backend = kinto.core.storage.memory 55 | kinto.storage_url = 56 | kinto.cache_backend = kinto.core.cache.memory 57 | kinto.cache_url = 58 | kinto.permission_backend = kinto.core.permission.memory 59 | kinto.permission_url = 60 | 61 | multiauth.policies = account 62 | multiauth.policy.account.use = kinto.plugins.accounts.authentication.AccountsAuthenticationPolicy 63 | kinto.userid_hmac_secret = 284461170acd78f0be0827ef514754937474d7c922191e4f78be5c1d232b38c4 64 | 65 | kinto.bucket_create_principals = system.Authenticated 66 | kinto.account_create_principals = system.Everyone 67 | kinto.account_write_principals = account:admin 68 | 69 | kinto.experimental_permissions_endpoint = true 70 | kinto.experimental_collection_schema_validation = true 71 | kinto.changes.resources = /buckets/main 72 | kinto.attachment.base_path = /tmp/attachments 73 | kinto.attachment.base_url = 74 | kinto.attachment.extra.base_url = http://localhost:8888/attachments 75 | kinto.attachment.folder = {bucket_id}/{collection_id} 76 | kinto.signer.resources = /buckets/main-workspace -> /buckets/main-preview -> /buckets/main 77 | kinto.signer.group_check_enabled = true 78 | kinto.signer.to_review_enabled = true 79 | kinto.signer.signer_backend = kinto_signer.signer.autograph 80 | kinto.signer.main-workspace.editors_group = {collection_id}-editors 81 | kinto.signer.main-workspace.reviewers_group = {collection_id}-reviewers 82 | kinto.signer.autograph.server_url = http://autograph-server:8000 83 | # Use credentials from https://github.com/mozilla-services/autograph/blob/5b4a473/autograph.yaml 84 | kinto.signer.autograph.hawk_id = kintodev 85 | kinto.signer.autograph.hawk_secret = 3isey64n25fim18chqgewirm6z2gwva1mas0eu71e9jtisdwv6bd 86 | 87 | [uwsgi] 88 | wsgi-file = app.wsgi 89 | enable-threads = true 90 | http-socket = 0.0.0.0:8888 91 | processes = 1 92 | master = true 93 | module = kinto 94 | harakiri = 120 95 | uid = kinto 96 | gid = kinto 97 | lazy = true 98 | lazy-apps = true 99 | single-interpreter = true 100 | buffer-size = 65535 101 | post-buffering = 65535 102 | static-map = 
/attachments=/tmp/attachments 103 | 104 | [loggers] 105 | keys = root, kinto 106 | 107 | [handlers] 108 | keys = console 109 | 110 | [formatters] 111 | keys = color 112 | 113 | [logger_root] 114 | level = INFO 115 | handlers = console 116 | 117 | [logger_kinto] 118 | level = DEBUG 119 | handlers = console 120 | qualname = kinto 121 | 122 | [handler_console] 123 | class = StreamHandler 124 | args = (sys.stderr,) 125 | level = NOTSET 126 | formatter = color 127 | 128 | [formatter_color] 129 | class = logging_color_formatter.ColorFormatter 130 | 131 | Create a local folder to receive the potential records attachments, Docker should have the permissions to write it: 132 | 133 | .. code-block:: bash 134 | 135 | mkdir --mode=777 attachments # world writable 136 | 137 | Now, we will run the container with the local configuration file and attachments folder mounted: 138 | 139 | .. code-block:: bash 140 | 141 | docker run -v `pwd`/server.ini:/etc/kinto.ini \ 142 | -v `pwd`/attachments:/tmp/attachments \ 143 | -e KINTO_INI=/etc/kinto.ini \ 144 | -p 8888:8888 \ 145 | mozilla/kinto-dist 146 | 147 | Your local instance should now be running at http://localhost:8888/v1 and the Admin UI available at http://localhost:8888/v1/admin/ 148 | 149 | 150 | Create basic objects 151 | '''''''''''''''''''' 152 | 153 | Let's create an ``admin`` user: 154 | 155 | .. code-block:: bash 156 | 157 | SERVER=http://localhost:8888/v1 158 | 159 | curl -X PUT ${SERVER}/accounts/admin \ 160 | -d '{"data": {"password": "s3cr3t"}}' \ 161 | -H 'Content-Type:application/json' 162 | 163 | And a ``main`` bucket, that is publicly readable and where authenticated users can create collections: 164 | 165 | .. code-block:: bash 166 | 167 | BASIC_AUTH=admin:s3cr3t 168 | 169 | curl -X PUT ${SERVER}/buckets/main \ 170 | -d '{"permissions": {"read": ["system.Everyone"], "collection:create": ["system.Authenticated"]}}' \ 171 | -H 'Content-Type:application/json' \ 172 | -u $BASIC_AUTH 173 | 174 | Now your local server will roughly behave like the dev server, you can jump to :ref:`the other tutorial ` in order to create remote records and synchronize locally. 175 | 176 | 177 | Configure multi-signoff 178 | ----------------------- 179 | 180 | In this section, we will have a local setup that enables multi-signoff and interacts with an `Autograph instance `_ in order to sign the data. 181 | 182 | First, run the Autograph container in a separate terminal: 183 | 184 | .. code-block:: bash 185 | 186 | docker run --rm --name autograph-server mozilla/autograph 187 | 188 | Autograph generates the ``x5u`` certificate chains on startup. In order to have them available to download from Firefox, let's copy them out of the container. 189 | 190 | First, look up the certificate filename using ``ls`` from within the container: 191 | 192 | .. code-block:: bash 193 | 194 | docker exec -i -t autograph-server '/bin/sh' 195 | $ ls /tmp/autograph/chains/remotesettingsdev/ 196 | remote-settings.content-signature.mozilla.org-20190503.chain 197 | $ ^C 198 | 199 | Then, copy the file from the container into the host: 200 | 201 | .. code-block:: bash 202 | 203 | mkdir -p /tmp/autograph/chains/remotesettingsdev/ 204 | docker cp autograph-server:/tmp/autograph/chains/remotesettingsdev/remote-settings.content-signature.mozilla.org-20190503.chain /tmp/autograph/chains/remotesettingsdev/ 205 | 206 | And run the Remote Settings server with a link to ``autograph-server`` container: 207 | 208 | .. 
code-block:: bash 209 | 210 | docker run -v `pwd`/server.ini:/etc/kinto.ini \ 211 | --link autograph-server:autograph-server \ 212 | -e KINTO_INI=/etc/kinto.ini \ 213 | -p 8888:8888 \ 214 | mozilla/kinto-dist 215 | 216 | Both containers should be connected, and the heartbeat endpoint should only return positive checks: 217 | 218 | .. code-block:: bash 219 | 220 | curl http://localhost:8888/v1/__heartbeat__ 221 | 222 | {"attachments":true, "cache":true, "permission":true, "signer": true, "storage":true} 223 | 224 | In the previous section we were using the ``main`` bucket directly, but in this setup, we will create the collections in the ``main-workspace`` bucket. Data will be automatically copied to the ``main-preview`` and ``main`` when requesting review and approving changes during the multi-signoff workflow. 225 | 226 | We'll use the same ``admin`` user: 227 | 228 | .. code-block:: bash 229 | 230 | curl -X PUT ${SERVER}/accounts/admin \ 231 | -d '{"data": {"password": "s3cr3t"}}' \ 232 | -H 'Content-Type:application/json' 233 | 234 | The ``main-workspace`` bucket allows any authenticated user to create collections (like on STAGE): 235 | 236 | .. code-block:: bash 237 | 238 | BASIC_AUTH=admin:s3cr3t 239 | 240 | curl -X PUT ${SERVER}/buckets/main-workspace \ 241 | -d '{"permissions": {"collection:create": ["system.Authenticated"], "group:create": ["system.Authenticated"]}}' \ 242 | -H 'Content-Type:application/json' \ 243 | -u $BASIC_AUTH 244 | 245 | The ``main-preview`` and ``main`` buckets are (re)initialized with read-only permissions: 246 | 247 | .. code-block:: bash 248 | 249 | curl -X PUT ${SERVER}/buckets/main-preview \ 250 | -d '{"permissions": {"read": ["system.Everyone"]}}' \ 251 | -H 'Content-Type:application/json' \ 252 | -u $BASIC_AUTH 253 | 254 | curl -X PUT ${SERVER}/buckets/main \ 255 | -d '{"permissions": {"read": ["system.Everyone"]}}' \ 256 | -H 'Content-Type:application/json' \ 257 | -u $BASIC_AUTH 258 | 259 | 260 | Prepare the client 261 | ------------------ 262 | 263 | The official way to point the client at another server is using the 264 | `Remote Settings dev tools 265 | `_. This 266 | tool can set the constellation of preferences necessary to operate 267 | correctly with your local server. 268 | 269 | .. seealso:: 270 | 271 | Check out :ref:`the dedicated screencast ` for this operation! 272 | 273 | What's next? 274 | ------------ 275 | 276 | - Create a collection in the ``main-workspace`` bucket 277 | - Assign users to editors and reviewers groups 278 | - Create records, request review, preview changes in the browser, approve the changes 279 | 280 | We cover that in :ref:`the dedicated multi-signoff tutorial `. 281 | -------------------------------------------------------------------------------- /docs/tutorial-multi-signoff.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial-multi-signoff: 2 | 3 | Multi Signoff Workflow 4 | ====================== 5 | 6 | Goals 7 | ----- 8 | 9 | * Create some records 10 | * Request review 11 | * Preview changes in the browser 12 | * Approve/Decline the changes 13 | 14 | Prerequisites 15 | ------------- 16 | 17 | This guide assumes you have already installed and set up the following: 18 | 19 | - cURL 20 | - `jq `_ (*optional*) 21 | - :ref:`a local instance ` with multi signoff enabled 22 | or access/contact with two users that have permissions on STAGE/PROD 23 | 24 | We'll refer the running instance as ``$SERVER`` (eg. 
``http://localhost:8888/v1`` or ``https://settings-writer.prod.mozaws.net/v1`` via the VPN). 25 | 26 | .. note:: 27 | 28 | If you need to give additional users access to your collection on STAGE/PROD you must edit the :ref:`collection manifest `. 29 | 30 | 31 | Introduction 32 | ------------ 33 | 34 | Multi signoff basically consists in 3 steps: 35 | 36 | #. Editors create/update/delete records on the ``main-workspace`` bucket 37 | #. Editors request review. The changes are automatically published in the ``main-preview`` bucket. 38 | #. Reviewers can configure their browser to preview the changes, and will approve (or decline) the review request. If approved, the changes are published in the ``main`` bucket. 39 | 40 | .. seealso:: 41 | 42 | If you're interested by workflows in the Admin UI, check out :ref:`the screencasts ` instead! 43 | 44 | 45 | Create some users 46 | ''''''''''''''''' 47 | 48 | If you're not using STAGE or PROD, we'll need to create some ``reviewer`` and ``editor`` accounts on the server. We'll reuse the ``admin`` superuser seen in previous tutorials. 49 | 50 | .. code-block:: bash 51 | 52 | curl -X PUT ${SERVER}/accounts/editor \ 53 | -d '{"data": {"password": "3d1t0r"}}' \ 54 | -H 'Content-Type:application/json' 55 | 56 | curl -X PUT ${SERVER}/accounts/reviewer \ 57 | -d '{"data": {"password": "r3v13w3r"}}' \ 58 | -H 'Content-Type:application/json' 59 | 60 | .. note:: 61 | 62 | In STAGE or PROD, humans authenticate via LDAP/OpenID Connect. But scripted/scheduled tasks can also have their dedicated account like above. `Ask us `_! 63 | 64 | 65 | Create a collection 66 | ------------------- 67 | 68 | The ``main-workspace`` bucket is where every edit happens. 69 | 70 | We first have to create a new collection (eg. ``password-recipes``). We'll use the ``editor`` account: 71 | 72 | .. code-block:: bash 73 | 74 | curl -X PUT ${SERVER}/buckets/main-workspace/collections/password-recipes \ 75 | -H 'Content-Type:application/json' \ 76 | -u editor:3d1t0r 77 | 78 | .. note:: 79 | 80 | In PROD, only administrators are allowed to create collections, and the :ref:`request is made via Bugzilla `. 81 | 82 | Now that we created this collection, two groups should have been created automatically. Check their presence and content with: 83 | 84 | .. code-block:: bash 85 | 86 | curl -s ${SERVER}/buckets/main-workspace/groups/password-recipes-editors | jq 87 | curl -s ${SERVER}/buckets/main-workspace/groups/password-recipes-reviewers | jq 88 | 89 | 90 | Manage reviewers 91 | ---------------- 92 | 93 | Only the members of the ``password-recipes-editors`` group are allowed to request reviews for the records changes. 94 | 95 | Only the members of the ``password-recipes-reviewers`` group are allowed to approve/decline them. 96 | 97 | We will add our ``reviewer`` user above to the ``password-recipes-reviewers`` group with this `JSON PATCH `_ request: 98 | 99 | .. code-block:: bash 100 | 101 | curl -X PATCH $SERVER/buckets/main-workspace/groups/password-recipes-reviewers \ 102 | -H 'Content-Type:application/json-patch+json' \ 103 | -d '[{ "op": "add", "path": "/data/members/0", "value": "account:reviewer" }]' \ 104 | -u editor:3d1t0r 105 | 106 | .. note:: 107 | 108 | When using internal accounts the, user IDs are prefixed with ``account:``. In STAGE/PROD, most user IDs look like this: ``ldap:jdoe@mozilla.com``. 109 | 110 | .. _tutorial-multi-signoff-request-review: 111 | 112 | Change records and request review 113 | --------------------------------- 114 | 115 | .. 
seealso:: 116 | 117 | Check out :ref:`the dedicated screencast ` for the equivalent with the Admin UI! 118 | 119 | Create (or update or delete) some records: 120 | 121 | .. code-block:: bash 122 | 123 | for i in `seq 1 10`; do 124 | curl -X POST ${SERVER}/buckets/main-workspace/collections/password-recipes/records \ 125 | -H 'Content-Type:application/json' \ 126 | -d "{\"data\": {\"property\": $i}}" \ 127 | -u editor:3d1t0r 128 | done 129 | 130 | And request review: 131 | 132 | .. code-block:: bash 133 | 134 | curl -X PATCH ${SERVER}/buckets/main-workspace/collections/password-recipes \ 135 | -H 'Content-Type:application/json' \ 136 | -d '{"data": {"status": "to-review"}}' \ 137 | -u editor:3d1t0r 138 | 139 | At this point the changes were published to the ``main-preview`` bucket, which is publicly readable: 140 | 141 | .. code-block:: bash 142 | 143 | curl -s ${SERVER}/buckets/main-preview/collections/password-recipes/records | jq 144 | 145 | The collection metadata now contain some signature information: 146 | 147 | .. code-block:: bash 148 | 149 | curl -s ${SERVER}/buckets/main-preview/collections/password-recipes | jq .data.signature 150 | 151 | The monitor/changes endpoint mentions the new collection ``password-recipes``: 152 | 153 | .. code-block:: bash 154 | 155 | curl -s ${SERVER}/buckets/monitor/collections/changes/records | jq 156 | 157 | 158 | Preview changes in the browser 159 | ------------------------------ 160 | 161 | .. important:: 162 | 163 | It is recommended to use the `Remote Settings DevTools `_ instead of changing preferences manually. 164 | 165 | The following preferences must be changed to the following values in ``about:config``: 166 | 167 | * ``services.settings.server`` : ``http://localhost:8888/v1`` 168 | * ``services.settings.default_bucket`` : ``main-preview`` 169 | 170 | From your code, or the browser console, register the new collection by listening to the ``sync`` event and trigger synchronization: 171 | 172 | .. code-block:: bash 173 | 174 | const { RemoteSettings } = ChromeUtils.import("resource://services-settings/remote-settings.js", {}); 175 | 176 | RemoteSettings("password-recipes").on("sync", ({ data }) => { 177 | data.current.forEach(r => dump(`${r.property}\n`)); 178 | }); 179 | 180 | Then force a synchronization manually with: 181 | 182 | .. code-block:: javascript 183 | 184 | RemoteSettings.pollChanges(); 185 | 186 | 187 | Approve/Decline changes 188 | ----------------------- 189 | 190 | .. seealso:: 191 | 192 | Check out :ref:`the dedicated screencast ` for the equivalent with the Admin UI! 193 | 194 | Using the ``reviewer`` authentication, change the collection status to either ``to-sign`` (approve) or ``work-in-progress`` (decline). 195 | 196 | .. code-block:: bash 197 | 198 | curl -X PATCH ${SERVER}/buckets/main-workspace/collections/password-recipes \ 199 | -H 'Content-Type:application/json' \ 200 | -d '{"data": {"status": "to-sign"}}' \ 201 | -u reviewer:r3v13w3r 202 | 203 | At this point the changes were published to the ``main`` bucket, which is publicly readable: 204 | 205 | .. code-block:: bash 206 | 207 | curl -s ${SERVER}/buckets/main/collections/password-recipes/records | jq 208 | 209 | The main collection metadata now contain some signature information: 210 | 211 | .. 
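code-block:: python

    # Illustrative: read the signature metadata with Python ``requests``.
    # It reuses the same ``SERVER`` value as the cURL examples; the exact
    # fields (``signature``, ``x5u``, ``mode``, ...) depend on the signer.
    import os

    import requests

    server = os.environ["SERVER"]
    collection = requests.get(f"{server}/buckets/main/collections/password-recipes").json()
    print(collection["data"]["signature"])

Or with ``cURL`` and ``jq``:

..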
code-block:: bash 212 | 213 | curl -s ${SERVER}/buckets/main/collections/password-recipes | jq .data.signature 214 | 215 | In the browser, the following preferences must be reset to their default value: 216 | 217 | * ``services.settings.default_bucket`` : ``main`` 218 | -------------------------------------------------------------------------------- /docs/tutorial-normandy-integration.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial-normandy-integration: 2 | 3 | Normandy Integration 4 | ==================== 5 | 6 | Goals 7 | ----- 8 | 9 | * Synchronize settings on certain clients only 10 | * Make settings available only temporarily 11 | 12 | .. note:: 13 | 14 | This differs from :ref:`JEXL filters `, with which all records are synchronized but listing them locally returns a filtered set. 15 | 16 | 17 | Introduction 18 | ------------ 19 | 20 | When a collection is published on the server, it gets pulled during synchronization if at least one of the following conditions is met: 21 | 22 | * There is an instantiated client — ie. a call to ``RemoteSettings("cid")`` was done earlier 23 | * Some local data exists in the internal IndexedDB — ie. it was pulled once already 24 | * A JSON dump was shipped in mozilla-central for this collection — in ``services/settings/dumps/`` 25 | 26 | Basically, here we will leverage the fact that **if the client is never instantiated, then it will never get synchronized**, and thus will never have any local data. 27 | 28 | 29 | Disabled by default 30 | ------------------- 31 | 32 | Instantiating a client conditionally, using a preference whose default value is ``false``, does the trick! By default, users won't synchronize this collection's data. 33 | 34 | .. code-block:: javascript 35 | 36 | if (Services.prefs.getBoolPref("my-feature-pref", false)) { 37 | const client = RemoteSettings("cid"); 38 | const records = await client.get(); 39 | } 40 | 41 | 42 | Pref Flip 43 | --------- 44 | 45 | Using `Normandy preference experiments `_, you can flip the above preference to ``true`` for a sub-population of users, temporarily, etc. (using JEXL filters, by the way). 46 | 47 | When the experiment is *enabled* for the targeted users, the client will be instantiated and the collection data will be synchronized. 48 | 49 | 50 | Clean-up 51 | -------- 52 | 53 | Once the experiment is switched back to *disabled*, the local data should be deleted. We will use a preference observer to detect that the preference is switched back to ``false``: 54 | 55 | .. code-block:: javascript 56 | 57 | Services.prefs.addObserver("my-feature-pref", { 58 | async observe(aSubject, aTopic, aData) { 59 | if (!Services.prefs.getBoolPref(aData)) { 60 | // Pref was switched to false, clean up local IndexedDB data. 61 | const collection = await RemoteSettings("cid").openCollection(); 62 | await collection.clear(); 63 | } 64 | } 65 | }); 66 | 67 | 68 | You can also open a ticket to request the deletion of the collection from the server. 
69 | -------------------------------------------------------------------------------- /kinto-remote-settings/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel" 5 | ] 6 | build-backend = "setuptools.build_meta" 7 | -------------------------------------------------------------------------------- /kinto-remote-settings/setup.cfg: -------------------------------------------------------------------------------- 1 | [zest.releaser] 2 | create-wheel = yes 3 | 4 | [wheel] 5 | universal = 1 6 | 7 | [bdist_wheel] 8 | universal=1 9 | -------------------------------------------------------------------------------- /kinto-remote-settings/setup.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import setuptools 4 | 5 | 6 | path = Path(__file__).parent / "../VERSION" 7 | version = path.read_text().strip() 8 | 9 | INSTALL_REQUIRES = [ 10 | "kinto", 11 | "canonicaljson-rs", 12 | "ecdsa", 13 | "requests_hawk", 14 | ] 15 | 16 | setuptools.setup( 17 | name="kinto_remote_settings", 18 | version=version, 19 | package_dir={"": "src"}, 20 | packages=setuptools.find_packages(where="src"), 21 | install_requires=INSTALL_REQUIRES, 22 | ) 23 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/__init__.py: -------------------------------------------------------------------------------- 1 | import pkg_resources 2 | 3 | 4 | __version__ = pkg_resources.get_distribution("kinto_remote_settings").version 5 | 6 | 7 | def includeme(config): 8 | config.include("kinto_remote_settings.changes") 9 | config.include("kinto_remote_settings.signer") 10 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/changes/__init__.py: -------------------------------------------------------------------------------- 1 | from pyramid.settings import aslist 2 | 3 | from .. 
import __version__ 4 | 5 | 6 | MONITOR_BUCKET = "monitor" 7 | MONITOR_BUCKET_PATH = "/buckets/{}".format(MONITOR_BUCKET) 8 | CHANGES_COLLECTION = "changes" 9 | CHANGES_COLLECTION_PATH = "{}/collections/{}".format( 10 | MONITOR_BUCKET_PATH, CHANGES_COLLECTION 11 | ) 12 | CHANGES_RECORDS_PATH = "{}/records".format(CHANGES_COLLECTION_PATH) 13 | CHANGESET_PATH = "/buckets/{bid}/collections/{cid}/changeset" 14 | 15 | 16 | def includeme(config): 17 | settings = config.get_settings() 18 | collections = settings.get("changes.resources", []) 19 | 20 | config.add_api_capability( 21 | "changes", 22 | description="Track modifications of records in Kinto and store" 23 | " the collection timestamps into a specific bucket" 24 | " and collection.", 25 | url="http://kinto.readthedocs.io/en/latest/tutorials/" 26 | "synchronisation.html#polling-for-remote-changes", 27 | version=__version__, 28 | collections=aslist(collections), 29 | ) 30 | 31 | config.scan("kinto_remote_settings.changes.views") 32 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/changes/utils.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | from uuid import UUID 3 | 4 | from kinto.core import utils as core_utils 5 | from pyramid.settings import aslist 6 | 7 | 8 | def monitored_collections(registry): 9 | storage = registry.storage 10 | resources_uri = aslist(registry.settings.get("changes.resources", "")) 11 | collections = [] 12 | 13 | for resource_uri in resources_uri: 14 | resource_name, matchdict = core_utils.view_lookup_registry( 15 | registry, resource_uri 16 | ) 17 | if resource_name == "bucket": 18 | # Every collections in this bucket. 19 | result = storage.list_all( 20 | resource_name="collection", parent_id=resource_uri 21 | ) 22 | collections.extend([(matchdict["id"], obj["id"]) for obj in result]) 23 | 24 | elif resource_name == "collection": 25 | collections.append((matchdict["bucket_id"], matchdict["id"])) 26 | 27 | return collections 28 | 29 | 30 | def changes_object(request, bucket_id, collection_id, timestamp): 31 | """Generate an object for /buckets/monitor/collections/changes.""" 32 | http_host = request.registry.settings.get("http_host") or "" 33 | collection_uri = core_utils.instance_uri( 34 | request, "collection", bucket_id=bucket_id, id=collection_id 35 | ) 36 | uniqueid = http_host + collection_uri 37 | identifier = hashlib.md5(uniqueid.encode("utf-8")).hexdigest() 38 | entry_id = str(UUID(identifier)) 39 | 40 | return dict( 41 | id=entry_id, 42 | last_modified=timestamp, 43 | bucket=bucket_id, 44 | collection=collection_id, 45 | host=http_host, 46 | ) 47 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/backends/__init__.py: -------------------------------------------------------------------------------- 1 | from kinto import logger 2 | 3 | 4 | def heartbeat(request): 5 | """Test that signer is operationnal. 6 | 7 | :param request: current request object 8 | :type request: :class:`~pyramid:pyramid.request.Request` 9 | :returns: ``True`` is everything is ok, ``False`` otherwise. 
10 | :rtype: bool 11 | """ 12 | for signer in request.registry.signers.values(): 13 | try: 14 | signer.sign("This is a heartbeat test.") 15 | except Exception as e: 16 | logger.exception(e) 17 | return False 18 | return True 19 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/backends/autograph.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import warnings 3 | from urllib.parse import urljoin 4 | 5 | import requests 6 | from kinto import logger 7 | from requests_hawk import HawkAuth 8 | 9 | from ..utils import get_first_matching_setting 10 | from .base import SignerBase 11 | 12 | 13 | SIGNATURE_FIELDS = ["signature", "x5u"] 14 | EXTRA_SIGNATURE_FIELDS = ["mode", "public_key", "type", "signer_id", "ref"] 15 | 16 | 17 | class AutographSigner(SignerBase): 18 | def __init__(self, server_url, hawk_id, hawk_secret): 19 | self.server_url = server_url 20 | self.auth = HawkAuth(id=hawk_id, key=hawk_secret) 21 | 22 | def sign(self, payload): 23 | if isinstance(payload, str): # pragma: nocover 24 | payload = payload.encode("utf-8") 25 | 26 | b64_payload = base64.b64encode(payload) 27 | url = urljoin(self.server_url, "/sign/data") 28 | resp = requests.post( 29 | url, auth=self.auth, json=[{"input": b64_payload.decode("utf-8")}] 30 | ) 31 | resp.raise_for_status() 32 | signature_bundle = resp.json()[0] 33 | 34 | # Critical fields must be present, will raise if missing. 35 | infos = {field: signature_bundle[field] for field in SIGNATURE_FIELDS} 36 | # Other fields are returned and will be stored as part of the signature. 37 | # but client won't break if they are missing, so don't raise. 38 | infos.update( 39 | **{ 40 | field: signature_bundle[field] 41 | for field in EXTRA_SIGNATURE_FIELDS 42 | if field in signature_bundle 43 | } 44 | ) 45 | logger.info( 46 | "Obtained %s response from Autograph %s" 47 | % (resp.status_code, signature_bundle["ref"]) 48 | ) 49 | return infos 50 | 51 | 52 | def load_from_settings(settings, prefix="", *, prefixes=None): 53 | if prefixes is None: 54 | prefixes = [prefix] 55 | 56 | if prefix != "": 57 | message = ( 58 | "signer.load_from_settings `prefix` parameter is deprecated, please " 59 | "use `prefixes` instead." 60 | ) 61 | warnings.warn(message, DeprecationWarning) 62 | 63 | return AutographSigner( 64 | server_url=get_first_matching_setting( 65 | "autograph.server_url", settings, prefixes 66 | ), 67 | hawk_id=get_first_matching_setting("autograph.hawk_id", settings, prefixes), 68 | hawk_secret=get_first_matching_setting( 69 | "autograph.hawk_secret", settings, prefixes 70 | ), 71 | ) 72 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/backends/base.py: -------------------------------------------------------------------------------- 1 | class SignerBase(object): 2 | def sign(self, payload): 3 | """ 4 | Signs the specified `payload` and returns the signature metadata. 5 | 6 | :returns: A mapping with every attributes about the signature 7 | (e.g. "signature", "hash_algorithm", "signature_encoding"...) 
8 | :rtype: dict 9 | """ 10 | raise NotImplementedError 11 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/backends/exceptions.py: -------------------------------------------------------------------------------- 1 | class BadSignatureError(Exception): 2 | pass 3 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/backends/local_ecdsa.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import hashlib 3 | import warnings 4 | 5 | import ecdsa 6 | from ecdsa import NIST384p, SigningKey, VerifyingKey 7 | 8 | from ..utils import get_first_matching_setting 9 | from .base import SignerBase 10 | from .exceptions import BadSignatureError 11 | 12 | 13 | # Autograph uses this prefix prior to signing. 14 | SIGN_PREFIX = b"Content-Signature:\x00" 15 | 16 | 17 | class ECDSASigner(SignerBase): 18 | def __init__(self, private_key=None, public_key=None): 19 | if private_key is None and public_key is None: 20 | msg = "Please, specify either a private_key or public_key " "location." 21 | raise ValueError(msg) 22 | self.private_key = private_key 23 | self.public_key = public_key 24 | 25 | @classmethod 26 | def generate_keypair(cls): 27 | sk = SigningKey.generate(curve=NIST384p) 28 | vk = sk.get_verifying_key() 29 | return sk.to_pem(), vk.to_pem() 30 | 31 | def load_private_key(self): 32 | if self.private_key is None: 33 | msg = "Please, specify the private_key location." 34 | raise ValueError(msg) 35 | 36 | with open(self.private_key, "rb") as key_file: 37 | return SigningKey.from_pem(key_file.read()) 38 | 39 | def load_public_key(self): 40 | # Check settings validity 41 | if self.private_key: 42 | private_key = self.load_private_key() 43 | return private_key.get_verifying_key() 44 | elif self.public_key: 45 | with open(self.public_key, "rb") as key_file: 46 | return VerifyingKey.from_pem(key_file.read()) 47 | 48 | def sign(self, payload): 49 | if isinstance(payload, str): # pragma: nocover 50 | payload = payload.encode("utf-8") 51 | 52 | payload = SIGN_PREFIX + payload 53 | private_key = self.load_private_key() 54 | signature = private_key.sign( 55 | payload, hashfunc=hashlib.sha384, sigencode=ecdsa.util.sigencode_string 56 | ) 57 | x5u = "" 58 | enc_signature = base64.urlsafe_b64encode(signature).decode("utf-8") 59 | return {"signature": enc_signature, "x5u": x5u, "mode": "p384ecdsa"} 60 | 61 | def verify(self, payload, signature_bundle): 62 | if isinstance(payload, str): # pragma: nocover 63 | payload = payload.encode("utf-8") 64 | 65 | payload = SIGN_PREFIX + payload 66 | signature = signature_bundle["signature"] 67 | if isinstance(signature, str): # pragma: nocover 68 | signature = signature.encode("utf-8") 69 | 70 | signature_bytes = base64.urlsafe_b64decode(signature) 71 | 72 | public_key = self.load_public_key() 73 | try: 74 | public_key.verify( 75 | signature_bytes, 76 | payload, 77 | hashfunc=hashlib.sha384, 78 | sigdecode=ecdsa.util.sigdecode_string, 79 | ) 80 | except Exception as e: 81 | raise BadSignatureError(e) 82 | 83 | 84 | def load_from_settings(settings, prefix="", *, prefixes=None): 85 | if prefixes is None: 86 | prefixes = [prefix] 87 | 88 | if prefix != "": 89 | message = ( 90 | "signer.load_from_settings `prefix` parameter is deprecated, please " 91 | "use `prefixes` instead." 
92 | ) 93 | warnings.warn(message, DeprecationWarning) 94 | 95 | private_key = get_first_matching_setting("ecdsa.private_key", settings, prefixes) 96 | public_key = get_first_matching_setting("ecdsa.public_key", settings, prefixes) 97 | try: 98 | return ECDSASigner(private_key=private_key, public_key=public_key) 99 | except ValueError: 100 | msg = ( 101 | "Please specify either kinto.signer.ecdsa.private_key or " 102 | "kinto.signer.ecdsa.public_key in the settings." 103 | ) 104 | raise ValueError(msg) 105 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/events.py: -------------------------------------------------------------------------------- 1 | class BaseEvent(object): 2 | def __init__(self, request, payload, impacted_objects, resource, original_event): 3 | self.request = request 4 | self.payload = payload 5 | self.impacted_objects = impacted_objects 6 | self.resource = resource 7 | self.original_event = original_event 8 | 9 | @property 10 | def impacted_records(self): 11 | return self.impacted_objects 12 | 13 | 14 | class ReviewRequested(BaseEvent): 15 | def __init__(self, changes_count, comment, **kwargs): 16 | super().__init__(**kwargs) 17 | self.comment = comment 18 | self.changes_count = changes_count 19 | self.payload["comment"] = comment 20 | self.payload["changes_count"] = changes_count 21 | 22 | 23 | class ReviewRejected(BaseEvent): 24 | def __init__(self, comment, **kwargs): 25 | super().__init__(**kwargs) 26 | self.comment = comment 27 | self.payload["comment"] = comment 28 | 29 | 30 | class ReviewApproved(BaseEvent): 31 | def __init__(self, changes_count, **kwargs): 32 | super().__init__(**kwargs) 33 | self.changes_count = changes_count 34 | self.payload["changes_count"] = changes_count 35 | 36 | 37 | class ReviewCanceled(BaseEvent): 38 | def __init__(self, changes_count, **kwargs): 39 | super().__init__(**kwargs) 40 | self.changes_count = changes_count 41 | self.payload["changes_count"] = changes_count 42 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/generate_keypair.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from .backends.local_ecdsa import ECDSASigner 4 | 5 | 6 | def generate_keypair(private_key_location, public_key_location): 7 | private_key, public_key = ECDSASigner.generate_keypair() 8 | 9 | with open(private_key_location, "wb+") as tmp_file: 10 | tmp_file.write(private_key) 11 | 12 | with open(public_key_location, "wb+") as tmp_file: 13 | tmp_file.write(public_key) 14 | 15 | 16 | if __name__ == "__main__": # pragma: no cover 17 | if len(sys.argv) != 3: 18 | print("Usage: python -m signer.generate_keypair " "{public_key} {private_key}") 19 | sys.exit(0) 20 | generate_keypair(sys.argv[1], sys.argv[2]) 21 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/serializer.py: -------------------------------------------------------------------------------- 1 | import operator 2 | 3 | import canonicaljson 4 | 5 | 6 | def canonical_json(records, last_modified): 7 | records = (r for r in records if not r.get("deleted", False)) 8 | records = sorted(records, key=operator.itemgetter("id")) 9 | 10 | payload = {"data": records, "last_modified": "%s" % last_modified} 11 | 12 | dump = canonicaljson.dumps(payload) 13 | 14 | return dump 15 | 
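Editor's note: the serializer above defines the canonical form of a collection's records that is used when computing content signatures. A minimal sketch of how the helper behaves (record values are illustrative; it assumes the canonicaljson dependency used by this package is installed):

import json

from kinto_remote_settings.signer.serializer import canonical_json

records = [
    {"id": "b", "title": "second", "last_modified": 2},
    {"id": "a", "deleted": True, "last_modified": 3},
]

# Tombstones are dropped, the remaining records are sorted by id, and the
# collection timestamp is serialized as a string next to the data.
serialized = canonical_json(records, 1525457597166)
payload = json.loads(serialized)
assert payload["last_modified"] == "1525457597166"
assert [r["id"] for r in payload["data"]] == ["b"]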
-------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/signer/utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from collections import OrderedDict 3 | from enum import Enum 4 | 5 | from kinto.core.events import ACTIONS 6 | from kinto.core.storage.exceptions import UnicityError 7 | from kinto.core.utils import build_request, instance_uri 8 | from kinto.views import NameGenerator 9 | from pyramid.exceptions import ConfigurationError 10 | 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | 15 | PLUGIN_USERID = "plugin:kinto-signer" 16 | FIELD_LAST_MODIFIED = "last_modified" 17 | 18 | 19 | class STATUS(Enum): 20 | WORK_IN_PROGRESS = "work-in-progress" 21 | TO_SIGN = "to-sign" 22 | TO_REFRESH = "to-resign" 23 | TO_REVIEW = "to-review" 24 | TO_ROLLBACK = "to-rollback" 25 | SIGNED = "signed" 26 | 27 | def __eq__(self, other): 28 | if not hasattr(other, "value"): 29 | return self.value == other 30 | return super(STATUS, self).__eq__(other) 31 | 32 | def __ne__(self, other): 33 | return not self.__eq__(other) 34 | 35 | 36 | def _get_resource(resource): 37 | # Use the default NameGenerator in Kinto resources to check if the resource 38 | # URIs seem valid. 39 | # XXX: if a custom ID generator is specified in settings, this verification would 40 | # not result as expected. 41 | name_generator = NameGenerator() 42 | 43 | parts = resource.split("/") 44 | if len(parts) == 2: 45 | bucket, collection = parts 46 | elif len(parts) == 3 and parts[1] == "buckets": 47 | # /buckets/bid 48 | _, _, bucket = parts 49 | collection = None 50 | elif len(parts) == 5 and parts[1] == "buckets" and parts[3] == "collections": 51 | # /buckets/bid/collections/cid 52 | _, _, bucket, _, collection = parts 53 | else: 54 | raise ValueError("should be a bucket or collection URI") 55 | valid_ids = name_generator.match(bucket) and ( 56 | collection is None or name_generator.match(collection) 57 | ) 58 | if not valid_ids: 59 | raise ValueError("bucket or collection id is invalid") 60 | return {"bucket": bucket, "collection": collection} 61 | 62 | 63 | def parse_resources(raw_resources): 64 | resources = OrderedDict() 65 | 66 | lines = [line.strip() for line in raw_resources.strip().splitlines()] 67 | for res in lines: 68 | error_msg = "Malformed resource: %%s (in %r). See kinto-signer README." % res 69 | if "->" not in res and ";" not in res: 70 | raise ConfigurationError(error_msg % "not separated with '->'") 71 | 72 | try: 73 | triplet = [ 74 | r.strip() for r in res.replace(";", " ").replace("->", " ").split() 75 | ] 76 | if len(triplet) == 2: 77 | source_uri, destination_uri = triplet 78 | preview_uri = None 79 | else: 80 | source_uri, preview_uri, destination_uri = triplet 81 | except ValueError: 82 | raise ConfigurationError(error_msg % "should be a pair or a triplet") 83 | 84 | try: 85 | source = _get_resource(source_uri) 86 | destination = _get_resource(destination_uri) 87 | preview = _get_resource(preview_uri) if preview_uri else None 88 | except ValueError as e: 89 | raise ConfigurationError(error_msg % e) 90 | 91 | # Raise if mix-up of per-bucket/specific collection. 
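        # (Editor's note: illustrative settings values; both forms also appear
        # in the ini files shipped with this repository.)
        #   All per-bucket entries are accepted:
        #       /buckets/main-workspace -> /buckets/main-preview -> /buckets/main
        #   All explicit-collection entries are accepted:
        #       /buckets/security-state-workspace/collections/onecrl -> /buckets/security-state/collections/onecrl
        #   Mixing a bucket URI with a collection URI in a single entry is
        #   rejected by the check below.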
92 | sections = (source, destination) + ((preview,) if preview else tuple()) 93 | all_per_bucket = all([x["collection"] is None for x in sections]) 94 | all_explicit = all([x["collection"] is not None for x in sections]) 95 | if not all_per_bucket and not all_explicit: 96 | raise ConfigurationError( 97 | error_msg % "cannot mix bucket and collection URIs" 98 | ) 99 | 100 | # Repeated source/preview/destination. 101 | if ( 102 | len(set([tuple(s.items()) for s in (source, preview or {}, destination)])) 103 | != 3 104 | ): 105 | raise ConfigurationError( 106 | error_msg % "cannot have same value for source, " 107 | " preview or destination" 108 | ) 109 | 110 | # Resources info is returned as a mapping by bucket/collection URI. 111 | bid = source["bucket"] 112 | if source["collection"] is None: 113 | # Per bucket. 114 | key = f"/buckets/{bid}" 115 | else: 116 | cid = source["collection"] 117 | # For a specific collection. 118 | key = f"/buckets/{bid}/collections/{cid}" 119 | 120 | # We can't have the same source twice. 121 | if key in resources: 122 | raise ConfigurationError(error_msg % "cannot repeat resource") 123 | 124 | resources[key] = {"source": source, "destination": destination} 125 | if preview is not None: 126 | resources[key]["preview"] = preview 127 | 128 | # Raise if same bid/cid twice/thrice. 129 | # Theoretically we could support it, but since we never said it was possible 130 | # and have no test at all for that, prefer safety. 131 | sources = [tuple(r["source"].items()) for r in resources.values()] 132 | destinations = [tuple(r["destination"].items()) for r in resources.values()] 133 | previews = [ 134 | tuple(r["preview"].items()) for r in resources.values() if "preview" in r 135 | ] 136 | 137 | if len(set(destinations)) != len(destinations): 138 | raise ConfigurationError("Resources setting has repeated destination URI") 139 | if len(set(previews)) != len(previews): 140 | raise ConfigurationError("Resources setting has repeated preview URI") 141 | 142 | intersects = ( 143 | set(sources).intersection(set(previews)) 144 | or set(sources).intersection(set(destinations)) 145 | or set(destinations).intersection(set(previews)) 146 | ) 147 | if intersects: 148 | raise ConfigurationError("cannot repeat URIs across resources") 149 | 150 | return resources 151 | 152 | 153 | def get_first_matching_setting(setting_name, settings, prefixes, default=None): 154 | for prefix in prefixes: 155 | prefixed_setting_name = prefix + setting_name 156 | if prefixed_setting_name in settings: 157 | return settings[prefixed_setting_name] 158 | return default 159 | 160 | 161 | def ensure_resource_exists( 162 | request, resource_name, parent_id, obj, permissions, matchdict 163 | ): 164 | storage = request.registry.storage 165 | permission = request.registry.permission 166 | try: 167 | created = storage.create( 168 | resource_name=resource_name, parent_id=parent_id, obj=obj 169 | ) 170 | object_uri = instance_uri(request, resource_name, **matchdict) 171 | permission.replace_object_permissions(object_uri, permissions) 172 | notify_resource_event( 173 | request, 174 | {"method": "PUT", "path": object_uri}, 175 | matchdict=matchdict, 176 | resource_name=resource_name, 177 | parent_id=parent_id, 178 | obj=created, 179 | action=ACTIONS.CREATE, 180 | ) 181 | except UnicityError: 182 | pass 183 | 184 | 185 | def storage_create_raw( 186 | storage_backend, 187 | permission_backend, 188 | resource_name, 189 | parent_id, 190 | object_uri, 191 | object_id, 192 | permissions, 193 | ): 194 | try: 195 | 
storage_backend.create( 196 | resource_name=resource_name, parent_id=parent_id, obj={"id": object_id} 197 | ) 198 | permission_backend.replace_object_permissions( 199 | object_id=object_uri, permissions=permissions 200 | ) 201 | logger.debug(f"Created {object_uri} with permissions {permissions}") 202 | except UnicityError: 203 | logger.warn(f"{object_uri} already exists.") 204 | 205 | 206 | def notify_resource_event( 207 | request, request_options, matchdict, resource_name, parent_id, obj, action, old=None 208 | ): 209 | """Helper that triggers resource events as real requests.""" 210 | fakerequest = build_request(request, request_options) 211 | fakerequest.matchdict = matchdict 212 | fakerequest.bound_data = request.bound_data 213 | fakerequest.authn_type, fakerequest.selected_userid = PLUGIN_USERID.split(":") 214 | fakerequest.current_resource_name = resource_name 215 | 216 | # When kinto-signer copies record from one place to another, 217 | # it simulates a resource event. Since kinto-attachment 218 | # prevents from updating attachment fields, it throws an error. 219 | # The following flag will disable the kinto-attachment check. 220 | # See https://github.com/Kinto/kinto-signer/issues/256 221 | # and https://bugzilla.mozilla.org/show_bug.cgi?id=1470812 222 | has_changed_attachment = ( 223 | resource_name == "record" 224 | and action == ACTIONS.UPDATE 225 | and "attachment" in old 226 | and old["attachment"] != obj.get("attachment") 227 | ) 228 | if has_changed_attachment: 229 | fakerequest._attachment_auto_save = True 230 | 231 | fakerequest.notify_resource_event( 232 | parent_id=parent_id, 233 | timestamp=obj[FIELD_LAST_MODIFIED], 234 | data=obj, 235 | action=action, 236 | old=old, 237 | ) 238 | 239 | 240 | def records_equal(a, b): 241 | ignore_fields = ("last_modified", "schema") 242 | ac = {k: v for k, v in a.items() if k not in ignore_fields} 243 | bc = {k: v for k, v in b.items() if k not in ignore_fields} 244 | return ac == bc 245 | 246 | 247 | def records_diff(left, right): 248 | left_by_id = {r["id"]: r for r in left} 249 | results = [] 250 | for r in right: 251 | rid = r["id"] 252 | left_record = left_by_id.pop(rid, None) 253 | if left_record is None: 254 | # In right, but not in left (deleted!) 255 | results.append({**r, "deleted": True}) 256 | elif not records_equal(left_record, r): 257 | # Differ between left and right 258 | results.append(left_record) 259 | # In left, but not in right. 260 | results.extend(left_by_id.values()) 261 | return results 262 | -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/testing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla-services/kinto-dist/1511dbbb76ee69970bbd29eb56b7811974ce2ba1/kinto-remote-settings/src/kinto_remote_settings/testing/__init__.py -------------------------------------------------------------------------------- /kinto-remote-settings/src/kinto_remote_settings/testing/mock_listener.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides a mock for signer tests. Normally a file like this would 3 | be defined in a tests directory separate from this src directory, but to 4 | provide this module, Kinto expects to find it in a package found in 5 | `site-packages`. 
6 | """ 7 | 8 | 9 | class Listener(object): 10 | def __init__(self): 11 | self.received = [] 12 | 13 | def __call__(self, event): 14 | self.received.append(event) 15 | 16 | 17 | listener = Listener() 18 | 19 | 20 | def load_from_config(config, prefix): 21 | return listener 22 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/changes/__init__.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | import os 3 | 4 | from kinto import main as kinto_main 5 | from kinto.core.testing import BaseWebTest as CoreWebTest 6 | from kinto.core.testing import get_user_headers 7 | 8 | 9 | here = os.path.abspath(os.path.dirname(__file__)) 10 | 11 | 12 | class BaseWebTest(CoreWebTest): 13 | api_prefix = "v1" 14 | entry_point = kinto_main 15 | config = "config.ini" 16 | 17 | def __init__(self, *args, **kwargs): 18 | super(BaseWebTest, self).__init__(*args, **kwargs) 19 | self.headers.update(get_user_headers("mat")) 20 | self.headers.update({"Origin": "http://localhost:9999"}) 21 | 22 | @classmethod 23 | def get_app_settings(cls, extras=None): 24 | ini_path = os.path.join(here, cls.config) 25 | config = configparser.ConfigParser() 26 | config.read(ini_path) 27 | settings = dict(config.items("app:main")) 28 | settings.update(extras or {}) 29 | return settings 30 | 31 | def setUp(self): 32 | super(BaseWebTest, self).setUp() 33 | self.create_collection("blocklists", "certificates") 34 | 35 | def create_collection(self, bucket_id, collection_id): 36 | bucket_uri = "/buckets/%s" % bucket_id 37 | self.app.put_json(bucket_uri, {}, headers=self.headers) 38 | collection_uri = bucket_uri + "/collections/%s" % collection_id 39 | self.app.put_json(collection_uri, {}, headers=self.headers) 40 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/changes/config.ini: -------------------------------------------------------------------------------- 1 | [app:main] 2 | use = egg:kinto 3 | kinto.userid_hmac_secret = some-secret-string 4 | kinto.http_host = www.kinto-storage.org 5 | 6 | kinto.includes = kinto.plugins.default_bucket 7 | kinto_remote_settings.changes 8 | 9 | multiauth.policies = basicauth 10 | kinto.bucket_read_principals = 11 | basicauth:c6c27f0c7297ba7d4abd2a70c8a2cb88a06a3bb793817ef2c85fe8a709b08022 12 | kinto.bucket_write_principals = 13 | basicauth:c6c27f0c7297ba7d4abd2a70c8a2cb88a06a3bb793817ef2c85fe8a709b08022 14 | 15 | kinto.changes.resources = /buckets/blocklists 16 | # Since redirect disabled. 17 | kinto.changes.since_max_age_days = -1 18 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/changes/test_changes.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import unittest 3 | from unittest import mock 4 | 5 | from kinto_remote_settings import __version__ 6 | 7 | from . 
import BaseWebTest 8 | 9 | 10 | SAMPLE_RECORD = {"data": {"dev-edition": True}} 11 | HOUR_AGO = int(datetime.datetime.now().timestamp() * 1000) - 3600 12 | 13 | 14 | class UpdateChangesTest(BaseWebTest, unittest.TestCase): 15 | changes_uri = "/buckets/monitor/collections/changes/records" 16 | records_uri = "/buckets/blocklists/collections/certificates/records" 17 | 18 | def setUp(self): 19 | super(UpdateChangesTest, self).setUp() 20 | self.app.post_json(self.records_uri, SAMPLE_RECORD, headers=self.headers) 21 | 22 | def test_parent_bucket_and_collection_dont_have_to_exist(self): 23 | self.app.delete( 24 | "/buckets/monitor/collections/changes", 25 | headers=self.headers, 26 | status=(403, 404), 27 | ) 28 | self.app.get(self.changes_uri) # Not failing 29 | self.app.delete("/buckets/monitor", headers=self.headers, status=(403, 404)) 30 | self.app.get(self.changes_uri) # Not failing 31 | 32 | def test_parent_bucket_and_collection_can_exist(self): 33 | self.app.put("/buckets/monitor", headers=self.headers) 34 | resp = self.app.get(self.changes_uri) # Not failing 35 | self.assertEqual(len(resp.json["data"]), 1) 36 | 37 | self.app.put("/buckets/monitor/collections/changes", headers=self.headers) 38 | resp = self.app.get(self.changes_uri) # Not failing 39 | self.assertEqual(len(resp.json["data"]), 1) 40 | 41 | def test_a_change_record_is_updated_per_bucket_collection(self): 42 | resp = self.app.get(self.changes_uri) 43 | before_timestamp = resp.json["data"][0]["last_modified"] 44 | before_id = resp.json["data"][0]["id"] 45 | 46 | self.app.post_json(self.records_uri, SAMPLE_RECORD, headers=self.headers) 47 | 48 | resp = self.app.get(self.changes_uri) 49 | 50 | after_timestamp = resp.json["data"][0]["last_modified"] 51 | after_id = resp.json["data"][0]["id"] 52 | self.assertEqual(before_id, after_id) 53 | self.assertNotEqual(before_timestamp, after_timestamp) 54 | 55 | def test_only_collections_specified_in_settings_are_monitored(self): 56 | resp = self.app.get(self.changes_uri, headers=self.headers) 57 | change_record = resp.json["data"][0] 58 | records_uri = "/buckets/default/collections/certificates/records" 59 | 60 | self.app.post_json(records_uri, SAMPLE_RECORD, headers=self.headers) 61 | 62 | resp = self.app.get(self.changes_uri, headers=self.headers) 63 | after = resp.json["data"][0] 64 | self.assertEqual(change_record["id"], after["id"]) 65 | self.assertEqual(change_record["last_modified"], after["last_modified"]) 66 | 67 | def test_the_resource_configured_can_be_a_collection_uri(self): 68 | with mock.patch.dict( 69 | self.app.app.registry.settings, 70 | [("changes.resources", "/buckets/blocklists/collections/certificates")], 71 | ): 72 | resp = self.app.get(self.changes_uri) 73 | self.assertEqual(len(resp.json["data"]), 1) 74 | 75 | def test_returns_304_if_no_change_occured(self): 76 | resp = self.app.get(self.changes_uri) 77 | before_timestamp = resp.headers["ETag"] 78 | self.app.get( 79 | self.changes_uri, headers={"If-None-Match": before_timestamp}, status=304 80 | ) 81 | 82 | def test_returns_412_with_if_none_match_star(self): 83 | self.app.get(self.changes_uri, headers={"If-None-Match": "*"}, status=412) 84 | 85 | def test_no_cache_control_is_returned_if_not_configured(self): 86 | resp = self.app.get(self.changes_uri) 87 | assert "max-age" not in resp.headers["Cache-Control"] 88 | 89 | resp = self.app.get(self.changes_uri + '?_expected="42"') 90 | assert "max-age" not in resp.headers["Cache-Control"] 91 | 92 | def test_returns_empty_list_if_no_resource_configured(self): 93 | with 
mock.patch.dict( 94 | self.app.app.registry.settings, [("changes.resources", "")] 95 | ): 96 | resp = self.app.get(self.changes_uri) 97 | self.assertEqual(resp.json["data"], []) 98 | 99 | def test_change_record_has_greater_last_modified_of_collection_of_records(self): 100 | resp = self.app.post_json(self.records_uri, SAMPLE_RECORD, headers=self.headers) 101 | last_modified = resp.json["data"]["last_modified"] 102 | resp = self.app.get(self.changes_uri, headers=self.headers) 103 | change_last_modified = resp.json["data"][0]["last_modified"] 104 | self.assertGreaterEqual(change_last_modified, last_modified) 105 | 106 | def test_record_with_old_timestamp_does_update_changes(self): 107 | resp = self.app.post_json(self.records_uri, SAMPLE_RECORD, headers=self.headers) 108 | old_record = SAMPLE_RECORD.copy() 109 | old_record["data"]["last_modified"] = 42 110 | self.app.post_json(self.records_uri, old_record, headers=self.headers) 111 | 112 | resp = self.app.get(self.changes_uri, headers=self.headers) 113 | change_last_modified = resp.json["data"][0]["last_modified"] 114 | self.assertNotEqual(change_last_modified, 42) 115 | 116 | def test_change_record_has_server_host_attribute(self): 117 | self.app.post_json(self.records_uri, SAMPLE_RECORD, headers=self.headers) 118 | 119 | resp = self.app.get(self.changes_uri, headers=self.headers) 120 | change = resp.json["data"][0] 121 | self.assertEqual(change["host"], "www.kinto-storage.org") 122 | 123 | def test_change_record_has_bucket_and_collection_attributes(self): 124 | self.app.post_json(self.records_uri, SAMPLE_RECORD, headers=self.headers) 125 | 126 | resp = self.app.get(self.changes_uri, headers=self.headers) 127 | change = resp.json["data"][0] 128 | self.assertEqual(change["bucket"], "blocklists") 129 | self.assertEqual(change["collection"], "certificates") 130 | 131 | def test_changes_capability_exposed(self): 132 | resp = self.app.get("/") 133 | capabilities = resp.json["capabilities"] 134 | self.assertIn("changes", capabilities) 135 | expected = { 136 | "description": "Track modifications of records in Kinto and store " 137 | "the collection timestamps into a specific bucket " 138 | "and collection.", 139 | "collections": ["/buckets/blocklists"], 140 | "url": "http://kinto.readthedocs.io/en/latest/tutorials/" 141 | "synchronisation.html#polling-for-remote-changes", 142 | "version": __version__, 143 | } 144 | self.assertEqual(expected, capabilities["changes"]) 145 | 146 | 147 | class CacheExpiresTest(BaseWebTest, unittest.TestCase): 148 | changes_uri = "/buckets/monitor/collections/changes/records" 149 | 150 | @classmethod 151 | def get_app_settings(cls, extras=None): 152 | settings = super().get_app_settings(extras) 153 | settings["monitor.changes.record_cache_expires_seconds"] = "60" 154 | settings["monitor.changes.record_cache_maximum_expires_seconds"] = "3600" 155 | return settings 156 | 157 | def test_cache_expires_headers_are_supported(self): 158 | resp = self.app.get(self.changes_uri) 159 | assert "max-age=60" in resp.headers["Cache-Control"] 160 | 161 | def test_cache_expires_header_is_maximum_with_cache_busting(self): 162 | resp = self.app.get(self.changes_uri + f"?_since={HOUR_AGO}&_expected=42") 163 | assert "max-age=3600" in resp.headers["Cache-Control"] 164 | 165 | def test_cache_expires_header_is_default_with_filter(self): 166 | # The _since just filters on lower bound of timestamps, if data changes 167 | # we don't want to cache for too long. 
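        # (Editor's note) Contrast with the cache-busting test above: adding
        # `_expected` raises max-age to the configured maximum (3600 here),
        # while a bare `_since` keeps the default of 60 seconds.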
168 | resp = self.app.get(self.changes_uri + f"?_since={HOUR_AGO}") 169 | assert "max-age=60" in resp.headers["Cache-Control"] 170 | 171 | def test_cache_expires_header_is_default_with_concurrency_control(self): 172 | # The `If-None-Match` header is just a way to obtain a 304 instead of a 200 173 | # with an empty list. In the client code [0] it is always used in conjonction 174 | # with _since={last-etag} 175 | # [0] https://searchfox.org/mozilla-central/rev/93905b66/services/settings/Utils.jsm#70-73 # noqa: 501 176 | headers = {"If-None-Match": f'"{HOUR_AGO}"'} 177 | resp = self.app.get(self.changes_uri + f'?_since="{HOUR_AGO}"', headers=headers) 178 | assert "max-age=60" in resp.headers["Cache-Control"] 179 | 180 | 181 | class OldSinceRedirectTest(BaseWebTest, unittest.TestCase): 182 | changes_uri = "/buckets/monitor/collections/changes/records" 183 | 184 | @classmethod 185 | def get_app_settings(cls, extras=None): 186 | settings = super().get_app_settings(extras) 187 | settings["kinto.changes.since_max_age_days"] = "2" 188 | settings["kinto.changes.http_host"] = "cdn-host" 189 | return settings 190 | 191 | def test_redirects_and_drops_since_if_too_old(self): 192 | resp = self.app.get(self.changes_uri + "?_since=42") 193 | self.assertEqual(resp.status_code, 307) 194 | self.assertEqual( 195 | resp.headers["Location"], 196 | "https://cdn-host/v1/buckets/monitor/collections/changes/records", 197 | ) 198 | 199 | # Try again with a real timestamp older than allowed in settings. 200 | timestamp = int( 201 | (datetime.datetime.now() - datetime.timedelta(days=3)).timestamp() * 1000 202 | ) 203 | resp = self.app.get(self.changes_uri + f"?_since={timestamp}") 204 | self.assertEqual(resp.status_code, 307) 205 | 206 | def test_redirects_keep_other_querystring_params(self): 207 | resp = self.app.get(self.changes_uri + "?_since=42&_expected=%22123456%22") 208 | self.assertEqual(resp.status_code, 307) 209 | self.assertIn("/records?_expected=%22123456%22", resp.headers["Location"]) 210 | 211 | def test_does_not_redirect_if_not_old_enough(self): 212 | timestamp = int( 213 | (datetime.datetime.now() - datetime.timedelta(days=1)).timestamp() * 1000 214 | ) 215 | resp = self.app.get(self.changes_uri + f"?_since={timestamp}") 216 | self.assertEqual(resp.status_code, 200) 217 | 218 | def test_redirects_sends_cache_control(self): 219 | response = self.app.get(self.changes_uri + "?_since=42") 220 | self.assertEqual(response.status_code, 307) 221 | self.assertIn("Expires", response.headers) 222 | self.assertIn("Cache-Control", response.headers) 223 | self.assertEqual(response.headers["Cache-Control"], "max-age=86400") 224 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/changes/test_changeset.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from email.utils import parsedate_to_datetime 3 | from unittest import mock 4 | 5 | from kinto.core.storage import exceptions as storage_exceptions 6 | from kinto.core.testing import get_user_headers 7 | 8 | from . 
import BaseWebTest 9 | 10 | 11 | SAMPLE_RECORD = {"data": {"dev-edition": True}} 12 | 13 | 14 | class ChangesetViewTest(BaseWebTest, unittest.TestCase): 15 | records_uri = "/buckets/blocklists/collections/certificates/records" 16 | changeset_uri = ( 17 | "/buckets/blocklists/collections/certificates/changeset?_expected=42" 18 | ) 19 | 20 | def setUp(self): 21 | super(ChangesetViewTest, self).setUp() 22 | self.app.post_json(self.records_uri, SAMPLE_RECORD, headers=self.headers) 23 | 24 | @classmethod 25 | def get_app_settings(cls, extras=None): 26 | settings = super().get_app_settings(extras) 27 | settings["blocklists.certificates.record_cache_expires_seconds"] = 1234 28 | return settings 29 | 30 | def test_changeset_is_accessible(self): 31 | resp = self.app.head(self.records_uri, headers=self.headers) 32 | records_timestamp = int(resp.headers["ETag"][1:-1]) 33 | 34 | resp = self.app.get(self.changeset_uri, headers=self.headers) 35 | data = resp.json 36 | 37 | assert "metadata" in data 38 | assert "timestamp" in data 39 | assert "changes" in data 40 | assert data["metadata"]["id"] == "certificates" 41 | assert len(data["changes"]) == 1 42 | assert data["changes"][0]["dev-edition"] is True 43 | assert data["timestamp"] == records_timestamp 44 | 45 | def test_last_modified_header_is_set(self): 46 | resp = self.app.get(self.changeset_uri, headers=self.headers) 47 | timestamp = resp.json["timestamp"] 48 | 49 | dt = parsedate_to_datetime(resp.headers["Last-Modified"]) 50 | 51 | assert dt.timestamp() == int(timestamp / 1000) 52 | 53 | def test_changeset_can_be_filtered(self): 54 | resp = self.app.post_json(self.records_uri, {}, headers=self.headers) 55 | before = resp.json["data"]["last_modified"] 56 | self.app.post_json(self.records_uri, {}, headers=self.headers) 57 | 58 | resp = self.app.get(self.changeset_uri, headers=self.headers) 59 | assert len(resp.json["changes"]) == 3 60 | 61 | resp = self.app.get( 62 | self.changeset_uri + f'&_since="{before}"', headers=self.headers 63 | ) 64 | assert len(resp.json["changes"]) == 1 65 | 66 | def test_tombstones_are_returned(self): 67 | resp = self.app.get(self.records_uri, headers=self.headers) 68 | before = resp.headers["ETag"] 69 | # Delete one record. 70 | self.app.delete(self.records_uri + "?_limit=1", headers=self.headers) 71 | 72 | resp = self.app.get( 73 | self.changeset_uri + f"&_since={before}", headers=self.headers 74 | ) 75 | 76 | assert len(resp.json["changes"]) == 1 77 | assert "deleted" in resp.json["changes"][0] 78 | 79 | def test_changeset_is_not_publicly_accessible(self): 80 | # By default other users cannot read. 81 | user_headers = { 82 | **self.headers, 83 | **get_user_headers("some-user"), 84 | } 85 | self.app.get(self.changeset_uri, status=401) 86 | self.app.get(self.changeset_uri, headers=user_headers, status=403) 87 | 88 | # Add read permissions to everyone. 
89 | self.app.patch_json( 90 | "/buckets/blocklists", 91 | {"permissions": {"read": ["system.Everyone"]}}, 92 | headers=self.headers, 93 | ) 94 | 95 | self.app.get(self.changeset_uri, headers=user_headers, status=200) 96 | self.app.get(self.changeset_uri, status=200) 97 | 98 | def test_changeset_returns_404_if_collection_is_unknown(self): 99 | changeset_uri = "/buckets/blocklists/collections/fjuvrb/changeset?_expected=42" 100 | self.app.get(changeset_uri, headers=self.headers, status=404) 101 | 102 | def test_timestamp_is_validated(self): 103 | self.app.get( 104 | self.changeset_uri + "&_since=abc", headers=self.headers, status=400 105 | ) 106 | self.app.get( 107 | self.changeset_uri + "&_since=42", headers=self.headers, status=400 108 | ) 109 | self.app.get( 110 | self.changeset_uri + "&_since=*)(!(objectClass=*)", 111 | headers=self.headers, 112 | status=400, 113 | ) 114 | self.app.get(self.changeset_uri + '&_since="42"', headers=self.headers) 115 | 116 | def test_expected_param_is_mandatory(self): 117 | self.app.get(self.changeset_uri.split("?")[0], headers=self.headers, status=400) 118 | 119 | def test_limit_is_supported(self): 120 | self.app.post_json(self.records_uri, {}, headers=self.headers) 121 | 122 | resp = self.app.get(self.changeset_uri + "&_limit=1", headers=self.headers) 123 | assert len(resp.json["changes"]) == 1 124 | 125 | def test_extra_param_is_allowed(self): 126 | self.app.get(self.changeset_uri + "&_extra=abc", headers=self.headers) 127 | 128 | def test_cache_control_headers_are_set(self): 129 | resp = self.app.get(self.changeset_uri, headers=self.headers) 130 | assert resp.headers["Cache-Control"] == "max-age=1234" 131 | 132 | def test_raises_original_backend_errors(self): 133 | backend = self.app.app.registry.storage 134 | with mock.patch.object(backend, "resource_timestamp") as mocked: 135 | mocked.side_effect = storage_exceptions.BackendError 136 | changeset_uri = ( 137 | "/buckets/blocklists/collections/certificates/changeset?_expected=42" 138 | ) 139 | self.app.get(changeset_uri, headers=self.headers, status=503) 140 | 141 | 142 | class ReadonlyTest(BaseWebTest, unittest.TestCase): 143 | changeset_uri = "/buckets/monitor/collections/changes/changeset?_expected=42" 144 | 145 | def setUp(self): 146 | super().setUp() 147 | # Mark storage as readonly. 148 | # We can't do it from test app settings because we need 149 | # the initial bucket and collection). 
150 | self.app.app.registry.storage.readonly = True 151 | 152 | def test_changeset_returns_404_if_collection_is_unknown(self): 153 | changeset_uri = "/buckets/blocklists/collections/fjuvrb/changeset?_expected=42" 154 | self.app.get(changeset_uri, headers=self.headers, status=404) 155 | 156 | 157 | class MonitorChangesetViewTest(BaseWebTest, unittest.TestCase): 158 | records_uri = "/buckets/blocklists/collections/{cid}/records" 159 | changeset_uri = "/buckets/monitor/collections/changes/changeset?_expected=42" 160 | 161 | def setUp(self): 162 | super().setUp() 163 | self.create_collection("blocklists", "cfr") 164 | self.app.post_json( 165 | self.records_uri.format(cid="cfr"), SAMPLE_RECORD, headers=self.headers 166 | ) 167 | self.app.post_json( 168 | self.records_uri.format(cid="certificates"), 169 | SAMPLE_RECORD, 170 | headers=self.headers, 171 | ) 172 | 173 | @classmethod 174 | def get_app_settings(cls, extras=None): 175 | settings = super().get_app_settings(extras) 176 | settings["kinto.changes.since_max_age_days"] = 1 177 | return settings 178 | 179 | def test_changeset_exists_for_monitor_changes(self): 180 | resp = self.app.head( 181 | self.records_uri.format(cid="certificates"), headers=self.headers 182 | ) 183 | records_timestamp = int(resp.headers["ETag"].strip('"')) 184 | 185 | resp = self.app.get(self.changeset_uri) 186 | data = resp.json 187 | 188 | assert data["timestamp"] == records_timestamp 189 | assert len(data["changes"]) == 2 190 | assert data["changes"][0]["collection"] == "certificates" 191 | 192 | def test_changeset_redirects_if_since_is_too_old(self): 193 | resp = self.app.get(self.changeset_uri + '&_since="42"') 194 | 195 | assert resp.status_code == 307 196 | assert resp.headers["Location"] == ( 197 | "https://www.kinto-storage.org/v1" 198 | "/buckets/monitor/collections/changes/changeset?_expected=42" 199 | ) 200 | 201 | def test_limit_is_supported(self): 202 | resp = self.app.get(self.changeset_uri + "&_limit=1", headers=self.headers) 203 | assert len(resp.json["changes"]) == 1 204 | 205 | def test_filter_by_collection(self): 206 | resp = self.app.get( 207 | self.changeset_uri + "&bucket=blocklists&collection=cfr", 208 | headers=self.headers, 209 | ) 210 | assert len(resp.json["changes"]) == 1 211 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/changes/test_utils.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from unittest import mock 3 | 4 | from kinto_remote_settings.changes.utils import changes_object 5 | from pyramid.request import Request 6 | 7 | 8 | class ChangesRecordTest(unittest.TestCase): 9 | def test_single_hardcoded(self): 10 | request = Request.blank(path="/") 11 | request.route_path = mock.Mock() 12 | request.route_path.return_value = "/buckets/a/collections/b" 13 | request.registry = mock.Mock() 14 | request.registry.settings = {} 15 | timestamp = 1525457597166 16 | entry = changes_object(request, "a", "b", timestamp) 17 | 18 | self.assertEqual( 19 | entry, 20 | { 21 | "bucket": "a", 22 | "collection": "b", 23 | "host": "", 24 | "id": "9527d115-6191-fa49-a530-8fbfc4997755", 25 | "last_modified": timestamp, 26 | }, 27 | ) 28 | 29 | def test_another_hardcoded(self): 30 | request = Request.blank(path="/") 31 | request.route_path = mock.Mock() 32 | request.route_path.return_value = "/buckets/a/collections/b" 33 | request.registry = mock.Mock() 34 | request.registry.settings = {"http_host": "https://localhost:443"} 35 | timestamp 
= 1525457597166 36 | entry = changes_object(request, "a", "b", timestamp) 37 | 38 | self.assertEqual( 39 | entry, 40 | { 41 | "bucket": "a", 42 | "collection": "b", 43 | "host": "https://localhost:443", 44 | "id": "fa48a96d-1600-f561-8645-3395acb08a5a", 45 | "last_modified": timestamp, 46 | }, 47 | ) 48 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla-services/kinto-dist/1511dbbb76ee69970bbd29eb56b7811974ce2ba1/kinto-remote-settings/tests/signer/__init__.py -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/config/autocreate.ini: -------------------------------------------------------------------------------- 1 | [server:main] 2 | use = egg:waitress#main 3 | host = 0.0.0.0 4 | port = 8888 5 | 6 | [app:main] 7 | use = egg:kinto 8 | multiauth.policies = basicauth 9 | 10 | kinto.includes = kinto_remote_settings.signer 11 | kinto.storage_backend = kinto.core.storage.memory 12 | kinto.storage_url = 13 | kinto.cache_backend = kinto.core.cache.memory 14 | kinto.cache_url = 15 | kinto.permission_backend = kinto.core.permission.memory 16 | kinto.permission_url = 17 | 18 | kinto.bucket_create_principals = system.Everyone 19 | kinto.bucket_read_principals = system.Everyone 20 | kinto.bucket_write_principals = system.Everyone 21 | 22 | kinto.signer.auto_create_resources = true 23 | kinto.signer.auto_create_resources_principals = account:admin system.Authenticated 24 | kinto.signer.resources = 25 | /buckets/main-workspace -> /buckets/main-preview -> /buckets/main 26 | /buckets/security-state-workspace/collections/onecrl -> /buckets/security-state/collections/onecrl 27 | kinto.signer.ecdsa.private_key = /path/to/private.pem 28 | kinto.signer.ecdsa.public_key = /path/to/public.pem 29 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/config/autograph.yaml: -------------------------------------------------------------------------------- 1 | server: 2 | listen: "0.0.0.0:8000" 3 | # cache 500k nonces to protect from authorization replay attacks 4 | noncecachesize: 524288 5 | idletimeout: 60s 6 | readtimeout: 60s 7 | writetimeout: 60s 8 | 9 | statsd: 10 | addr: "127.0.0.1:8125" 11 | namespace: "autograph." 
12 | buflen: 1 13 | 14 | heartbeat: 15 | hsmchecktimeout: 100ms 16 | dbchecktimeout: 150ms 17 | 18 | # The keys below are testing keys that do not grant any power 19 | signers: 20 | # a p384 key, the standard 21 | - id: appkey1 22 | type: contentsignature 23 | x5u: https://bucket.example.net/appkey1.pem 24 | privatekey: | 25 | -----BEGIN EC PARAMETERS----- 26 | BgUrgQQAIg== 27 | -----END EC PARAMETERS----- 28 | -----BEGIN EC PRIVATE KEY----- 29 | MIGkAgEBBDAzX2TrGOr0WE92AbAl+nqnpqh25pKCLYNMTV2hJHztrkVPWOp8w0mh 30 | scIodK8RMpagBwYFK4EEACKhZANiAATiTcWYbt0Wg63dO7OXvpptNG0ryxv+v+Js 31 | JJ5Upr3pFus5fZyKxzP9NPzB+oFhL/xw3jMx7X5/vBGaQ2sJSiNlHVkqZgzYF6JQ 32 | 4yUyiqTY7v67CyfUPA1BJg/nxOS9m3o= 33 | -----END EC PRIVATE KEY----- 34 | 35 | - id: appkey2 36 | type: contentsignature 37 | x5u: https://bucket.example.net/appkey2.pem 38 | privatekey: | 39 | -----BEGIN EC PRIVATE KEY----- 40 | MIGkAgEBBDDzB8n4AOghssIP8Y1/qBLAh3uW8w5i75fZG6qQDTGbOGZbpooeQvdk 41 | agQT/dt8/KqgBwYFK4EEACKhZANiAARBmh+6Wc7CvAWylhyEsw5CMy7eSC5nfOo9 42 | rszb+aoRxxe/PFrebfgqIBGx8EpXN+DT6QX5dZTLqcjj7GMWx50UvJ1+kIKTLbUx 43 | +8Q7KIqH8pQ40GJbFySJS01LyNkqgqc= 44 | -----END EC PRIVATE KEY----- 45 | 46 | 47 | authorizations: 48 | - id: alice 49 | key: fs5wgcer9qj819kfptdlp8gm227ewxnzvsuj9ztycsx08hfhzu 50 | signers: 51 | - appkey1 52 | - id: bob 53 | key: 9vh6bhlc10y63ow2k4zke7k0c3l9hpr8mo96p92jmbfqngs9e7d 54 | signers: 55 | - appkey2 56 | 57 | monitoring: 58 | key: 19zd4w3xirb5syjgdx8atq6g91m03bdsmzjifs2oddivswlu9qs 59 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/config/bob.ecdsa.private.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MIGkAgEBBDDzB8n4AOghssIP8Y1/qBLAh3uW8w5i75fZG6qQDTGbOGZbpooeQvdk 3 | agQT/dt8/KqgBwYFK4EEACKhZANiAARBmh+6Wc7CvAWylhyEsw5CMy7eSC5nfOo9 4 | rszb+aoRxxe/PFrebfgqIBGx8EpXN+DT6QX5dZTLqcjj7GMWx50UvJ1+kIKTLbUx 5 | +8Q7KIqH8pQ40GJbFySJS01LyNkqgqc= 6 | -----END EC PRIVATE KEY----- 7 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/config/ecdsa.private.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN EC PARAMETERS----- 2 | BgUrgQQAIg== 3 | -----END EC PARAMETERS----- 4 | -----BEGIN EC PRIVATE KEY----- 5 | MIGkAgEBBDAzX2TrGOr0WE92AbAl+nqnpqh25pKCLYNMTV2hJHztrkVPWOp8w0mh 6 | scIodK8RMpagBwYFK4EEACKhZANiAATiTcWYbt0Wg63dO7OXvpptNG0ryxv+v+Js 7 | JJ5Upr3pFus5fZyKxzP9NPzB+oFhL/xw3jMx7X5/vBGaQ2sJSiNlHVkqZgzYF6JQ 8 | 4yUyiqTY7v67CyfUPA1BJg/nxOS9m3o= 9 | -----END EC PRIVATE KEY----- 10 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/config/ecdsa.public.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN PUBLIC KEY----- 2 | MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE4k3FmG7dFoOt3Tuzl76abTRtK8sb/r/i 3 | bCSeVKa96RbrOX2ciscz/TT8wfqBYS/8cN4zMe1+f7wRmkNrCUojZR1ZKmYM2Bei 4 | UOMlMoqk2O7+uwsn1DwNQSYP58TkvZt6 5 | -----END PUBLIC KEY----- 6 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/config/signer.ini: -------------------------------------------------------------------------------- 1 | [server:main] 2 | use = egg:waitress#main 3 | host = 0.0.0.0 4 | port = 8888 5 | 6 | [app:main] 7 | use = egg:kinto 8 | kinto.userid_hmac_secret = aujourd'hui encore, il fait beau en bretagne. 
9 | multiauth.policies = basicauth 10 | 11 | kinto.includes = kinto_remote_settings.signer 12 | kinto.plugins.history 13 | kinto.plugins.flush 14 | kinto_emailer 15 | 16 | signer.to_review_enabled = true 17 | 18 | kinto.signer.resources = 19 | /buckets/alice/collections/source -> /buckets/alice/collections/destination 20 | /buckets/alice/collections/from -> /buckets/alice/collections/preview -> /buckets/alice/collections/to 21 | /buckets/bob/collections/source-> /buckets/bob/collections/destination 22 | /buckets/stage -> /buckets/preview -> /buckets/prod 23 | 24 | kinto.signer.signer_backend = kinto_remote_settings.signer.backends.autograph 25 | kinto.signer.autograph.server_url = http://localhost:8000 26 | kinto.signer.autograph.hawk_id = alice 27 | kinto.signer.autograph.hawk_secret = fs5wgcer9qj819kfptdlp8gm227ewxnzvsuj9ztycsx08hfhzu 28 | 29 | kinto.signer.bob.autograph.hawk_id = bob 30 | kinto.signer.bob.autograph.hawk_secret = 9vh6bhlc10y63ow2k4zke7k0c3l9hpr8mo96p92jmbfqngs9e7d 31 | 32 | [loggers] 33 | keys = root, kinto 34 | 35 | [handlers] 36 | keys = console 37 | 38 | [formatters] 39 | keys = color 40 | 41 | [logger_root] 42 | level = INFO 43 | handlers = console 44 | 45 | [logger_kinto] 46 | level = DEBUG 47 | handlers = console 48 | qualname = kinto 49 | 50 | [handler_console] 51 | class = StreamHandler 52 | args = (sys.stderr,) 53 | level = NOTSET 54 | formatter = color 55 | 56 | [formatter_color] 57 | class = logging_color_formatter.ColorFormatter 58 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/support.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | try: 5 | import ConfigParser as configparser 6 | except ImportError: 7 | import configparser 8 | 9 | from kinto import main as kinto_main 10 | from kinto.core.testing import BaseWebTest as CoreWebTest 11 | from kinto.core.testing import DummyRequest, get_user_headers 12 | 13 | 14 | __all__ = ["BaseWebTest", "DummyRequest", "get_user_headers"] 15 | 16 | 17 | here = os.path.abspath(os.path.dirname(__file__)) 18 | 19 | 20 | class BaseWebTest(CoreWebTest): 21 | api_prefix = "v1" 22 | entry_point = kinto_main 23 | config = "config/signer.ini" 24 | 25 | def __init__(self, *args, **kwargs): 26 | super(BaseWebTest, self).__init__(*args, **kwargs) 27 | self.headers.update(get_user_headers("mat")) 28 | 29 | @classmethod 30 | def get_app_settings(cls, extras=None): 31 | ini_path = os.path.join(here, cls.config) 32 | config = configparser.ConfigParser() 33 | config.read(ini_path) 34 | settings = dict(config.items("app:main")) 35 | settings["signer.to_review_enabled"] = False 36 | return settings 37 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/test_autocreate_resources.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from .support import BaseWebTest 4 | 5 | 6 | class AutocreateTest(BaseWebTest, unittest.TestCase): 7 | config = "config/autocreate.ini" 8 | 9 | def test_resources_were_created(self): 10 | write_perms = sorted(["system.Authenticated", "account:admin"]) 11 | 12 | r = self.app.get("/buckets/main-workspace", headers=self.headers) 13 | assert sorted(r.json["permissions"]["write"]) == write_perms 14 | 15 | r = self.app.get("/buckets/security-state-workspace", headers=self.headers) 16 | assert sorted(r.json["permissions"]["write"]) == write_perms 17 | 18 | r = 
self.app.get( 19 | "/buckets/security-state-workspace/collections/onecrl", headers=self.headers 20 | ) 21 | assert sorted(r.json["permissions"]["write"]) == write_perms 22 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/test_generate_keypair.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | import unittest 3 | 4 | from kinto_remote_settings.signer.backends.local_ecdsa import ECDSASigner 5 | from kinto_remote_settings.signer.generate_keypair import generate_keypair 6 | 7 | 8 | class KeyPairGeneratorTest(unittest.TestCase): 9 | def test_generated_keypairs_can_be_loaded(self): 10 | private_key_location = tempfile.mktemp("private_key") 11 | public_key_location = tempfile.mktemp("public_key") 12 | 13 | generate_keypair(private_key_location, public_key_location) 14 | backend = ECDSASigner(private_key=private_key_location) 15 | backend.sign("test") 16 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/test_serializer.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from kinto_remote_settings.signer.serializer import canonical_json 4 | 5 | 6 | # 7 | # Kinto specific 8 | # 9 | 10 | 11 | def test_supports_records_as_iterators(): 12 | records = iter([{"bar": "baz", "last_modified": "45678", "id": "1"}]) 13 | canonical_json(records, "45678") 14 | 15 | 16 | def test_provides_records_in_data_along_last_modified(): 17 | records = [{"bar": "baz", "last_modified": "45678", "id": "1"}] 18 | serialized = json.loads(canonical_json(records, "45678")) 19 | assert "data" in serialized 20 | assert "last_modified" in serialized 21 | 22 | 23 | def test_orders_records_by_id(): 24 | records = [ 25 | {"bar": "baz", "last_modified": "45678", "id": "2"}, 26 | {"foo": "bar", "last_modified": "12345", "id": "1"}, 27 | ] 28 | serialized = json.loads(canonical_json(records, "45678")) 29 | assert serialized["last_modified"] == "45678" 30 | assert serialized["data"][0]["id"] == "1" 31 | assert serialized["data"][1]["id"] == "2" 32 | 33 | 34 | def test_removes_deleted_items(): 35 | record = {"bar": "baz", "last_modified": "45678", "id": "2"} 36 | deleted_record = {"deleted": True, "last_modified": "12345", "id": "1"} 37 | records = [deleted_record, record] 38 | serialized = canonical_json(records, "42") 39 | assert [record] == json.loads(serialized)["data"] 40 | 41 | 42 | # 43 | # Standard 44 | # 45 | 46 | 47 | def test_does_not_alter_records(): 48 | records = [ 49 | {"foo": "bar", "last_modified": "12345", "id": "1"}, 50 | {"bar": "baz", "last_modified": "45678", "id": "2"}, 51 | ] 52 | canonical_json(records, "45678") 53 | 54 | assert records == [ 55 | {"foo": "bar", "last_modified": "12345", "id": "1"}, 56 | {"bar": "baz", "last_modified": "45678", "id": "2"}, 57 | ] 58 | 59 | 60 | def test_preserves_data(): 61 | records = [ 62 | {"foo": "bar", "last_modified": "12345", "id": "1"}, 63 | {"bar": "baz", "last_modified": "45678", "id": "2"}, 64 | ] 65 | serialized = canonical_json(records, "45678") 66 | assert records == json.loads(serialized)["data"] 67 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/test_signer.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import unittest 4 | from base64 import urlsafe_b64decode, 
urlsafe_b64encode 5 | from unittest import mock 6 | 7 | import pytest 8 | from kinto_remote_settings.signer.backends import ( 9 | autograph, 10 | base, 11 | exceptions, 12 | local_ecdsa, 13 | ) 14 | 15 | 16 | SIGNATURE = ( 17 | "ikfq6qOV85vR7QaNCTldVvvtcNpPIICqqMp3tfyiT7fHCgFNq410SFnIfjAPgSa" 18 | "jEtxxyGtZFMoI_BzO_1y5oShLtX0LH4wx_Wft7wz17T7fFqpDQ9hFZzTOPBwZUIbx" 19 | ) 20 | 21 | 22 | def save_key(key, key_name): 23 | tmp = tempfile.mktemp(key_name) 24 | with open(tmp, "wb+") as tmp_file: 25 | tmp_file.write(key) 26 | return tmp 27 | 28 | 29 | class BaseSignerTest(unittest.TestCase): 30 | def test_base_method_raises_unimplemented(self): 31 | signer = base.SignerBase() 32 | with pytest.raises(NotImplementedError): 33 | signer.sign("TEST") 34 | 35 | 36 | class ECDSASignerTest(unittest.TestCase): 37 | @classmethod 38 | def get_backend(cls, **options): 39 | return local_ecdsa.ECDSASigner(**options) 40 | 41 | @classmethod 42 | def setUpClass(cls): 43 | sk, vk = local_ecdsa.ECDSASigner.generate_keypair() 44 | cls.sk_location = save_key(sk, "signing-key") 45 | cls.vk_location = save_key(vk, "verifying-key") 46 | cls.signer = cls.get_backend(private_key=cls.sk_location) 47 | 48 | @classmethod 49 | def tearDownClass(cls): 50 | os.remove(cls.sk_location) 51 | os.remove(cls.vk_location) 52 | 53 | def test_keyloading_fails_if_no_settings(self): 54 | backend = self.get_backend(public_key=self.vk_location) 55 | with pytest.raises(ValueError): 56 | backend.load_private_key() 57 | 58 | def test_key_loading_works(self): 59 | key = self.signer.load_private_key() 60 | assert key is not None 61 | 62 | def test_signer_roundtrip(self): 63 | signature = self.signer.sign("this is some text") 64 | self.signer.verify("this is some text", signature) 65 | 66 | def test_base64url_encoding(self): 67 | signature_bundle = self.signer.sign("this is some text") 68 | b64signature = signature_bundle["signature"] 69 | 70 | decoded_signature = urlsafe_b64decode(b64signature.encode("utf-8")) 71 | b64urlsignature = urlsafe_b64encode(decoded_signature).decode("utf-8") 72 | signature_bundle["signature"] = b64urlsignature 73 | signature_bundle["signature_encoding"] = "rs_base64url" 74 | 75 | self.signer.verify("this is some text", signature_bundle) 76 | 77 | def test_wrong_signature_raises_an_error(self): 78 | signature_bundle = {"signature": SIGNATURE, "mode": "p384ecdsa", "ref": ""} 79 | 80 | with pytest.raises(exceptions.BadSignatureError): 81 | self.signer.verify("Text not matching with the sig.", signature_bundle) 82 | 83 | def test_signer_returns_a_base64_string(self): 84 | signature = self.signer.sign("this is some text")["signature"] 85 | urlsafe_b64decode(signature.encode("utf-8")) # Raise if wrong. 86 | 87 | def test_load_private_key_raises_if_no_key_specified(self): 88 | with pytest.raises(ValueError): 89 | self.get_backend().load_private_key() 90 | 91 | def test_public_key_can_be_loaded_from_public_key_pem(self): 92 | signer = self.get_backend(public_key=self.vk_location) 93 | signer.load_public_key() 94 | 95 | def test_public_key_can_be_loaded_from_private_key_pem(self): 96 | signer = self.get_backend(private_key=self.sk_location) 97 | signer.load_public_key() 98 | 99 | def test_load_public_key_raises_an_error_if_missing_settings(self): 100 | with pytest.raises(ValueError) as excinfo: 101 | self.get_backend() 102 | msg = "Please, specify either a private_key or public_key location." 
103 | assert str(excinfo.value) == msg 104 | 105 | @mock.patch("kinto_remote_settings.signer.backends.local_ecdsa.ECDSASigner") 106 | def test_load_from_settings(self, mocked_signer): 107 | local_ecdsa.load_from_settings( 108 | { 109 | "signer.ecdsa.private_key": mock.sentinel.private_key, 110 | "signer.ecdsa.public_key": mock.sentinel.public_key, 111 | }, 112 | prefix="signer.", 113 | ) 114 | 115 | mocked_signer.assert_called_with( 116 | private_key=mock.sentinel.private_key, public_key=mock.sentinel.public_key 117 | ) 118 | 119 | def test_load_from_settings_fails_if_no_public_or_private_key(self): 120 | with pytest.raises(ValueError) as excinfo: 121 | local_ecdsa.load_from_settings({}, "") 122 | msg = ( 123 | "Please specify either kinto.signer.ecdsa.private_key or " 124 | "kinto.signer.ecdsa.public_key in the settings." 125 | ) 126 | assert str(excinfo.value) == msg 127 | 128 | 129 | class AutographSignerTest(unittest.TestCase): 130 | def setUp(self): 131 | self.signer = autograph.AutographSigner( 132 | hawk_id="alice", 133 | hawk_secret="fs5wgcer9qj819kfptdlp8gm227ewxnzvsuj9ztycsx08hfhzu", 134 | server_url="http://localhost:8000", 135 | ) 136 | 137 | @mock.patch("kinto_remote_settings.signer.backends.autograph.requests") 138 | def test_request_is_being_crafted_with_payload_as_input(self, requests): 139 | response = mock.MagicMock() 140 | response.json.return_value = [{"signature": SIGNATURE, "x5u": "", "ref": ""}] 141 | requests.post.return_value = response 142 | signature_bundle = self.signer.sign("test data") 143 | requests.post.assert_called_with( 144 | "http://localhost:8000/sign/data", 145 | auth=self.signer.auth, 146 | json=[{"input": "dGVzdCBkYXRh"}], 147 | ) 148 | assert signature_bundle["signature"] == SIGNATURE 149 | 150 | @mock.patch("kinto_remote_settings.signer.backends.autograph.AutographSigner") 151 | def test_load_from_settings(self, mocked_signer): 152 | autograph.load_from_settings( 153 | { 154 | "signer.autograph.server_url": mock.sentinel.server_url, 155 | "signer.autograph.hawk_id": mock.sentinel.hawk_id, 156 | "signer.autograph.hawk_secret": mock.sentinel.hawk_secret, 157 | }, 158 | prefix="signer.", 159 | ) 160 | 161 | mocked_signer.assert_called_with( 162 | server_url=mock.sentinel.server_url, 163 | hawk_id=mock.sentinel.hawk_id, 164 | hawk_secret=mock.sentinel.hawk_secret, 165 | ) 166 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/test_signer_attachments.py: -------------------------------------------------------------------------------- 1 | import random 2 | import re 3 | import string 4 | import unittest 5 | from unittest import mock 6 | 7 | from .support import BaseWebTest, get_user_headers 8 | 9 | 10 | RE_ISO8601 = re.compile(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}\+00:00") 11 | 12 | 13 | class SignerAttachmentsTest(BaseWebTest, unittest.TestCase): 14 | def setUp(self): 15 | super().setUp() 16 | # Patch calls to Autograph. 
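        # (Editor's note) The mock below replaces the `requests` module used by
        # the Autograph backend: every signing request gets a fresh fake
        # signature bundle, so these tests run without a real Autograph server.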
17 | patch = mock.patch("kinto_remote_settings.signer.backends.autograph.requests") 18 | self.addCleanup(patch.stop) 19 | self.mocked_autograph = patch.start() 20 | 21 | def fake_sign(): 22 | fake_signature = "".join(random.sample(string.ascii_lowercase, 10)) 23 | return [ 24 | { 25 | "signature": "", 26 | "hash_algorithm": "", 27 | "signature_encoding": "", 28 | "content-signature": fake_signature, 29 | "x5u": "", 30 | "ref": "", 31 | } 32 | ] 33 | 34 | self.mocked_autograph.post.return_value.json.side_effect = fake_sign 35 | 36 | self.headers = get_user_headers("tarte:en-pion") 37 | resp = self.app.get("/", headers=self.headers) 38 | self.userid = resp.json["user"]["id"] 39 | 40 | self.other_headers = get_user_headers("Sam:Wan Heilss") 41 | resp = self.app.get("/", headers=self.other_headers) 42 | self.other_userid = resp.json["user"]["id"] 43 | 44 | self.app.put_json(self.source_bucket, headers=self.headers) 45 | self.app.put_json(self.source_collection, headers=self.headers) 46 | self.app.put_json( 47 | self.source_bucket + "/groups/reviewers", 48 | {"data": {"members": [self.other_userid]}}, 49 | headers=self.headers, 50 | ) 51 | 52 | @classmethod 53 | def get_app_settings(cls, extras=None): 54 | settings = super().get_app_settings(extras) 55 | 56 | settings["kinto.includes"] += " kinto_attachment" 57 | 58 | cls.source_bucket = "/buckets/alice" 59 | cls.source_collection = cls.source_bucket + "/collections/scid" 60 | cls.destination_bucket = "/buckets/alice" 61 | cls.destination_collection = cls.destination_bucket + "/collections/dcid" 62 | 63 | settings["kinto.signer.resources"] = "%s -> %s" % ( 64 | cls.source_collection, 65 | cls.destination_collection, 66 | ) 67 | 68 | settings["signer.to_review_enabled"] = "false" 69 | 70 | settings["attachment.base_path"] = "/tmp" 71 | 72 | return settings 73 | 74 | def initialize(self): 75 | r = self.app.post_json( 76 | self.source_collection + "/records", 77 | {"data": {"title": "hello"}}, 78 | headers=self.headers, 79 | ) 80 | r = r.json["data"] 81 | uri = self.source_collection + "/records/" + r["id"] + "/attachment" 82 | self.upload_file( 83 | uri=uri, 84 | files=[("attachment", "image.jpg", b"--fake--")], 85 | headers=self.headers, 86 | ) 87 | 88 | self.app.patch_json( 89 | self.source_collection, 90 | {"data": {"status": "to-review"}}, 91 | headers=self.headers, 92 | ) 93 | self.app.patch_json( 94 | self.source_collection, 95 | {"data": {"status": "to-sign"}}, 96 | headers=self.other_headers, 97 | ) 98 | 99 | def upload_file(self, uri, files, params=[], headers={}): 100 | content_type, body = self.app.encode_multipart(params, files) 101 | headers = headers.copy() 102 | headers["Content-Type"] = content_type 103 | resp = self.app.post(uri, body, headers=headers) 104 | return resp 105 | 106 | def test_attachment_is_enabled(self): 107 | r = self.app.get("/") 108 | assert "attachments" in r.json["capabilities"] 109 | 110 | def test_attachment_is_published_on_final_collection(self): 111 | self.initialize() 112 | 113 | r = self.app.get(self.destination_collection + "/records", headers=self.headers) 114 | record = r.json["data"][0] 115 | 116 | assert "attachment" in record 117 | 118 | def test_attachment_can_be_replaced(self): 119 | self.initialize() 120 | 121 | r = self.app.get(self.destination_collection + "/records", headers=self.headers) 122 | record = r.json["data"][0] 123 | 124 | attachment_before = record["attachment"]["hash"] 125 | 126 | uri = self.source_collection + "/records/" + record["id"] + "/attachment" 127 | self.upload_file( 128 
| uri=uri, 129 | files=[("attachment", "image.jpg", b"--other-fake--")], 130 | headers=self.headers, 131 | ) 132 | 133 | self.app.patch_json( 134 | self.source_collection, 135 | {"data": {"status": "to-review"}}, 136 | headers=self.headers, 137 | ) 138 | self.app.patch_json( 139 | self.source_collection, 140 | {"data": {"status": "to-sign"}}, 141 | headers=self.other_headers, 142 | ) 143 | 144 | r = self.app.get(self.destination_collection + "/records", headers=self.headers) 145 | record = r.json["data"][0] 146 | attachment_after = record["attachment"]["hash"] 147 | 148 | assert attachment_before != attachment_after 149 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/test_updater.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import unittest 3 | from unittest import mock 4 | 5 | import pytest 6 | from kinto.core.storage.exceptions import RecordNotFoundError 7 | from kinto_remote_settings.signer.updater import LocalUpdater 8 | from kinto_remote_settings.signer.utils import STATUS 9 | 10 | from .support import DummyRequest 11 | 12 | 13 | class LocalUpdaterTest(unittest.TestCase): 14 | def setUp(self): 15 | self.storage = mock.MagicMock() 16 | self.permission = mock.MagicMock() 17 | self.signer_instance = mock.MagicMock() 18 | self.updater = LocalUpdater( 19 | source={"bucket": "sourcebucket", "collection": "sourcecollection"}, 20 | destination={"bucket": "destbucket", "collection": "destcollection"}, 21 | signer=self.signer_instance, 22 | storage=self.storage, 23 | permission=self.permission, 24 | ) 25 | 26 | # Resource events are bypassed completely in this test suite. 27 | patcher = mock.patch("kinto_remote_settings.signer.utils.build_request") 28 | self.addCleanup(patcher.stop) 29 | patcher.start() 30 | 31 | def patch(self, obj, *args, **kwargs): 32 | patcher = mock.patch.object(obj, *args, **kwargs) 33 | self.addCleanup(patcher.stop) 34 | return patcher.start() 35 | 36 | def test_updater_raises_if_resources_are_not_set_properly(self): 37 | with pytest.raises(ValueError) as excinfo: 38 | LocalUpdater( 39 | source={"bucket": "source"}, 40 | destination={}, 41 | signer=self.signer_instance, 42 | storage=self.storage, 43 | permission=self.permission, 44 | ) 45 | assert str(excinfo.value) == ( 46 | "Resources should contain both " "bucket and collection" 47 | ) 48 | 49 | def test_get_source_records_asks_storage_for_records(self): 50 | self.storage.list_all.return_value = [] 51 | 52 | self.updater.get_source_records() 53 | self.storage.list_all.assert_called_with( 54 | resource_name="record", 55 | parent_id="/buckets/sourcebucket/collections/sourcecollection", 56 | ) 57 | 58 | def test_get_destination_records(self): 59 | # We want to test get_destination_records with some records. 
60 | records = [ 61 | {"id": idx, "foo": "bar %s" % idx, "last_modified": 42 - idx} 62 | for idx in range(1, 4) 63 | ] 64 | self.storage.list_all.return_value = records 65 | self.updater.get_destination_records() 66 | self.storage.resource_timestamp.assert_called_with( 67 | resource_name="record", 68 | parent_id="/buckets/destbucket/collections/destcollection", 69 | ) 70 | self.storage.list_all.assert_called_with( 71 | resource_name="record", 72 | parent_id="/buckets/destbucket/collections/destcollection", 73 | ) 74 | 75 | def test_push_records_to_destination(self): 76 | self.patch(self.updater, "get_destination_records", return_value=([], 1324)) 77 | records = [ 78 | {"id": idx, "foo": "bar %s" % idx, "last_modified": 42 - idx} 79 | for idx in range(1, 4) 80 | ] 81 | self.patch(self.updater, "get_source_records", return_value=(records, 1325)) 82 | self.updater.push_records_to_destination(DummyRequest()) 83 | assert self.storage.update.call_count == 3 84 | assert self.storage.update.call_args_list[0][1] == { 85 | "obj": {"id": 1, "foo": "bar 1"}, 86 | "object_id": 1, 87 | "parent_id": "/buckets/destbucket/collections/destcollection", 88 | "resource_name": "record", 89 | } 90 | 91 | def test_push_records_removes_deleted_records(self): 92 | self.patch(self.updater, "get_destination_records", return_value=([], 1324)) 93 | records = [ 94 | {"id": idx, "foo": "bar %s" % idx, "last_modified": 42 - idx} 95 | for idx in range(0, 2) 96 | ] 97 | records.extend( 98 | [{"id": idx, "deleted": True, "last_modified": 42} for idx in range(3, 5)] 99 | ) 100 | self.patch(self.updater, "get_source_records", return_value=(records, 1325)) 101 | self.updater.push_records_to_destination(DummyRequest()) 102 | assert self.updater.get_source_records.call_count == 1 103 | assert self.storage.update.call_count == 2 104 | assert self.storage.delete.call_count == 2 105 | 106 | def test_push_records_skip_already_deleted_records(self): 107 | # In case the record doesn't exists in the destination 108 | # a RecordNotFoundError is raised. 109 | self.storage.delete.side_effect = RecordNotFoundError() 110 | self.patch(self.updater, "get_destination_records", return_value=([], 1324)) 111 | records = [ 112 | {"id": idx, "foo": "bar %s" % idx, "last_modified": 42 - idx} 113 | for idx in range(0, 2) 114 | ] 115 | records.extend( 116 | [{"id": idx, "deleted": True, "last_modified": 42} for idx in range(3, 5)] 117 | ) 118 | self.patch(self.updater, "get_source_records", return_value=(records, 1325)) 119 | # Calling the updater should not raise the RecordNotFoundError. 
120 | self.updater.push_records_to_destination(DummyRequest()) 121 | 122 | def test_push_records_to_destination_with_no_destination_changes(self): 123 | self.patch(self.updater, "get_destination_records", return_value=([], None)) 124 | records = [ 125 | {"id": idx, "foo": "bar %s" % idx, "last_modified": 42 - idx} 126 | for idx in range(1, 4) 127 | ] 128 | self.patch(self.updater, "get_source_records", return_value=(records, 1325)) 129 | self.updater.push_records_to_destination(DummyRequest()) 130 | assert self.updater.get_source_records.call_count == 1 131 | assert self.storage.update.call_count == 3 132 | 133 | def test_set_destination_signature_modifies_the_destination_collection(self): 134 | self.storage.get.return_value = {"id": 1234, "last_modified": 1234} 135 | self.updater.set_destination_signature( 136 | mock.sentinel.signature, {}, DummyRequest() 137 | ) 138 | 139 | self.storage.update.assert_called_with( 140 | resource_name="collection", 141 | object_id="destcollection", 142 | parent_id="/buckets/destbucket", 143 | obj={"id": 1234, "signature": mock.sentinel.signature}, 144 | ) 145 | 146 | def test_set_destination_signature_copies_kinto_admin_ui_fields(self): 147 | self.storage.get.return_value = { 148 | "id": 1234, 149 | "sort": "-age", 150 | "last_modified": 1234, 151 | } 152 | self.updater.set_destination_signature( 153 | mock.sentinel.signature, 154 | {"displayFields": ["name"], "sort": "size"}, 155 | DummyRequest(), 156 | ) 157 | 158 | self.storage.update.assert_called_with( 159 | resource_name="collection", 160 | object_id="destcollection", 161 | parent_id="/buckets/destbucket", 162 | obj={ 163 | "id": 1234, 164 | "signature": mock.sentinel.signature, 165 | "sort": "-age", 166 | "displayFields": ["name"], 167 | }, 168 | ) 169 | 170 | def test_update_source_status_modifies_the_source_collection(self): 171 | self.storage.get.return_value = { 172 | "id": 1234, 173 | "last_modified": 1234, 174 | "status": "to-sign", 175 | } 176 | 177 | with mock.patch("kinto_remote_settings.signer.updater.datetime") as mocked: 178 | mocked.datetime.now().isoformat.return_value = "2018-04-09" 179 | self.updater.update_source_status(STATUS.SIGNED, DummyRequest()) 180 | 181 | self.storage.update.assert_called_with( 182 | resource_name="collection", 183 | object_id="sourcecollection", 184 | parent_id="/buckets/sourcebucket", 185 | obj={ 186 | "id": 1234, 187 | "last_review_by": "basicauth:bob", 188 | "last_review_date": "2018-04-09", 189 | "last_signature_by": "basicauth:bob", 190 | "last_signature_date": "2018-04-09", 191 | "status": "signed", 192 | }, 193 | ) 194 | 195 | def test_create_destination_updates_collection_permissions(self): 196 | collection_uri = "/buckets/destbucket/collections/destcollection" 197 | request = DummyRequest() 198 | request.route_path.return_value = collection_uri 199 | self.updater.create_destination(request) 200 | request.registry.permission.replace_object_permissions.assert_called_with( 201 | collection_uri, {"read": ("system.Everyone",)} 202 | ) 203 | 204 | def test_create_destination_creates_bucket(self): 205 | request = DummyRequest() 206 | self.updater.create_destination(request) 207 | request.registry.storage.create.assert_any_call( 208 | resource_name="bucket", parent_id="", obj={"id": "destbucket"} 209 | ) 210 | 211 | def test_create_destination_creates_collection(self): 212 | bucket_id = "/buckets/destbucket" 213 | request = DummyRequest() 214 | self.updater.create_destination(request) 215 | request.registry.storage.create.assert_any_call( 216 | 
resource_name="collection", 217 | parent_id=bucket_id, 218 | obj={"id": "destcollection"}, 219 | ) 220 | 221 | def test_sign_and_update_destination(self): 222 | records = [ 223 | {"id": idx, "foo": "bar %s" % idx, "last_modified": idx} 224 | for idx in range(1, 3) 225 | ] 226 | self.storage.list_all.return_value = records 227 | 228 | self.patch(self.storage, "update_records") 229 | self.patch(self.updater, "get_destination_records", return_value=([], "0")) 230 | self.patch(self.updater, "push_records_to_destination") 231 | self.patch(self.updater, "set_destination_signature") 232 | 233 | self.updater.sign_and_update_destination(DummyRequest(), {"id": "source"}) 234 | 235 | assert self.updater.get_destination_records.call_count == 1 236 | assert self.updater.push_records_to_destination.call_count == 1 237 | assert self.updater.set_destination_signature.call_count == 1 238 | 239 | def test_refresh_signature_does_not_push_records(self): 240 | self.storage.list_all.return_value = [] 241 | self.patch(self.updater, "set_destination_signature") 242 | self.patch(self.updater, "push_records_to_destination") 243 | 244 | self.updater.refresh_signature(DummyRequest(), "signed") 245 | 246 | assert self.updater.set_destination_signature.call_count == 1 247 | assert self.updater.push_records_to_destination.call_count == 0 248 | 249 | def test_refresh_signature_restores_status_on_source(self): 250 | self.storage.list_all.return_value = [] 251 | with mock.patch("kinto_remote_settings.signer.updater.datetime") as mocked: 252 | mocked.datetime.now.return_value = datetime.datetime(2010, 10, 31) 253 | 254 | self.updater.refresh_signature(DummyRequest(), "work-in-progress") 255 | 256 | new_attrs = { 257 | "status": "work-in-progress", 258 | "last_signature_by": "basicauth:bob", 259 | "last_signature_date": "2010-10-31T00:00:00", 260 | } 261 | self.storage.update.assert_any_call( 262 | resource_name="collection", 263 | parent_id="/buckets/sourcebucket", 264 | object_id="sourcecollection", 265 | obj=new_attrs, 266 | ) 267 | -------------------------------------------------------------------------------- /kinto-remote-settings/tests/signer/test_utils.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import pytest 4 | from kinto_remote_settings.signer import utils 5 | from pyramid.exceptions import ConfigurationError 6 | 7 | 8 | class ParseResourcesTest(unittest.TestCase): 9 | def test_missing_arrow_raises_an_exception(self): 10 | raw_resources = """ 11 | foo bar 12 | """ 13 | with pytest.raises(ConfigurationError): 14 | utils.parse_resources(raw_resources) 15 | 16 | def test_non_local_first_argument_raises_an_exception(self): 17 | raw_resources = """ 18 | foo -> bar 19 | bar -> baz 20 | """ 21 | with pytest.raises(ConfigurationError): 22 | utils.parse_resources(raw_resources) 23 | 24 | def test_malformed_url_raises_an_exception(self): 25 | raw_resources = """ 26 | /buckets/sbid/scid -> /buckets/dbid/collections/dcid 27 | """ 28 | with pytest.raises(ConfigurationError): 29 | utils.parse_resources(raw_resources) 30 | 31 | def test_outnumbered_urls_raises_an_exception(self): 32 | raw_resources = ( 33 | "/buckets/sbid/scid -> " 34 | "/buckets/dbid/collections/dcid -> " 35 | "/buckets/dbid/collections/dcid -> " 36 | "/buckets/sbid/scid" 37 | ) 38 | with pytest.raises(ConfigurationError): 39 | utils.parse_resources(raw_resources) 40 | 41 | def test_returned_resources_match_the_expected_format(self): 42 | raw_resources = """ 43 | 
/buckets/sbid/collections/scid -> /buckets/dbid/collections/dcid 44 | """ 45 | resources = utils.parse_resources(raw_resources) 46 | assert resources == { 47 | "/buckets/sbid/collections/scid": { 48 | "source": {"bucket": "sbid", "collection": "scid"}, 49 | "destination": {"bucket": "dbid", "collection": "dcid"}, 50 | } 51 | } 52 | 53 | def test_returned_resources_match_the_legacy_format(self): 54 | raw_resources = """ 55 | sbid/scid -> dbid/dcid 56 | """ 57 | resources = utils.parse_resources(raw_resources) 58 | assert resources == { 59 | "/buckets/sbid/collections/scid": { 60 | "source": {"bucket": "sbid", "collection": "scid"}, 61 | "destination": {"bucket": "dbid", "collection": "dcid"}, 62 | } 63 | } 64 | 65 | raw_resources = """ 66 | sbid/scid ; dbid/dcid 67 | """ 68 | resources = utils.parse_resources(raw_resources) 69 | assert resources == { 70 | "/buckets/sbid/collections/scid": { 71 | "source": {"bucket": "sbid", "collection": "scid"}, 72 | "destination": {"bucket": "dbid", "collection": "dcid"}, 73 | } 74 | } 75 | 76 | def test_spaces_are_supported(self): 77 | raw_resources = """ 78 | /buckets/bid1/collections/scid1 -> /buckets/bid1/collections/dcid1 79 | /buckets/bid2/collections/scid2 -> /buckets/bid2/collections/dcid2 80 | """ 81 | resources = utils.parse_resources(raw_resources) 82 | assert len(resources) == 2 83 | assert ( 84 | resources["/buckets/bid1/collections/scid1"]["source"]["bucket"] == "bid1" 85 | ) 86 | assert ( 87 | resources["/buckets/bid2/collections/scid2"]["source"]["bucket"] == "bid2" 88 | ) 89 | 90 | def test_multiple_resources_are_supported(self): 91 | raw_resources = """ 92 | /buckets/sbid1/collections/scid1 -> /buckets/dbid1/collections/dcid1 93 | /buckets/sbid2/collections/scid2 -> /buckets/dbid2/collections/dcid2 94 | """ 95 | resources = utils.parse_resources(raw_resources) 96 | assert len(resources) == 2 97 | 98 | def test_a_preview_collection_is_supported(self): 99 | raw_resources = ( 100 | "/buckets/stage/collections/cid -> " 101 | "/buckets/preview/collections/cid -> " 102 | "/buckets/prod/collections/cid -> " 103 | ) 104 | resources = utils.parse_resources(raw_resources) 105 | assert resources == { 106 | "/buckets/stage/collections/cid": { 107 | "source": {"bucket": "stage", "collection": "cid"}, 108 | "preview": {"bucket": "preview", "collection": "cid"}, 109 | "destination": {"bucket": "prod", "collection": "cid"}, 110 | } 111 | } 112 | 113 | def test_resources_should_be_space_separated(self): 114 | raw_resources = ( 115 | "/buckets/sbid1/collections/scid -> /buckets/dbid1/collections/dcid," 116 | "/buckets/sbid2/collections/scid -> /buckets/dbid2/collections/dcid" 117 | ) 118 | with self.assertRaises(ConfigurationError): 119 | utils.parse_resources(raw_resources) 120 | 121 | raw_resources = "sbid1/scid -> dbid1/dcid,sbid2/scid -> dbid2/dcid" 122 | with self.assertRaises(ConfigurationError): 123 | utils.parse_resources(raw_resources) 124 | 125 | def test_resources_must_be_valid_names(self): 126 | raw_resources = ( 127 | "/buckets/sbi+d1/collections/scid -> /buckets/dbid1/collections/dci,d" 128 | ) 129 | with self.assertRaises(ConfigurationError) as e: 130 | utils.parse_resources(raw_resources) 131 | assert repr(e.exception).startswith( 132 | 'ConfigurationError("Malformed resource: ' 133 | "bucket or collection id is invalid" 134 | ) 135 | 136 | def test_resources_can_be_defined_per_bucket(self): 137 | raw_resources = "/buckets/stage -> /buckets/preview -> /buckets/prod" 138 | resources = utils.parse_resources(raw_resources) 139 | assert 
resources == { 140 | "/buckets/stage": { 141 | "source": {"bucket": "stage", "collection": None}, 142 | "preview": {"bucket": "preview", "collection": None}, 143 | "destination": {"bucket": "prod", "collection": None}, 144 | } 145 | } 146 | 147 | def test_cannot_mix_per_bucket_and_per_collection(self): 148 | raw_resources = "/buckets/stage -> /buckets/prod/collections/boom" 149 | with self.assertRaises(ConfigurationError): 150 | utils.parse_resources(raw_resources) 151 | 152 | raw_resources = ( 153 | "/buckets/stage/collections/boom -> " 154 | "/buckets/preview/collections/boom -> " 155 | "/buckets/prod" 156 | ) 157 | with self.assertRaises(ConfigurationError): 158 | utils.parse_resources(raw_resources) 159 | 160 | raw_resources = ( 161 | "/buckets/stage -> /buckets/preview/collections/boom -> /buckets/prod" 162 | ) 163 | with self.assertRaises(ConfigurationError): 164 | utils.parse_resources(raw_resources) 165 | 166 | raw_resources = "/buckets/stage/collections/boom -> /buckets/prod" 167 | with self.assertRaises(ConfigurationError): 168 | utils.parse_resources(raw_resources) 169 | 170 | def test_cannot_repeat_source_preview_or_destination(self): 171 | raw_resources = "/buckets/stage -> /buckets/stage -> /buckets/prod" 172 | with self.assertRaises(ConfigurationError): 173 | utils.parse_resources(raw_resources) 174 | 175 | raw_resources = "/buckets/stage -> /buckets/preview -> /buckets/stage" 176 | with self.assertRaises(ConfigurationError): 177 | utils.parse_resources(raw_resources) 178 | 179 | raw_resources = "/buckets/stage -> /buckets/preview -> /buckets/preview" 180 | with self.assertRaises(ConfigurationError): 181 | utils.parse_resources(raw_resources) 182 | 183 | def test_cannot_repeat_resources(self): 184 | # Repeated source. 185 | raw_resources = """ 186 | /buckets/stage -> /buckets/preview1 -> /buckets/prod1 187 | /buckets/stage -> /buckets/preview2 -> /buckets/prod2 188 | """ 189 | with self.assertRaises(ConfigurationError): 190 | utils.parse_resources(raw_resources) 191 | 192 | # Repeated reviews. 193 | raw_resources = """ 194 | /buckets/stage1 -> /buckets/preview -> /buckets/prod1 195 | /buckets/stage2 -> /buckets/preview -> /buckets/prod2 196 | """ 197 | with self.assertRaises(ConfigurationError): 198 | utils.parse_resources(raw_resources) 199 | 200 | # Repeated destination. 201 | raw_resources = """ 202 | /buckets/stage1 -> /buckets/prod 203 | /buckets/stage2 -> /buckets/preview -> /buckets/prod 204 | """ 205 | with self.assertRaises(ConfigurationError): 206 | utils.parse_resources(raw_resources) 207 | 208 | # Source in other's preview. 209 | raw_resources = """ 210 | /buckets/stage -> /buckets/preview -> /buckets/prod 211 | /buckets/bid1 -> /buckets/stage -> /buckets/bid2 212 | """ 213 | with self.assertRaises(ConfigurationError): 214 | utils.parse_resources(raw_resources) 215 | 216 | # Source in other's destination. 217 | raw_resources = """ 218 | /buckets/b/collections/c -> /buckets/b/collections/c2 -> /buckets/b/collections/c3 219 | /buckets/b/collections/ca -> /buckets/b/collections/cb -> /buckets/b/collections/c 220 | """ 221 | with self.assertRaises(ConfigurationError): 222 | utils.parse_resources(raw_resources) 223 | 224 | # Preview in other's destination. 
225 | raw_resources = """ 226 | /buckets/b/collections/c0 -> /buckets/b/collections/c1 -> /buckets/b/collections/c2 227 | /buckets/b/collections/ca -> /buckets/b/collections/cb -> /buckets/b/collections/c1 228 | """ 229 | with self.assertRaises(ConfigurationError): 230 | utils.parse_resources(raw_resources) 231 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.isort] 2 | profile = "black" 3 | lines_after_imports = 2 4 | src_paths = ["kinto-remote-settings", "tests"] 5 | 6 | [tool.pytest.ini_options] 7 | # https://docs.pytest.org/en/6.2.x/usage.html 8 | # -ra: shows test summary for all EXCEPT passed and passed with output 9 | # --showlocals: show local variables in tracebacks 10 | # --tb=native: traceback printing with Python standard library formatting 11 | addopts = "-ra --showlocals --tb=native" 12 | sensitive_url = "https://settings-writer.prod.mozaws.net/v1/admin" 13 | asyncio_mode = "auto" 14 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["config:base"], 3 | "docker-compose": { 4 | "digest": { 5 | "enabled": false 6 | } 7 | }, 8 | "dockerfile": { 9 | "pinDigests": true 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | black 2 | flake8 3 | httpie 4 | isort 5 | kinto-http 6 | pytest 7 | pytest-asyncio 8 | pytest-selenium 9 | webtest 10 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | kinto[postgresql,memcached,monitoring] 2 | kinto-attachment 3 | kinto-emailer 4 | uWSGI 5 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mozilla-services/kinto-dist/1511dbbb76ee69970bbd29eb56b7811974ce2ba1/tests/__init__.py -------------------------------------------------------------------------------- /tests/browser_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from kinto_http.patch_type import JSONPatch 3 | from selenium.common.exceptions import NoSuchElementException 4 | from selenium.webdriver.common.by import By 5 | from selenium.webdriver.remote.webdriver import WebDriver 6 | from selenium.webdriver.remote.webelement import WebElement 7 | 8 | from .conftest import Auth, ClientFactory 9 | 10 | 11 | pytestmark = pytest.mark.asyncio 12 | 13 | 14 | async def test_review_signoff( 15 | base_url: str, 16 | selenium: WebDriver, 17 | make_client: ClientFactory, 18 | auth: Auth, 19 | editor_auth: Auth, 20 | reviewer_auth: Auth, 21 | ): 22 | client = make_client(auth) 23 | editor_client = make_client(editor_auth) 24 | reviewer_client = make_client(reviewer_auth) 25 | 26 | selenium.get(base_url) 27 | 28 | sign_in(selenium, reviewer_auth) 29 | 30 | editor_id = (await editor_client.server_info())["user"]["id"] 31 | reviewer_id = (await reviewer_client.server_info())["user"]["id"] 32 | 33 | await client.create_bucket(id="main-workspace", if_not_exists=True) 34 | await client.create_collection( 35 | 
id="product-integrity", 36 | bucket="main-workspace", 37 | permissions={"write": [editor_id, reviewer_id]}, 38 | if_not_exists=True, 39 | ) 40 | data = JSONPatch([{"op": "add", "path": "/data/members/0", "value": editor_id}]) 41 | await client.patch_group(id="product-integrity-editors", changes=data) 42 | data = JSONPatch([{"op": "add", "path": "/data/members/0", "value": reviewer_id}]) 43 | await client.patch_group(id="product-integrity-reviewers", changes=data) 44 | await client.create_record( 45 | bucket="main-workspace", collection="product-integrity", data={"testing": 123} 46 | ) 47 | await editor_client.patch_collection( 48 | id="product-integrity", bucket="main-workspace", data={"status": "to-review"} 49 | ) 50 | 51 | selenium.get( 52 | base_url 53 | + "/#/buckets/main-workspace/collections/product-integrity/simple-review" 54 | ) 55 | selenium.refresh() 56 | 57 | approve_button: WebElement = selenium.find_element( 58 | By.XPATH, "//button[contains(., 'Approve')]" 59 | ) 60 | assert approve_button, "Approve button not found" 61 | assert approve_button.text == "Approve" 62 | assert approve_button.is_displayed() 63 | 64 | reject_button: WebElement = selenium.find_element( 65 | By.XPATH, "//button[contains(., 'Reject')]" 66 | ) 67 | assert reject_button, "Reject button not found" 68 | assert reject_button.text == "Reject" 69 | assert reject_button.is_displayed() 70 | 71 | approve_button.click() 72 | 73 | # find and click show readonly buckets/collections 74 | readonly_checkbox: WebElement = selenium.find_element(By.ID, "read-only-toggle") 75 | assert readonly_checkbox, "Readonly checkbox not found" 76 | assert readonly_checkbox.is_displayed() 77 | readonly_checkbox.click() 78 | 79 | # find and click on main bucket product-integrity collection 80 | product_integrity: WebElement = selenium.find_element( 81 | By.XPATH, 82 | "//a[@href='#/buckets/main/collections/product-integrity/records' and contains(., 'product-integrity')]", 83 | ) 84 | assert product_integrity, "product-integrity collection not found under main bucket" 85 | assert product_integrity.is_displayed() 86 | product_integrity.click() 87 | 88 | # find and ensure record was properly signed to main bucket 89 | data: WebElement = selenium.find_element(By.XPATH, "//code") 90 | assert data, "Record not found in product-integrity collection under main bucket" 91 | assert data.is_displayed() 92 | assert data.text == '{"testing":123}' 93 | 94 | 95 | def sign_in(selenium: WebDriver, auth: Auth): 96 | # find and select Kinto Account Auth for login 97 | kinto_auth_radio_button: WebElement = selenium.find_element( 98 | By.XPATH, "//input[@value='accounts']" 99 | ) 100 | assert kinto_auth_radio_button, "Kinto Account Auth radio button not found" 101 | kinto_auth_radio_button.click() 102 | 103 | # ensure account credentials fields render 104 | account_creds_title: WebElement = selenium.find_element( 105 | By.ID, "root_credentials__title" 106 | ) 107 | assert account_creds_title, "Account credentials title not found" 108 | assert account_creds_title.text == "Account credentials*" 109 | assert account_creds_title.is_displayed() 110 | 111 | # enter login username 112 | account_creds_user: WebElement = selenium.find_element( 113 | By.ID, "root_credentials_username" 114 | ) 115 | assert account_creds_user, "Account credentials username entry not found" 116 | assert account_creds_user.is_displayed() 117 | account_creds_user.send_keys(auth[0]) 118 | 119 | # enter login password 120 | account_creds_pass: WebElement = selenium.find_element( 121 | 
By.ID, "root_credentials_password" 122 | ) 123 | assert account_creds_pass, "Account credentials password entry not found" 124 | assert account_creds_pass.is_displayed() 125 | account_creds_pass.send_keys(auth[1]) 126 | 127 | # sign in 128 | sign_in_button: WebElement = selenium.find_element(By.CLASS_NAME, "btn-info") 129 | assert sign_in_button, "Sign in button not found" 130 | assert sign_in_button.text == "Sign in using Kinto Account Auth" 131 | assert sign_in_button.is_displayed() 132 | sign_in_button.click() 133 | 134 | # determine if successfully logged in to admin home page 135 | try: 136 | server_info: WebElement = selenium.find_element( 137 | By.XPATH, 138 | "//div[@class='card-header' and contains(., 'Server information')]", 139 | ) 140 | assert server_info, "Server information not found" 141 | assert server_info.text == "Server information" 142 | assert server_info.is_displayed() 143 | except NoSuchElementException: 144 | pytest.fail("Login was unsuccessful") 145 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from typing import Callable, Tuple 2 | 3 | import pytest 4 | import requests 5 | from kinto_http import AsyncClient, KintoException 6 | from pytest import FixtureRequest 7 | from requests.adapters import HTTPAdapter 8 | from selenium.webdriver.firefox.options import Options 9 | from selenium.webdriver.remote.webdriver import WebDriver 10 | from urllib3.util.retry import Retry 11 | 12 | 13 | DEFAULT_SERVER = "http://localhost:8888/v1" 14 | DEFAULT_AUTH = "user:pass" 15 | DEFAULT_EDITOR_AUTH = "editor:pass" 16 | DEFAULT_REVIEWER_AUTH = "reviewer:pass" 17 | DEFAULT_BUCKET = "main-workspace" 18 | DEFAULT_COLLECTION = "product-integrity" 19 | 20 | Auth = Tuple[str, str] 21 | ClientFactory = Callable[[Auth], AsyncClient] 22 | 23 | 24 | def pytest_addoption(parser): 25 | parser.addoption( 26 | "--server", 27 | action="store", 28 | default=DEFAULT_SERVER, 29 | help="Kinto server (in form 'http(s)://:/v1')", 30 | ) 31 | parser.addoption( 32 | "--auth", 33 | action="store", 34 | default=DEFAULT_AUTH, 35 | help="Basic authentication", 36 | ) 37 | parser.addoption( 38 | "--editor-auth", 39 | action="store", 40 | default=DEFAULT_EDITOR_AUTH, 41 | help="Basic authentication for editor", 42 | ) 43 | parser.addoption( 44 | "--reviewer-auth", 45 | action="store", 46 | default=DEFAULT_REVIEWER_AUTH, 47 | help="Basic authentication for reviewer", 48 | ) 49 | parser.addoption( 50 | "--bucket", 51 | action="store", 52 | default=DEFAULT_BUCKET, 53 | help="Source bucket", 54 | ) 55 | parser.addoption( 56 | "--collection", 57 | action="store", 58 | default=DEFAULT_COLLECTION, 59 | help="Source collection", 60 | ) 61 | parser.addoption( 62 | "--keep-existing", 63 | action="store_true", 64 | default=False, 65 | help="Keep existing collection data", 66 | ) 67 | 68 | 69 | @pytest.fixture(scope="session") 70 | def server(request) -> str: 71 | return request.config.getoption("--server") 72 | 73 | 74 | @pytest.fixture(scope="session") 75 | def auth(request) -> Auth: 76 | return tuple(request.config.getoption("--auth").split(":")) 77 | 78 | 79 | @pytest.fixture(scope="session") 80 | def editor_auth(request) -> Auth: 81 | return tuple(request.config.getoption("--editor-auth").split(":")) 82 | 83 | 84 | @pytest.fixture(scope="session") 85 | def reviewer_auth(request) -> Auth: 86 | return tuple(request.config.getoption("--reviewer-auth").split(":")) 87 | 88 | 89 | 
@pytest.fixture(scope="session") 90 | def source_bucket(request) -> str: 91 | return request.config.getoption("--bucket") 92 | 93 | 94 | @pytest.fixture(scope="session") 95 | def source_collection(request) -> str: 96 | return request.config.getoption("--collection") 97 | 98 | 99 | @pytest.fixture(scope="session") 100 | def keep_existing(request) -> bool: 101 | return request.config.getoption("--keep-existing") 102 | 103 | 104 | @pytest.fixture 105 | def make_client( 106 | server: str, source_bucket: str, source_collection: str 107 | ) -> ClientFactory: 108 | """Factory as fixture for creating a Kinto AsyncClient used for tests. 109 | 110 | Args: 111 | server (str): Kinto server (in form 'http(s)://:/v1') 112 | source_bucket (str): Source bucket 113 | source_collection (str): Source collection 114 | 115 | Returns: 116 | AsyncClient: AsyncClient 117 | """ 118 | 119 | def _make_client(auth: Auth) -> AsyncClient: 120 | request_session = requests.Session() 121 | retries = Retry( 122 | total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504] 123 | ) 124 | request_session.mount( 125 | f"{server.split('://')[0]}://", HTTPAdapter(max_retries=retries) 126 | ) 127 | 128 | create_user(request_session, server, auth) 129 | 130 | return AsyncClient( 131 | server_url=server, 132 | auth=auth, 133 | bucket=source_bucket, 134 | collection=source_collection, 135 | retry=5, 136 | ) 137 | 138 | return _make_client 139 | 140 | 141 | @pytest.fixture(autouse=True) 142 | async def flush_default_collection( 143 | make_client: ClientFactory, 144 | auth: Auth, 145 | source_bucket: str, 146 | source_collection: str, 147 | ): 148 | yield 149 | client = make_client(auth) 150 | 151 | try: 152 | await client.delete_collection( 153 | id=source_collection, bucket=source_bucket, if_exists=True 154 | ) 155 | except KintoException as e: 156 | # in the case where a user doesn't have permissions to delete 157 | print(e) 158 | 159 | 160 | @pytest.fixture(scope="session", autouse=True) 161 | def verify_url(request: FixtureRequest, base_url: str): 162 | """Verifies the base URL""" 163 | verify = request.config.option.verify_base_url 164 | if base_url and verify: 165 | session = requests.Session() 166 | retries = Retry(backoff_factor=0.1, status_forcelist=[500, 502, 503, 504]) 167 | session.mount(base_url, HTTPAdapter(max_retries=retries)) 168 | session.get(base_url, verify=False) 169 | 170 | 171 | @pytest.fixture 172 | def firefox_options(firefox_options: Options) -> Options: 173 | firefox_options.headless = True 174 | return firefox_options 175 | 176 | 177 | @pytest.fixture 178 | def selenium(selenium: WebDriver) -> WebDriver: 179 | selenium.set_window_size(1024, 600) 180 | selenium.maximize_window() 181 | selenium.implicitly_wait(5) 182 | return selenium 183 | 184 | 185 | def create_user(request_session: requests.Session, server: str, auth: Auth): 186 | # check if user already exists before creating 187 | r = request_session.get(server, auth=auth) 188 | if "user" not in r.json(): 189 | assert request_session.put( 190 | f"{server}/accounts/{auth[0]}", 191 | json={"data": {"password": auth[1]}}, 192 | ) 193 | -------------------------------------------------------------------------------- /tests/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | : "${SERVER:=http://web:8888/v1}" 5 | 6 | usage() { 7 | echo "usage: ./run.sh start" 8 | echo "" 9 | echo " start Start tests" 10 | echo "" 11 | exit 1 12 | } 13 | 14 | [ $# -lt 1 ] && usage 
15 | 16 | case $1 in 17 | start) 18 | wget -q --tries=180 --retry-connrefused --waitretry=1 -O /dev/null $SERVER || (echo "Can't reach $SERVER" && exit 1) 19 | http -q --check-status $SERVER/__heartbeat__ 20 | pytest integration_test.py --server $SERVER 21 | pytest --driver Remote --capability browserName firefox --base-url $SERVER/admin --verify-base-url browser_test.py --server $SERVER 22 | ;; 23 | *) 24 | exec "$@" 25 | ;; 26 | esac 27 | --------------------------------------------------------------------------------
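The signer unit tests above (test_signer.py, test_utils.py) exercise the local ECDSA backend and the resource-mapping parser directly. For orientation, a rough standalone sketch of the same calls follows; it is illustrative only and not a file from this repository — the temporary-file handling, sample payload, and bucket/collection names are invented for the example, while the function and method names mirror those used in the tests.

# Illustrative only: a condensed version of what the unit tests above exercise.
# The temporary-file handling and the sample payload are invented for this sketch.
import tempfile

from kinto_remote_settings.signer import utils
from kinto_remote_settings.signer.backends import exceptions, local_ecdsa

# Generate an ECDSA keypair (PEM bytes) and keep the private key on disk,
# which is how the local backend expects to receive it.
private_pem, public_pem = local_ecdsa.ECDSASigner.generate_keypair()
with tempfile.NamedTemporaryFile(suffix=".pem", delete=False) as pem_file:
    pem_file.write(private_pem)
    private_key_path = pem_file.name

# sign() returns a signature bundle (base64 "signature", etc.); verify()
# raises BadSignatureError when the payload does not match the signature.
signer = local_ecdsa.ECDSASigner(private_key=private_key_path)
bundle = signer.sign("this is some text")
signer.verify("this is some text", bundle)
try:
    signer.verify("tampered payload", bundle)
except exceptions.BadSignatureError:
    print("signature mismatch detected, as expected")

# Collections to sign are declared as "source -> [preview ->] destination";
# parse_resources() turns that string into a mapping keyed by the source URI.
resources = utils.parse_resources(
    "/buckets/stage/collections/cid -> "
    "/buckets/preview/collections/cid -> "
    "/buckets/prod/collections/cid"
)
assert resources["/buckets/stage/collections/cid"]["destination"] == {
    "bucket": "prod",
    "collection": "cid",
}

The Autograph backend shown in the same tests exposes the equivalent sign()/verify-style interface but delegates signing to a remote service by POSTing the base64-encoded payload to its /sign/data endpoint with Hawk credentials.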