├── VERSION ├── .gitignore ├── hooks └── post_gen_project.sh ├── {{cookiecutter.repostory_name}} ├── devops │ ├── tf │ │ ├── main │ │ │ ├── envs │ │ │ │ ├── prod │ │ │ │ │ ├── main.tf │ │ │ │ │ ├── vars.tf │ │ │ │ │ ├── versions.tf │ │ │ │ │ ├── backend.tf │ │ │ │ │ └── terraform.tfvars │ │ │ │ ├── staging │ │ │ │ │ ├── main.tf │ │ │ │ │ ├── vars.tf │ │ │ │ │ ├── versions.tf │ │ │ │ │ ├── backend.tf │ │ │ │ │ └── terraform.tfvars │ │ │ │ └── common │ │ │ │ │ ├── versions.tf │ │ │ │ │ ├── vars.tf │ │ │ │ │ └── main.tf │ │ │ ├── files │ │ │ │ ├── authorized_keys │ │ │ │ ├── nginx │ │ │ │ │ ├── monitoring_certs │ │ │ │ │ │ ├── monitoring.crt.txt │ │ │ │ │ │ ├── monitoring.key.txt │ │ │ │ │ │ └── monitoring-ca.crt.txt │ │ │ │ │ ├── config_helpers │ │ │ │ │ │ ├── brotli.conf │ │ │ │ │ │ └── gzip.conf │ │ │ │ │ └── templates │ │ │ │ │ │ └── default.conf.template │ │ │ │ ├── envrc │ │ │ │ ├── env │ │ │ │ ├── cloud-init.yml │ │ │ │ └── docker-compose.yml │ │ │ └── modules │ │ │ │ ├── networking │ │ │ │ ├── vars.tf │ │ │ │ ├── output.tf │ │ │ │ └── network.tf │ │ │ │ ├── backend │ │ │ │ ├── ec2-keys.tf │ │ │ │ ├── parameters.ssh-keys.tf │ │ │ │ ├── parameters.docker-compose.tf │ │ │ │ ├── vars.tf │ │ │ │ ├── ec2-profile.tf │ │ │ │ ├── parameters.env.tf │ │ │ │ ├── alb.tf │ │ │ │ ├── domain.tf │ │ │ │ ├── ec2-autoscale.tf │ │ │ │ ├── parameters.nginx.tf │ │ │ │ └── security.tf │ │ │ │ └── database │ │ │ │ ├── vars.tf │ │ │ │ ├── security.tf │ │ │ │ ├── output.tf │ │ │ │ └── rds.tf │ │ └── core │ │ │ ├── vars.tf │ │ │ ├── terraform.tfvars │ │ │ ├── backend.tf │ │ │ └── main.tf │ ├── packer │ │ ├── build.sh │ │ └── docker-optimized.pkr.hcl │ ├── vultr_tf │ │ └── core │ │ │ ├── vars.tf │ │ │ ├── backend.tf │ │ │ ├── vars_cloud_init.tf │ │ │ ├── vultr-cloud-init.tftpl │ │ │ └── main.tf │ ├── scripts │ │ ├── deploy-backend.sh │ │ ├── vars.sh │ │ └── build-backend.sh │ └── vultr_scripts │ │ ├── vultr-get-instances.py │ │ ├── vultr-deploy.py │ │ └── vultr-update-cloudinit.py ├── app │ ├── src │ │ ├── {{cookiecutter.django_project_name}} │ │ │ ├── __init__.py │ │ │ ├── {{cookiecutter.django_default_app_name}} │ │ │ │ ├── views.py │ │ │ │ ├── models.py │ │ │ │ ├── __init__.py │ │ │ │ ├── apps.py │ │ │ │ ├── {% if cookiecutter.use_channels == "y" %}schemas.py{% endif %} │ │ │ │ ├── {% if cookiecutter.monitoring == "y" %}business_metrics.py{% endif %} │ │ │ │ ├── tests │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── settings.py │ │ │ │ │ ├── test_database.py │ │ │ │ │ ├── {% if cookiecutter.use_channels == "y" %}test_websocket.py{% endif %} │ │ │ │ │ ├── test_setup.py │ │ │ │ │ ├── conftest.py │ │ │ │ │ └── test_settings.py │ │ │ │ ├── admin.py │ │ │ │ ├── management │ │ │ │ │ └── commands │ │ │ │ │ │ ├── {% if cookiecutter.use_celery == "y" %}flush_queue.py{% endif %} │ │ │ │ │ │ └── {% if cookiecutter.use_celery == "y" %}move_tasks.py{% endif %} │ │ │ │ ├── {% if cookiecutter.use_celery == "y" %}tasks.py{% endif %} │ │ │ │ ├── {% if cookiecutter.monitoring == "y" %}metrics.py{% endif %} │ │ │ │ ├── {% if cookiecutter.use_channels == "y" %}consumers.py{% endif %} │ │ │ │ └── email.py │ │ │ ├── wsgi.py │ │ │ ├── api │ │ │ │ ├── {% if cookiecutter.use_rest_framework == "y" %}pagination.py{% endif %} │ │ │ │ ├── {% if cookiecutter.use_rest_framework == "y" %}routers.py{% endif %} │ │ │ │ ├── {% if cookiecutter.use_rest_framework == "y" %}serializers.py{% endif %} │ │ │ │ └── {% if cookiecutter.use_rest_framework == "y" %}views.py{% endif %} │ │ │ ├── asgi.py │ │ │ ├── urls.py │ │ │ └── {% if cookiecutter.use_celery == 
'y' %}celery.py{% endif %} │ │ ├── pytest.ini │ │ ├── mypy.ini │ │ ├── manage.py │ │ └── healthcheck.py │ └── envs │ │ └── prod │ │ ├── entrypoint.sh │ │ ├── {% if cookiecutter.monitoring == "y" %}prometheus-cleanup.sh{% endif %} │ │ ├── gunicorn.conf.py │ │ ├── {% if cookiecutter.use_celery == 'y' %}celery-entrypoint.sh{% endif %} │ │ └── Dockerfile ├── backups │ ├── bin │ │ ├── requirements.txt │ │ ├── list-backups.sh │ │ ├── backup-db-to-email.sh │ │ ├── rotate-local-backups.sh │ │ ├── restore-db.sh │ │ ├── common.sh │ │ ├── backup-file-to-b2.sh │ │ ├── backup-db.sh │ │ ├── emailhelper.py │ │ └── serve_metrics.py │ ├── docker-entrypoint.sh │ ├── cron.d │ │ └── backup │ └── Dockerfile ├── bin │ ├── run-manage-py.sh │ ├── dbshell.sh │ └── prepare-os.sh ├── deploy-to-aws.sh ├── envs │ ├── prod │ │ ├── .vuln.env.template │ │ ├── .env.template │ │ └── docker-compose.yml │ └── dev │ │ ├── docker-compose.yml │ │ └── .env.template ├── .dockerignore ├── .gitignore ├── .github │ ├── dependabot.yml │ └── workflows │ │ ├── cd.yml │ │ ├── ci.yml │ │ └── cruft-updates.yml ├── nginx │ ├── monitoring_certs │ │ └── README.md │ ├── config_helpers │ │ ├── brotli.conf │ │ └── gzip.conf │ └── templates │ │ └── default.conf.template ├── .shellcheckrc ├── letsencrypt_setup.sh ├── alloy │ └── config.alloy ├── setup-dev.sh ├── setup-prod.sh ├── deploy.sh ├── docs │ └── 3rd_party │ │ └── cookiecutter-rt-django │ │ └── CHANGELOG.md ├── SECURITY.md ├── README_vultr.md ├── pyproject.toml ├── noxfile.py └── README_AWS.md ├── .git-blame-ignore-revs ├── docker └── node-exporter │ ├── Dockerfile │ └── entrypoint.sh ├── .shellcheckrc ├── .github ├── dependabot.yml └── workflows │ ├── ci.yml │ └── publish-docker-node-exporter.yml ├── CONTRIBUTING.md ├── pyproject.toml ├── LICENSE ├── README.md ├── SECURITY.md ├── cookiecutter.json ├── features.md └── noxfile.py /VERSION: -------------------------------------------------------------------------------- 1 | 0.2.0 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .nox 3 | __pycache__ 4 | *.egg-info/ -------------------------------------------------------------------------------- /hooks/post_gen_project.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | ruff format . 
4 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/prod/main.tf: -------------------------------------------------------------------------------- 1 | ../common/main.tf -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/prod/vars.tf: -------------------------------------------------------------------------------- 1 | ../common/vars.tf -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/staging/main.tf: -------------------------------------------------------------------------------- 1 | ../prod/main.tf -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/staging/vars.tf: -------------------------------------------------------------------------------- 1 | ../prod/vars.tf -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/authorized_keys: -------------------------------------------------------------------------------- 1 | ${ec2_ssh_key} -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/prod/versions.tf: -------------------------------------------------------------------------------- 1 | ../common/versions.tf -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/staging/versions.tf: -------------------------------------------------------------------------------- 1 | ../prod/versions.tf -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/nginx/monitoring_certs/monitoring.crt.txt: -------------------------------------------------------------------------------- 1 | "replace-me" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/nginx/monitoring_certs/monitoring.key.txt: -------------------------------------------------------------------------------- 1 | "replace-me" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/nginx/monitoring_certs/monitoring-ca.crt.txt: -------------------------------------------------------------------------------- 1 | "replace-me" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/views.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/requirements.txt: -------------------------------------------------------------------------------- 1 | prometheus_client==0.21.1 2 | b2sdk==2.8.0 3 | structlog==25.1.0 4 | b2==4.3.0 -------------------------------------------------------------------------------- 
/.git-blame-ignore-revs: -------------------------------------------------------------------------------- 1 | # Migrate code style to ruff format 2 | 9cf03b2e9f96de2304709b7150a7608c6ba6adfb 3 | bf05db212bea174f5cf3d7f05f1151baae7f6cd0 -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/core/vars.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | } 4 | 5 | variable "region" { 6 | type = string 7 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models # noqa 2 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/core/terraform.tfvars: -------------------------------------------------------------------------------- 1 | region = "{{ cookiecutter.aws_region }}" 2 | name = "{{ cookiecutter.aws_project_name }}" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/packer/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # initialize packer script and building image 4 | packer init . 5 | 6 | packer build docker-optimized.pkr.hcl -------------------------------------------------------------------------------- /docker/node-exporter/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM prom/node-exporter:latest 2 | 3 | COPY entrypoint.sh / 4 | RUN mkdir -p /home/nobody/textfile_collector_metrics 5 | 6 | ENTRYPOINT ["/entrypoint.sh"] -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/bin/run-manage-py.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ "$(basename "$0")" == 'bin' ]; then 3 | cd .. 
4 | fi 5 | docker compose exec app sh -c "python manage.py $*" 6 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | printenv > /etc/environment && cron && tail -f /var/log/cron.log & 4 | uv run /root/serve_metrics.py -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/deploy-to-aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # shellcheck disable=2086 4 | ./devops/scripts/build-backend.sh "$1" 5 | ./devops/scripts/deploy-backend.sh "$1" 6 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/networking/vars.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "env" {} 3 | variable "azs" {} 4 | variable "vpc_cidr" {} 5 | variable "subnet_cidrs" {} -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/cron.d/backup: -------------------------------------------------------------------------------- 1 | 0 3 * * * root /root/backup-db.sh >> /var/log/cron.log 2>&1 2 | 0 5 * * * root /root/rotate-local-backups.sh >> /var/log/cron.log 2>&1 3 | 4 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/ec2-keys.tf: -------------------------------------------------------------------------------- 1 | resource "aws_key_pair" "self" { 2 | key_name = "${var.name}-${var.env}-key" 3 | public_key = var.ec2_ssh_key 4 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_tf/core/vars.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | } 4 | 5 | variable "vultr_api_key" { 6 | type = string 7 | sensitive = true 8 | } 9 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | python_files = tests.py test_*.py *_tests.py 3 | DJANGO_SETTINGS_MODULE = {{cookiecutter.django_project_name}}.{{cookiecutter.django_default_app_name}}.tests.settings -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/envrc: -------------------------------------------------------------------------------- 1 | export APP_NAME=${name} 2 | export APP_ENV=${env} 3 | export AWS_ACCOUNT_ID=${account_id} 4 | export AWS_ECR_BASE_URL=${ecr_base_url} 5 | export AWS_ECR_TAG=${ecr_image} 6 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_tf/core/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | 3 | required_providers { 4 | vultr = { 5 | source = "vultr/vultr" 6 | version = "~> 2.15.1" 7 | } 8 | } 9 | 10 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/database/vars.tf: 
-------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "env" {} 3 | 4 | variable "vpc_id" {} 5 | variable "vpc_cidr" {} 6 | variable "subnets" {} 7 | variable "azs" {} 8 | variable "instance_type" {} 9 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/common/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 4.0" 6 | } 7 | } 8 | 9 | required_version = "~> 1.0" 10 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/prod/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "{{ cookiecutter.aws_infra_bucket }}" 4 | key = "prod/main.tfstate" 5 | region = "{{ cookiecutter.aws_region }}" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/staging/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "{{ cookiecutter.aws_infra_bucket }}" 4 | key = "staging/main.tfstate" 5 | region = "{{ cookiecutter.aws_region }}" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/__init__.py: -------------------------------------------------------------------------------- 1 | default_app_config = "{{cookiecutter.django_project_name}}.{{cookiecutter.django_default_app_name}}.apps.{{cookiecutter.django_default_app_name|title}}Config" 2 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/envs/prod/.vuln.env.template: -------------------------------------------------------------------------------- 1 | ENV=prod 2 | # Full URL of the DefectDojo instance 3 | DD_URL= 4 | # API v2 key for the DefectDojo instance 5 | DD_API_KEY= 6 | # Product name in DefectDojo - a unique name to identify the current application 7 | DD_PRODUCT= 8 | SENTRY_DSN= 9 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/wsgi.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from django.core.wsgi import get_wsgi_application 4 | 5 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{cookiecutter.django_project_name}}.settings") 6 | 7 | application = get_wsgi_application() 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/.dockerignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.sqlite3 3 | *~ 4 | *.egg-info/ 5 | /docker-compose.yml 6 | /.idea/ 7 | /redis/ 8 | /db/ 9 | /letsencrypt/ 10 | /nginx/ 11 | .env 12 | .venv 13 | venv 14 | .backups/ 15 | .envrc 16 | .terraform.lock.hcl 17 | .terraform/ 18 | .nox/ 19 | __pycache__ 20 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/api/{% if 
cookiecutter.use_rest_framework == "y" %}pagination.py{% endif %}: -------------------------------------------------------------------------------- 1 | from rest_framework.pagination import CursorPagination as BaseCursorPagination 2 | 3 | 4 | class CursorPagination(BaseCursorPagination): 5 | ordering = "-id" 6 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.sqlite3 3 | *~ 4 | *.egg-info/ 5 | /docker-compose.yml 6 | /.idea/ 7 | /redis/ 8 | /db/ 9 | /letsencrypt/ 10 | .env 11 | .vuln.env 12 | .venv 13 | venv 14 | media/ 15 | .backups/ 16 | .envrc 17 | .terraform.lock.hcl 18 | .terraform/ 19 | .nox/ 20 | __pycache__ 21 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/api/{% if cookiecutter.use_rest_framework == "y" %}routers.py{% endif %}: -------------------------------------------------------------------------------- 1 | from rest_framework.routers import DefaultRouter 2 | 3 | # from .views import SomeModelViewSet 4 | 5 | router = DefaultRouter() 6 | # router.register(r"some", SomeModelViewSet) 7 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class {{cookiecutter.django_default_app_name|title}}Config(AppConfig): 5 | name = "{{cookiecutter.django_project_name}}.{{cookiecutter.django_default_app_name}}" 6 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/{% if cookiecutter.use_channels == "y" %}schemas.py{% endif %}: -------------------------------------------------------------------------------- 1 | from typing import Literal 2 | 3 | from pydantic import BaseModel 4 | 5 | 6 | class Heartbeat(BaseModel): 7 | type: Literal["Heartbeat"] = "Heartbeat" 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/parameters.ssh-keys.tf: -------------------------------------------------------------------------------- 1 | resource "aws_ssm_parameter" "ssh-keys" { 2 | name = "/application/${var.name}/${var.env}/.ssh/authorized_keys" 3 | type = "SecureString" 4 | value = templatefile("../../files/authorized_keys", { 5 | ec2_ssh_key = var.ec2_ssh_key 6 | }) 7 | } 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | plugins = 3 | mypy_django_plugin.main, 4 | mypy_drf_plugin.main 5 | strict_optional = True 6 | ignore_missing_imports = True 7 | 8 | [mypy.plugins.django-stubs] 9 | django_settings_module = "{{cookiecutter.django_project_name}}.{{cookiecutter.django_default_app_name}}.tests.settings" 10 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/networking/output.tf: 
-------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | value = module.vpc.vpc_id 3 | } 4 | 5 | output "vpc_cidr_block" { 6 | value = module.vpc.vpc_cidr_block 7 | } 8 | 9 | output "subnets" { 10 | value = module.vpc.public_subnets 11 | } 12 | 13 | output "azs" { 14 | value = module.vpc.azs 15 | } 16 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/{% if cookiecutter.monitoring == "y" %}business_metrics.py{% endif %}: -------------------------------------------------------------------------------- 1 | from django_business_metrics.v0 import BusinessMetricsManager, active_users, users 2 | 3 | metrics_manager = BusinessMetricsManager() 4 | 5 | metrics_manager.add(users).add(active_users) 6 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/tests/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file is required by pytest, otherwise import errors will pop up: 3 | 4 | project/core/tests/conftest.py:8: in 5 | from .models import User 6 | E ImportError: attempted relative import with no known parent package 7 | """ 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/app/src" 5 | schedule: 6 | interval: "daily" 7 | open-pull-requests-limit: 0 8 | - package-ecosystem: "docker" 9 | directory: "/app/envs/prod" 10 | schedule: 11 | interval: "weekly" 12 | open-pull-requests-limit: 0 13 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/tests/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from {{cookiecutter.django_project_name}}.settings import * # noqa: E402,F403 4 | 5 | os.environ["DEBUG_TOOLBAR"] = "False" 6 | 7 | {% if cookiecutter.monitoring == "y" %} 8 | PROMETHEUS_EXPORT_MIGRATIONS = False 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/api/{% if cookiecutter.use_rest_framework == "y" %}serializers.py{% endif %}: -------------------------------------------------------------------------------- 1 | # from rest_framework.serializers import HyperlinkedModelSerializer 2 | 3 | 4 | # class SomeModelSerializer(HyperlinkedModelSerializer): 5 | # class Meta: 6 | # model = SomeModel 7 | # fields = ["field1", "field2", "field3"] 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/api/{% if cookiecutter.use_rest_framework == "y" %}views.py{% endif %}: -------------------------------------------------------------------------------- 1 | # from rest_framework.viewsets import ModelViewSet 2 | 3 | # from .serializers import SomeModelSerializer 4 | 5 | 6 | # class SomeModelViewSet(ModelViewSet): 7 | # queryset 
= SomeModel.objects.all() 8 | # serializer_class = SomeModelSerializer 9 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/networking/network.tf: -------------------------------------------------------------------------------- 1 | module "vpc" { 2 | source = "terraform-aws-modules/vpc/aws" 3 | version = "3.19.0" 4 | 5 | name = "${var.name}-${var.env}-vpc" 6 | cidr = var.vpc_cidr 7 | 8 | azs = var.azs 9 | public_subnets = var.subnet_cidrs 10 | enable_nat_gateway = false 11 | enable_vpn_gateway = false 12 | } 13 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/core/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket = "{{ cookiecutter.aws_infra_bucket }}" 4 | key = "core.tfstate" 5 | region = "{{ cookiecutter.aws_region }}" 6 | } 7 | 8 | required_providers { 9 | aws = { 10 | source = "hashicorp/aws" 11 | version = "~> 4.0" 12 | } 13 | } 14 | 15 | required_version = "~> 1.0" 16 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/nginx/monitoring_certs/README.md: -------------------------------------------------------------------------------- 1 | Go to [prometheus-grafana-monitoring](https://github.com/reef-technologies/prometheus-grafana-monitoring) and generate a cert-key pair for this project (see prometheus-grafana-monitoring's README to find out how to do that). 2 | Copy the generated cert-key pair along with `ca.crt` and place them there, named `cert.crt`, `cert.key` and `ca.crt`, respectively. 3 | -------------------------------------------------------------------------------- /.shellcheckrc: -------------------------------------------------------------------------------- 1 | # disable common false-positive errors to ease adoption in existing projects 2 | disable=SC1090-SC1092 # disable errors related to sourcing files 3 | disable=SC2034 # disable errors related to unused variables 4 | disable=SC2028 # swapping echo for printf requires more testing to ensure correctness 5 | # disable errors related to cookiecutter templating: 6 | disable=SC1054,SC1056,SC1072,SC1073,SC1083,SC1009 7 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/envs/prod/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # We assume that WORKDIR is defined in Dockerfile 4 | 5 | ./prometheus-cleanup.sh 6 | PROMETHEUS_EXPORT_MIGRATIONS=0 ./manage.py wait_for_database --timeout 10 7 | # this seems to be the only place to put this for AWS deployments to pick it up 8 | PROMETHEUS_EXPORT_MIGRATIONS=0 ./manage.py migrate 9 | 10 | gunicorn -c gunicorn.conf.py 11 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/tests/test_database.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from django.contrib.auth.models import User 3 | 4 | pytestmark = pytest.mark.django_db 5 | 6 | 7 | def test__database__save_object(): 8 | User(username="dummy", password="unhashed").save() 9 | assert User.objects.all().last().username == "dummy" 10 | 
-------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/list-backups.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | source "${SCRIPT_DIR}/common.sh" 5 | 6 | echo "Local backups:" 7 | find "$BACKUP_LOCAL_DIR" -name "*.dump.zstd" | sort -r 8 | 9 | if [ -n "${B2_BUCKET}" ]; then 10 | echo "B2 backups:" 11 | uv run b2 ls --long "b2://$B2_BUCKET${B2_FOLDER:+/$B2_FOLDER/}" 12 | fi -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/.shellcheckrc: -------------------------------------------------------------------------------- 1 | # disable common false-positive errors to ease adoption in existing projects 2 | disable=SC1090-SC1092 # disable errors related to sourcing files 3 | disable=SC2034 # disable errors related to unused variables 4 | disable=SC2028 # swapping echo for printf requires more testing to ensure correctness 5 | # disable errors related to cookiecutter templating: 6 | disable=SC1054,SC1056,SC1072,SC1073,SC1083,SC1009 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "pip" 4 | directory: "/{{cookiecutter.repostory_name}}/app/src" 5 | schedule: 6 | interval: "daily" 7 | - package-ecosystem: "docker" 8 | directory: "/{{cookiecutter.repostory_name}}/app/envs/prod" 9 | schedule: 10 | interval: "weekly" 11 | - package-ecosystem: "docker" 12 | directory: "/docker" 13 | schedule: 14 | interval: "weekly" 15 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/scripts/deploy-backend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | THIS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) 5 | source "$THIS_DIR"/vars.sh 6 | 7 | cd "$PROJECT_DIR"/app 8 | 9 | echo "Deploying Backend: ${APP_NAME}" 10 | docker push "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":latest 11 | 12 | aws autoscaling start-instance-refresh --region "${APP_REGION}" --auto-scaling-group-name "${APP_NAME}" 13 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/admin.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin # noqa 2 | from django.contrib.admin import register # noqa 3 | 4 | 5 | admin.site.site_header = "{{ cookiecutter.django_project_name }} Administration" 6 | admin.site.site_title = "{{ cookiecutter.django_project_name }}" 7 | admin.site.index_title = "Welcome to {{ cookiecutter.django_project_name }} Administration" 8 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM postgres:17 2 | WORKDIR /root 3 | COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ 4 | COPY --from=getsentry/sentry-cli:latest /bin/sentry-cli /bin/ 5 | RUN apt-get update && apt-get install -y python3.11 cron zstd 6 | 
COPY --chmod=0755 docker-entrypoint.sh / 7 | COPY --chmod=0644 cron.d/backup /etc/cron.d/backup 8 | COPY bin ./ 9 | RUN uv venv .venv && uv pip install -r requirements.txt 10 | 11 | CMD ["/docker-entrypoint.sh"] -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/bin/dbshell.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$(basename "$0")" == 'bin' ]; then 4 | cd .. 5 | fi 6 | 7 | . .env 8 | 9 | if [[ "$DATABASE_URL" =~ "@db:" ]]; then 10 | DOCKER_NETWORK={{cookiecutter.repostory_name}}_default 11 | else 12 | DOCKER_NETWORK=host 13 | fi 14 | 15 | # this works even if `app` container doesn't have psql installed (where `bin/run-manage-py.sh dbshell` fails) 16 | docker run -it --rm --network "$DOCKER_NETWORK" postgres:16-alpine psql "$DATABASE_URL" 17 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/backup-db-to-email.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | if [ ! -f "$1" ]; then 5 | echo "Pass existing backup file name as the first argument" 6 | find "$BACKUP_LOCAL_DIR" -name "*.dump.zstd" | sort -r 7 | exit 127 8 | fi 9 | 10 | date 11 | 12 | EMAIL_CREDS="${EMAIL_HOST_USER}:${EMAIL_HOST_PASSWORD}@${EMAIL_HOST}:${EMAIL_PORT}" uv run emailhelper.py --from "${DEFAULT_FROM_EMAIL}" --to "${EMAIL_TARGET}" --subject "Database backup" -f "$1" 13 | 14 | echo "Email sent successfully" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/parameters.docker-compose.tf: -------------------------------------------------------------------------------- 1 | data "aws_partition" "self" {} 2 | 3 | resource "aws_ssm_parameter" "compose" { 4 | name = "/application/${var.name}/${var.env}/docker-compose.yml" 5 | type = "SecureString" 6 | value = templatefile("../../files/docker-compose.yml", { 7 | name = var.name 8 | env = var.env 9 | region = var.region 10 | ecr_base_url = var.ecr_base_url 11 | ecr_image = var.ecr_image 12 | }) 13 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/rotate-local-backups.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ -n "${BACKUP_LOCAL_ROTATE_KEEP_LAST:-}" ]; then 3 | echo "Rotating backup files - keeping ${BACKUP_LOCAL_ROTATE_KEEP_LAST} last ones" 4 | files_to_delete=$(find /var/backups -name "*.dump.zstd" | sort -r | tail -n "+${BACKUP_LOCAL_ROTATE_KEEP_LAST}") 5 | echo "$files_to_delete" | xargs --no-run-if-empty rm 6 | echo "Removed:" 7 | echo "$files_to_delete" 8 | else 9 | echo "BACKUP_LOCAL_ROTATE_KEEP_LAST is not set, skipping backup rotation" 10 | fi -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/core/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.region 3 | } 4 | 5 | resource "aws_ecr_repository" "app" { 6 | name = "${var.name}-prod" 7 | image_tag_mutability = "MUTABLE" 8 | 9 | image_scanning_configuration { 10 | scan_on_push = true 11 | } 12 | } 13 | 14 | resource "aws_ecr_repository" "app_staging" { 15 | name = "${var.name}-staging" 16 | image_tag_mutability = "MUTABLE" 17 | 18 | 
image_scanning_configuration { 19 | scan_on_push = true 20 | } 21 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/restore-db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | source "${SCRIPT_DIR}/common.sh" 5 | 6 | if [[ $# -ne 1 ]]; then 7 | echo "Usage: ./restore-db.sh " 8 | "${SCRIPT_DIR}"/list-backups.sh 9 | exit 2 10 | fi 11 | 12 | if [[ "$1" == b2://* || "$1" == b2id://* ]]; then 13 | uv run b2 cat "$1" | pg_restore -c -d "$DATABASE_URL" 14 | else 15 | pg_restore -c -d "$DATABASE_URL" < "$1" 16 | fi 17 | 18 | echo 'restore finished' -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/vars.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "env" {} 3 | variable "region" {} 4 | 5 | variable "vpc_id" {} 6 | variable "vpc_cidr" {} 7 | variable "subnets" {} 8 | variable "azs" {} 9 | 10 | variable "base_ami_id" {} 11 | variable "base_domain_name" {} 12 | 13 | variable "domain_name" {} 14 | variable "ec2_ssh_key" {} 15 | 16 | variable "ecr_base_url" {} 17 | variable "ecr_image" {} 18 | 19 | variable "instance_type" {} 20 | variable "health_check_type" {} 21 | variable "account_id" {} 22 | variable "database" {} -------------------------------------------------------------------------------- /docker/node-exporter/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -x 4 | 5 | INSTANCE_TYPE=$(wget http://169.254.169.254/latest/meta-data/instance-type -O- --timeout=5) 6 | [ -z "$INSTANCE_TYPE" ] && INSTANCE_TYPE='UNKNOWN' 7 | 8 | cat << EOF > /home/nobody/textfile_collector_metrics/instance_type.prom 9 | # HELP node_aws_ec2_instance_type type of ec2 instance 10 | # TYPE node_aws_ec2_instance_type gauge 11 | node_aws_ec2_instance_type{instance_type="$INSTANCE_TYPE"} 1 12 | EOF 13 | 14 | exec /bin/node_exporter --collector.textfile.directory=/home/nobody/textfile_collector_metrics/ "$@" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/scripts/vars.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # shellcheck disable=SC2034 3 | [ "$1" != "staging" ] && [ "$1" != "prod" ] && echo "Please provide environment name to deploy: staging or prod" && exit 1; 4 | 5 | PROJECT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../../ 6 | 7 | APP_SUFFIX="-$1" 8 | 9 | APP_OWNER=$(aws sts get-caller-identity --region us-east-1 --query "Account" --output text) 10 | APP_REGION="{{ cookiecutter.aws_region }}" 11 | APP_NAME="{{ cookiecutter.aws_project_name }}${APP_SUFFIX}" 12 | CLOUDFRONT_BUCKET="${APP_NAME}-spa${APP_SUFFIX}" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/database/security.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "db" { 2 | name = "${var.name}-db-sg" 3 | vpc_id = var.vpc_id 4 | 5 | ingress { 6 | description = "allow traffic to postgres port from within VPC" 7 | from_port = 5432 8 | to_port = 5432 9 | protocol = "tcp" 10 | cidr_blocks = 
[var.vpc_cidr] 11 | } 12 | 13 | egress { 14 | from_port = 0 15 | to_port = 0 16 | protocol = "-1" 17 | cidr_blocks = ["0.0.0.0/0"] 18 | } 19 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/tests/{% if cookiecutter.use_channels == "y" %}test_websocket.py{% endif %}: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from ..schemas import Heartbeat 4 | 5 | 6 | @pytest.mark.asyncio 7 | @pytest.mark.django_db(transaction=True) 8 | async def test__websocket__heartbeat(communicator) -> None: 9 | """Check websocket consumer receiving message""" 10 | await communicator.send_json_to(Heartbeat().dict()) 11 | response = await communicator.receive_json_from() 12 | assert response == Heartbeat().dict() 13 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/database/output.tf: -------------------------------------------------------------------------------- 1 | output "connection_string" { 2 | value = "postgres://${aws_db_instance.self.username}:${aws_db_instance.self.password}@${aws_db_instance.self.endpoint}/${aws_db_instance.self.db_name}" 3 | sensitive = true 4 | } 5 | 6 | output "user" { 7 | value = aws_db_instance.self.username 8 | } 9 | 10 | output "password" { 11 | value = aws_db_instance.self.password 12 | sensitive = true 13 | } 14 | 15 | output "endpoint" { 16 | value = aws_db_instance.self.endpoint 17 | } 18 | 19 | output "port" { 20 | value = aws_db_instance.self.port 21 | } 22 | 23 | output "name" { 24 | value = aws_db_instance.self.db_name 25 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/letsencrypt_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | RELPATH="$(dirname "$0")" 4 | ABSPATH="$(realpath "$RELPATH")" 5 | 6 | cd "$ABSPATH" 7 | 8 | source ./.env 9 | mkdir -p "$ABSPATH/letsencrypt/etc/dhparams" 10 | 11 | docker run -it --rm \ 12 | -v "$ABSPATH/letsencrypt/etc:/etc/letsencrypt" \ 13 | alpine/openssl \ 14 | dhparam -out /etc/letsencrypt/dhparams/dhparam.pem 2048 15 | 16 | docker run --entrypoint certbot -it --rm \ 17 | -v "$ABSPATH/letsencrypt/etc:/etc/letsencrypt" \ 18 | -p 80:80\ 19 | ghcr.io/reef-technologies/nginx-rt:v1.2.2 \ 20 | certonly \ 21 | --standalone --preferred-challenges http\ 22 | -d "$NGINX_HOST" 23 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import os 3 | import sys 4 | 5 | 6 | def main(): 7 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{cookiecutter.django_project_name}}.settings") 8 | try: 9 | from django.core.management import execute_from_command_line 10 | except ImportError as exc: 11 | raise ImportError( 12 | "Couldn't import Django. Are you sure it's installed and " 13 | "available on your PYTHONPATH environment variable? Did you " 14 | "forget to activate a virtual environment?" 
15 | ) from exc 16 | execute_from_command_line(sys.argv) 17 | 18 | 19 | if __name__ == "__main__": 20 | main() 21 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | if [ -z "${_COMMON_SH_LOADED:-}" ]; then 5 | PATH=/usr/local/sbin:/usr/local/bin:$PATH 6 | 7 | check_env_vars() { 8 | local required_vars=("$@") 9 | local missing_vars="" 10 | for var in "${required_vars[@]}"; do 11 | if [ -z "${!var}" ]; then 12 | missing_vars+="$var " 13 | fi 14 | done 15 | 16 | if [ -n "$missing_vars" ]; then 17 | echo "Error: The following required environment variables are missing: $missing_vars" >&2 18 | exit 2 19 | fi 20 | } 21 | 22 | if [ -n "${SENTRY_DSN}" ]; then 23 | export SENTRY_DSN 24 | eval "$(sentry-cli bash-hook)" 25 | fi 26 | 27 | _COMMON_SH_LOADED=true 28 | fi -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/backup-file-to-b2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | source "${SCRIPT_DIR}/common.sh" 5 | 6 | check_env_vars B2_APPLICATION_KEY_ID B2_APPLICATION_KEY B2_BUCKET 7 | 8 | if [ "$1" == "-" ]; then 9 | B2_FILENAME="$2" 10 | [ -n "$B2_FILENAME" ] || (echo "Pass backup file name as the second argument if stdin was provided as data source">&2; exit 2) 11 | elif [ ! -f "$1" ]; then 12 | echo "Pass existing backup file name as the first argument" 13 | exit 2 14 | else 15 | B2_FILENAME="$(basename "$1")" 16 | fi 17 | 18 | if [ -n "${B2_FOLDER:-}" ]; then 19 | B2_FILENAME="$B2_FOLDER/$B2_FILENAME" 20 | fi 21 | 22 | uv run b2 file upload "$B2_BUCKET" "$1" "$B2_FILENAME" 23 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/envs/prod/{% if cookiecutter.monitoring == "y" %}prometheus-cleanup.sh{% endif %}: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | if [ -n "$PROMETHEUS_MULTIPROC_DIR" ]; then 5 | if [ -d "$PROMETHEUS_MULTIPROC_DIR" ]; then 6 | # Delete prometheus live metric files in PROMETHEUS_MULTIPROC_DIR, but not in its subdirectories to not 7 | # interfere with other processes. Note that this is equivalent to what multiprocess.mark_process_dead does, 8 | # see https://github.com/prometheus/client_python/blob/master/prometheus_client/multiprocess.py#L159 9 | find "$PROMETHEUS_MULTIPROC_DIR" -maxdepth 1 -type f -name 'gauge_live*_*.db' -delete 10 | else 11 | # Ensure the directory exists 12 | mkdir -p "$PROMETHEUS_MULTIPROC_DIR" 13 | fi 14 | fi 15 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/tests/test_setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | This test file is here always to indicate that all dependencies were properly installed and the CI was able to run tests. 
3 | It also verifies if the healthcheck endpoint is functioning correctly 4 | """ 5 | 6 | from datetime import timedelta 7 | 8 | import pytest 9 | from django.utils.timezone import now 10 | from freezegun import freeze_time 11 | 12 | 13 | def test__setup(db, some): 14 | with freeze_time(now() - timedelta(days=1)): 15 | assert some == 1 16 | 17 | with pytest.raises(ZeroDivisionError): 18 | 1 / 0 19 | 20 | 21 | def test__alive_endpoint(client): 22 | response = client.get("/alive/") 23 | assert response.status_code == 200 24 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/asgi.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | {% if cookiecutter.use_channels == "y" %} 4 | from channels.routing import ProtocolTypeRouter, URLRouter 5 | {% endif %} 6 | from django.core.asgi import get_asgi_application 7 | 8 | # init django before importing urls 9 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{cookiecutter.django_project_name}}.settings") 10 | http_app = get_asgi_application() 11 | 12 | {% if cookiecutter.use_channels == "y" %} 13 | from .urls import ws_urlpatterns # noqa 14 | {% endif %} 15 | 16 | {% if cookiecutter.use_channels == "y" %} 17 | 18 | application = ProtocolTypeRouter( 19 | { 20 | "http": http_app, 21 | "websocket": URLRouter(ws_urlpatterns), 22 | } 23 | ) 24 | {% else %} 25 | application = http_app 26 | {% endif %} 27 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/ec2-profile.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "self" { 2 | name = "${var.name}-${var.env}-ec2-role" 3 | 4 | assume_role_policy = jsonencode({ 5 | Version = "2012-10-17", 6 | Statement = [ 7 | { 8 | Effect = "Allow", 9 | Principal = { 10 | Service: "ec2.amazonaws.com" 11 | }, 12 | Action = "sts:AssumeRole" 13 | } 14 | ] 15 | }) 16 | 17 | managed_policy_arns = [ 18 | "arn:aws:iam::aws:policy/AmazonSSMReadOnlyAccess", 19 | "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", 20 | "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess" 21 | ] 22 | } 23 | 24 | resource "aws_iam_instance_profile" "self" { 25 | name = "${var.name}-${var.env}-ec2-profile" 26 | role = aws_iam_role.self.name 27 | } 28 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/.github/workflows/cd.yml: -------------------------------------------------------------------------------- 1 | name: Deploy to AWS on push to certain branches 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'deploy-*' 7 | 8 | jobs: 9 | deploy: 10 | env: 11 | AWS_ACCESS_KEY_ID: {% raw %} ${{ secrets.DEPLOYMENT_AWS_ACCESS_KEY_ID }} {% endraw %} 12 | 13 | AWS_SECRET_ACCESS_KEY: {% raw %} ${{ secrets.DEPLOYMENT_AWS_SECRET_ACCESS_KEY }} {% endraw %} 14 | 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v2 18 | with: 19 | fetch-depth: 0 20 | - name: deploy to aws 21 | run: | 22 | set -e 23 | export ENVIRONMENT=${GITHUB_REF_NAME:7} 24 | ./deploy-to-aws.sh $ENVIRONMENT 25 | export TAG=deployed-${ENVIRONMENT}-`date -u +"%Y-%m-%dT%H.%M.%S"` 26 | git tag $TAG 27 | git push origin $TAG -------------------------------------------------------------------------------- 
/{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/tests/conftest.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Generator 2 | 3 | import pytest 4 | {% if cookiecutter.use_channels == "y" %} 5 | import pytest_asyncio 6 | from channels.testing import WebsocketCommunicator 7 | 8 | from ...asgi import application 9 | {% endif %} 10 | 11 | 12 | @pytest.fixture 13 | def some() -> Generator[int, None, None]: 14 | # setup code 15 | yield 1 16 | # teardown code 17 | {% if cookiecutter.use_channels == "y" %} 18 | 19 | 20 | @pytest_asyncio.fixture 21 | async def communicator(): 22 | communicator = WebsocketCommunicator(application, "/ws/v0/") 23 | connected, _ = await communicator.connect() 24 | assert connected 25 | yield communicator 26 | await communicator.disconnect(200) 27 | {% endif %} -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/backup-db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" 4 | source "${SCRIPT_DIR}/common.sh" 5 | 6 | check_env_vars DATABASE_URL 7 | 8 | TARGET_FILENAME="db_dump_$(date +%Y-%m-%d_%H%M%S).Fc.dump.zstd" 9 | 10 | DUMP_DB_TO_STDOUT=( 11 | pg_dump -Fc --compress=zstd -c --if-exists "$DATABASE_URL" 12 | ) 13 | 14 | if [ -n "${B2_BUCKET}" ]; then 15 | "${DUMP_DB_TO_STDOUT[@]}" | "${SCRIPT_DIR}"/backup-file-to-b2.sh - "${TARGET_FILENAME}" 16 | else 17 | mkdir -p "$BACKUP_LOCAL_DIR" 18 | TARGET="$BACKUP_LOCAL_DIR/$TARGET_FILENAME" 19 | "${DUMP_DB_TO_STDOUT[@]}" > "$TARGET" 20 | 21 | if [ -n "${EMAIL_HOST:-}" ] && [ -n "${EMAIL_TARGET:-}" ]; then 22 | "${SCRIPT_DIR}"/backup-db-to-email.sh "${TARGET}" 23 | fi 24 | fi 25 | 26 | echo "$TARGET_FILENAME" -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/management/commands/{% if cookiecutter.use_celery == "y" %}flush_queue.py{% endif %}: -------------------------------------------------------------------------------- 1 | from django.core.management.base import BaseCommand 2 | 3 | from {{cookiecutter.django_project_name}}.celery import flush_tasks, get_num_tasks_in_queue 4 | 5 | 6 | class Command(BaseCommand): 7 | help = "Flush task queue." 
8 | 9 | def add_arguments(self, parser) -> None: 10 | parser.add_argument("queue", type=str, help="Queue name to flush") 11 | 12 | def handle(self, *args, **kwargs): 13 | queue_name = kwargs["queue"] 14 | 15 | num_tasks = get_num_tasks_in_queue(queue_name) 16 | self.stdout.write(f"Found {num_tasks} tasks in '{queue_name}' queue") 17 | if not num_tasks: 18 | return 19 | 20 | flush_tasks(queue_name) 21 | self.stdout.write("All done") 22 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_tf/core/vars_cloud_init.tf: -------------------------------------------------------------------------------- 1 | variable "DEPLOY_SSH_KEY" { 2 | // private ssh key for cloning github repo 3 | type = string 4 | sensitive = true 5 | } 6 | 7 | // variables for .env file 8 | variable "DOTENV_SECRET_KEY" { 9 | type = string 10 | sensitive = true 11 | } 12 | 13 | variable "DOTENV_POSTGRES_HOST" { 14 | type = string 15 | sensitive = true 16 | } 17 | 18 | variable "DOTENV_POSTGRES_USER" { 19 | type = string 20 | sensitive = true 21 | } 22 | 23 | variable "DOTENV_POSTGRES_PASSWORD" { 24 | type = string 25 | sensitive = true 26 | } 27 | 28 | variable "DOTENV_DATABASE_POOL_URL" { 29 | type = string 30 | sensitive = true 31 | } 32 | 33 | variable "DOTENV_DATABASE_URL" { 34 | type = string 35 | sensitive = true 36 | } 37 | 38 | variable "DOTENV_SENTRY_DSN" { 39 | type = string 40 | sensitive = true 41 | } 42 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/tests/test_settings.py: -------------------------------------------------------------------------------- 1 | from importlib import import_module 2 | 3 | import pytest 4 | 5 | 6 | def test__settings__celery_beat_schedule(settings): 7 | """Ensure that CELERY_BEAT_SCHEDULE points to existing tasks""" 8 | 9 | if not hasattr(settings, "CELERY_BEAT_SCHEDULE"): 10 | pytest.skip("CELERY_BEAT_SCHEDULE is not defined") 11 | 12 | paths = {task["task"] for task in settings.CELERY_BEAT_SCHEDULE.values()} 13 | for path in paths: 14 | module_path, task_name = path.rsplit(".", maxsplit=1) 15 | try: 16 | module = import_module(module_path) 17 | except ImportError: 18 | pytest.fail(f"The module '{module_path}' does not exist") 19 | 20 | if not hasattr(module, task_name): 21 | pytest.fail(f"The task '{task_name}' does not exist in {module_path}") 22 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_scripts/vultr-get-instances.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # get list of all instances in Vultr account 3 | # save their IDs and IPs into files which will be used by \ 4 | # `vultr-deploy.py` and `vultr-update-cloudinit.py` 5 | 6 | import subprocess 7 | from pathlib import Path 8 | 9 | pwd = Path(__file__).parent 10 | 11 | instance_id = pwd / "instances_id.txt" 12 | instance_ip = pwd / "instances_ip.txt" 13 | 14 | res = subprocess.check_output(["vultr-cli", "instance", "list", "ipv4"]).decode("utf-8").split("\n") 15 | 16 | ids = [] 17 | ips = [] 18 | for line in res[1:]: # skip header 19 | line_items = line.split("\t") 20 | if len(line_items) != 13: 21 | continue 22 | ids.append(line_items[0].strip()) 23 | ips.append(line_items[1].strip()) 24 | 25 | with open(instance_ip, "w") as f: 26 | 
f.write("\n".join(ips)) 27 | 28 | with open(instance_id, "w") as f: 29 | f.write("\n".join(ids)) 30 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_scripts/vultr-deploy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # deploy to list of IPs from `instances_ip.txt` (see `vultr-get-instances.py`) 3 | 4 | import subprocess 5 | from pathlib import Path 6 | 7 | pwd = Path(__file__).parent 8 | 9 | 10 | with open(pwd / "instances_ip.txt") as f: 11 | ips = f.readlines() 12 | 13 | errs = [] 14 | for ip in ips: 15 | print("deploying to", ip) 16 | try: 17 | res = subprocess.Popen( 18 | ["git", "push", f"root@{ip.strip()}:~/repos/{{ cookiecutter.django_project_name }}-central.git"], 19 | env={ 20 | "GIT_SSH_COMMAND": "ssh -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" 21 | }, 22 | ).communicate() 23 | except subprocess.CalledProcessError: 24 | errs.append(ip) 25 | else: 26 | print("res", res) 27 | 28 | for err_ip in errs: 29 | print("error deploying to", err_ip) 30 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/alloy/config.alloy: -------------------------------------------------------------------------------- 1 | discovery.docker "containerlogs" { 2 | host = "unix:///var/run/docker.sock" 3 | } 4 | 5 | discovery.relabel "containerlogs" { 6 | targets = [] 7 | 8 | rule { 9 | source_labels = ["__meta_docker_container_name"] 10 | regex = "/(.*)" 11 | target_label = "container" 12 | } 13 | 14 | rule { 15 | source_labels = ["__meta_docker_container_log_stream"] 16 | target_label = "logstream" 17 | } 18 | } 19 | 20 | loki.source.docker "containerlogs" { 21 | host = "unix:///var/run/docker.sock" 22 | targets = discovery.docker.containerlogs.targets 23 | forward_to = [loki.write.default.receiver] 24 | relabel_rules = discovery.relabel.containerlogs.rules 25 | } 26 | 27 | loki.write "default" { 28 | endpoint { 29 | url = sys.env("LOKI_URL") + "/loki/api/v1/push" 30 | 31 | basic_auth { 32 | username = sys.env("LOKI_USER") 33 | password = sys.env("LOKI_PASSWORD") 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/setup-dev.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2017, Reef Technologies (reef.pl), All rights reserved. 3 | 4 | set -euo pipefail 5 | 6 | PROJECT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) 7 | ENV_DIR="./envs/dev" 8 | # shellcheck disable=SC2164 9 | cd "${PROJECT_DIR}" 10 | 11 | if [[ ! -d ".venv" ]]; then 12 | python3.11 -m venv .venv 13 | fi 14 | 15 | # Create a lock file if doesn't exist 16 | if [[ ! -f "uv.lock" ]]; then 17 | uv lock 18 | fi 19 | # Install Python dependencies 20 | uv sync --all-groups 21 | 22 | # Create .env from the template if doesn't exist 23 | if [[ ! 
-f "${ENV_DIR}/.env" ]]; then 24 | cp "${ENV_DIR}/.env.template" "${ENV_DIR}/.env" 25 | fi 26 | 27 | # Set symlinks 28 | ln -sf "${ENV_DIR}/.env" .env 29 | ln -sf "${ENV_DIR}/docker-compose.yml" docker-compose.yml 30 | 31 | # shellcheck disable=SC2164 32 | cd "${PROJECT_DIR}/app/" 33 | if [[ -L "Dockerfile" ]]; then 34 | unlink Dockerfile 35 | fi 36 | if [[ -L "src/entrypoint.sh" ]]; then 37 | unlink src/entrypoint.sh 38 | fi 39 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/setup-prod.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright 2017, Reef Technologies (reef.pl), All rights reserved. 3 | 4 | set -euo pipefail 5 | 6 | PROJECT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) 7 | ENV_DIR="./envs/prod" 8 | # shellcheck disable=SC2164 9 | cd "${PROJECT_DIR}" 10 | 11 | # Create .env from the template if doesn't exist 12 | if [[ ! -f "${ENV_DIR}/.env" ]]; then 13 | cp "${ENV_DIR}/.env.template" "${ENV_DIR}/.env" 14 | fi 15 | 16 | {% if cookiecutter.vulnerabilities_scanning == 'y' %} 17 | # Create .vuln.env from the template if doesn't exist 18 | if [[ ! -f "${ENV_DIR}/.vuln.env" ]]; then 19 | cp "${ENV_DIR}/.vuln.env.template" "${ENV_DIR}/.vuln.env" 20 | fi 21 | ln -sf "${ENV_DIR}/.vuln.env" .vuln.env 22 | {% endif %} 23 | 24 | # Set symlinks 25 | ln -sf "${ENV_DIR}/.env" .env 26 | ln -sf "${ENV_DIR}/docker-compose.yml" docker-compose.yml 27 | # shellcheck disable=SC2164 28 | cd "${PROJECT_DIR}/app/" 29 | ln -sf "${ENV_DIR}/Dockerfile" Dockerfile 30 | ln -sf ".${ENV_DIR}/entrypoint.sh" src/entrypoint.sh 31 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/common/vars.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | type = string 3 | } 4 | 5 | variable "name" { 6 | type = string 7 | } 8 | 9 | variable "env" { 10 | type = string 11 | } 12 | 13 | variable "base_ami_image" { 14 | type = string 15 | } 16 | 17 | variable "base_ami_image_owner" { 18 | type = string 19 | } 20 | 21 | variable "vpc_cidr" { 22 | type = string 23 | } 24 | 25 | variable "subnet_cidrs" { 26 | type = set(string) 27 | } 28 | 29 | variable "azs" { 30 | type = set(string) 31 | } 32 | 33 | variable "base_domain_name" { 34 | type = string 35 | } 36 | 37 | variable "domain_name" { 38 | type = string 39 | } 40 | 41 | variable "ec2_ssh_key" { 42 | type = string 43 | } 44 | 45 | variable "instance_type" { 46 | description = "EC2 instance type" 47 | type = string 48 | } 49 | 50 | variable "rds_instance_type" { 51 | description = "RDS instance type" 52 | type = string 53 | } 54 | 55 | variable "autoscaling_health_check_type" { 56 | description = "either EC2 or ELB" 57 | type = string 58 | } 59 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/envs/prod/gunicorn.conf.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | 3 | import environ 4 | {% if cookiecutter.monitoring == "y" %} 5 | from prometheus_client import multiprocess 6 | {% endif %} 7 | 8 | env = environ.Env() 9 | 10 | workers = env.int("GUNICORN_WORKERS", 2 * multiprocessing.cpu_count() + 1) 11 | max_workers = env.int("GUNICORN_MAX_WORKERS", 0) 12 | if max_workers > 0: 13 | workers = min(max_workers, workers) 14 | threads = env.int("GUNICORN_THREADS", 
1) 15 | preload_app = env.bool("GUNICORN_PRELOAD_APP", True) 16 | bind = "unix:/var/run/gunicorn/gunicorn.sock" 17 | {% if cookiecutter.use_channels == "y" %} 18 | wsgi_app = "{{ cookiecutter.django_project_name }}.asgi:application" 19 | {% else %} 20 | wsgi_app = "{{ cookiecutter.django_project_name }}.wsgi:application" 21 | {% endif %} 22 | access_logfile = "-" 23 | {% if cookiecutter.use_channels == "y" %} 24 | worker_class = "uvicorn.workers.UvicornWorker" 25 | {% endif %} 26 | 27 | 28 | {% if cookiecutter.monitoring == "y" %} 29 | def child_exit(server, worker): 30 | multiprocess.mark_process_dead(worker.pid) 31 | {% endif %} 32 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/management/commands/{% if cookiecutter.use_celery == "y" %}move_tasks.py{% endif %}: -------------------------------------------------------------------------------- 1 | from django.core.management.base import BaseCommand 2 | 3 | from {{cookiecutter.django_project_name}}.celery import get_num_tasks_in_queue, move_tasks 4 | 5 | 6 | class Command(BaseCommand): 7 | help = "Reschedule dead letter tasks." 8 | 9 | def add_arguments(self, parser) -> None: 10 | parser.add_argument("source_queue", type=str, help="Source queue name") 11 | parser.add_argument("destination_queue", type=str, help="Destination queue name") 12 | 13 | def handle(self, *args, **kwargs): 14 | source_queue = kwargs["source_queue"] 15 | destination_queue = kwargs["destination_queue"] 16 | 17 | num_tasks = get_num_tasks_in_queue(source_queue) 18 | self.stdout.write(f"Found {num_tasks} tasks in '{source_queue}' queue") 19 | if not num_tasks: 20 | return 21 | 22 | move_tasks(source_queue, destination_queue) 23 | self.stdout.write("All done") 24 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/parameters.env.tf: -------------------------------------------------------------------------------- 1 | resource "random_uuid" "random_uuid" {} 2 | 3 | resource "aws_ssm_parameter" "envrc" { 4 | name = "/application/${var.name}/${var.env}/.envrc" 5 | type = "SecureString" 6 | value = templatefile("../../files/envrc", { 7 | name = var.name 8 | env = var.env 9 | region = var.region 10 | account_id = var.account_id 11 | ecr_base_url = var.ecr_base_url 12 | ecr_image = var.ecr_image 13 | }) 14 | } 15 | 16 | 17 | resource "aws_ssm_parameter" "env" { 18 | name = "/application/${var.name}/${var.env}/.env" 19 | type = "SecureString" 20 | value = templatefile("../../files/env", { 21 | name = var.name 22 | env = var.env 23 | region = var.region 24 | secret_key = random_uuid.random_uuid.result 25 | 26 | database_name = var.database.name 27 | database_user = var.database.user 28 | database_password = var.database.password 29 | database_connection_string = var.database.connection_string 30 | }) 31 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_scripts/vultr-update-cloudinit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # update cloud-init data 3 | # this should be used only to UPDATE the data, initialization should be done via Terraform 4 | # see vultr_tf/core/vultr-cloud-init.tftpl 5 | import subprocess 6 | from pathlib import Path 7 | 8 | pwd = Path(__file__).parent 9 | 10 | # 
cloud-init script 11 | # use `vultr-cli instance user-data get ` to get existing data 12 | user_data = pwd / "userdata.txt" 13 | assert user_data.exists() 14 | 15 | with open(pwd / "instances_id.txt") as f: 16 | for instance_id in f.readlines(): 17 | print("instance id", instance_id) 18 | # res = subprocess.check_output(['vultr-cli', 'instance', 'user-data', 'get', instance_id.strip()]) 19 | res = subprocess.check_output( 20 | [ 21 | "vultr-cli", 22 | "instance", 23 | "user-data", 24 | "set", 25 | instance_id.strip(), 26 | "-d", 27 | str(user_data), 28 | ] 29 | ) 30 | print("res", res, type(res)) 31 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/{% if cookiecutter.use_celery == "y" %}tasks.py{% endif %}: -------------------------------------------------------------------------------- 1 | import structlog 2 | from celery import Task 3 | from celery.utils.log import get_task_logger 4 | 5 | from {{cookiecutter.django_project_name}}.celery import app 6 | 7 | logger = structlog.wrap_logger(get_task_logger(__name__)) 8 | 9 | 10 | def send_to_dead_letter_queue(task: Task, exc, task_id, args, kwargs, einfo): 11 | """Hook to put a task into dead letter queue when it fails.""" 12 | if task.app.conf.task_always_eager: 13 | return  # do not run failed task again in eager mode 14 | 15 | logger.warning( 16 | "Sending failed task to dead letter queue", 17 | task=task, 18 | exc=exc, 19 | task_id=task_id, 20 | args=args, 21 | kwargs=kwargs, 22 | einfo=einfo, 23 | ) 24 | task.apply_async(args=args, kwargs=kwargs, queue="dead_letter") 25 | 26 | 27 | @app.task(on_failure=send_to_dead_letter_queue) 28 | def demo_task(x, y): 29 | logger.info("adding two numbers", x=x, y=y) 30 | return x + y 31 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | First off - thank you for using this project and for your willingness to contribute ❤️ 4 | 5 | ## Getting started 6 | 7 | The main goal of this project is to provide a quick way to create production-ready Django apps. 8 | Due to the complexity of this goal, the implemented solution is highly opinionated and may not fit everyone's needs. 9 | 10 | - all changes must comply with [Reef Technologies conventions](https://github.com/reef-technologies/handbook#73-conventions) 11 | - before putting a lot of effort into a PR with a feature, please open an issue to discuss it first to avoid wasting time on something that may not be merged. 12 | - the rule of thumb is that if it is not useful for 90% of projects, it should not be included 13 | 14 | ## Development 15 | 16 | Before creating a PR, run: 17 | 18 | ```sh 19 | nox 20 | ``` 21 | 22 | to check if everything is OK. 23 | 24 | Document any breaking changes in [CHANGELOG.md]({{cookiecutter.repostory_name}}/docs/3rd_party/cookiecutter-rt-django/CHANGELOG.md). 25 | When updating dependencies that have breaking changes, add a link to the dependency's changelog and prepend the entry with the `**BREAKING**` tag.
26 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/alb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_lb" "self" { 2 | name = "${var.name}-${var.env}" 3 | internal = false 4 | load_balancer_type = "application" 5 | subnets = var.subnets 6 | security_groups = [aws_security_group.public.id] 7 | enable_deletion_protection = false 8 | } 9 | 10 | resource "aws_lb_target_group" "self" { 11 | name = "${var.name}-${var.env}" 12 | port = 8000 13 | protocol = "HTTP" 14 | vpc_id = var.vpc_id 15 | target_type = "instance" 16 | 17 | health_check { 18 | enabled = true 19 | port = 8000 20 | path = "/alive/" 21 | matcher = "200-302" 22 | } 23 | } 24 | 25 | resource "aws_lb_listener" "self" { 26 | load_balancer_arn = aws_lb.self.arn 27 | port = "443" 28 | protocol = "HTTPS" 29 | ssl_policy = "ELBSecurityPolicy-2016-08" 30 | certificate_arn = aws_acm_certificate.self.arn 31 | 32 | default_action { 33 | type = "forward" 34 | target_group_arn = aws_lb_target_group.self.arn 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/healthcheck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import socket 5 | import sys 6 | 7 | 8 | def healthcheck(socket_path: str, url: str): 9 | with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: 10 | s.settimeout(1) 11 | s.connect(socket_path) 12 | req = f"GET {url} HTTP/1.1\r\nHost: localhost\r\n\r\n" 13 | s.sendall(req.encode()) 14 | response = s.recv(64) 15 | assert response, "No response received" 16 | 17 | status_code = int(response.decode().split()[1]) 18 | assert status_code == 200, f"Unexpected status code: {status_code}" 19 | 20 | sys.stdout.write("OK\n") 21 | 22 | 23 | if __name__ == "__main__": 24 | parser = argparse.ArgumentParser() 25 | parser.add_argument("socket_path", type=str, help="Path to the socket file") 26 | parser.add_argument("--url", type=str, required=False, default="/alive/", help="URL to check") 27 | args = parser.parse_args() 28 | 29 | try: 30 | healthcheck(args.socket_path, args.url) 31 | except Exception as e: 32 | sys.stderr.write(f"Error: {e}\n") 33 | sys.exit(1) 34 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/scripts/build-backend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | THIS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) 5 | source "$THIS_DIR"/vars.sh 6 | 7 | cd "$PROJECT_DIR" 8 | 9 | DATE_UTC=$(date -u) 10 | TIMESTAMP_UTC=$(date +%s) 11 | COMMIT_HASH=$(git rev-parse --short HEAD || echo -n "local") 12 | 13 | echo "Building Backend: ${APP_NAME}" 14 | 15 | ./setup-prod.sh 16 | 17 | aws ecr get-login-password --region "${APP_REGION}" | docker login --username AWS --password-stdin "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com 18 | 19 | DOCKER_BUILDKIT=1 docker build \ 20 | -f app/Dockerfile \ 21 | --progress plain \ 22 | --platform linux/amd64 \ 23 | -t "${APP_NAME}" \ 24 | --label build_date_utc="$DATE_UTC" \ 25 | --label build_timestamp_utc="$TIMESTAMP_UTC" \ 26 | --label git_commit_hash="$COMMIT_HASH" \ 27 | . 
28 | docker tag "${APP_NAME}":latest "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":latest 29 | docker tag "${APP_NAME}":latest "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":"${COMMIT_HASH}" 30 | 31 | docker push "${APP_OWNER}".dkr.ecr."${APP_REGION}".amazonaws.com/"${APP_NAME}":"${COMMIT_HASH}" 32 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/env: -------------------------------------------------------------------------------- 1 | ENV=${env} 2 | NGINX_HOST=localhost 3 | DEBUG=0 4 | SECRET_KEY=${secret_key} 5 | POSTGRES_DB=${database_name} 6 | POSTGRES_USER=${database_user} 7 | POSTGRES_PASSWORD=${database_password} 8 | DATABASE_URL=${database_connection_string} 9 | 10 | EMAIL_BACKEND=django.core.mail.backends.filebased.EmailBackend 11 | EMAIL_FILE_PATH=/tmp/email 12 | EMAIL_HOST=smtp.sendgrid.net 13 | EMAIL_PORT=587 14 | EMAIL_USE_TLS=1 15 | EMAIL_HOST_USER=apikey 16 | EMAIL_HOST_PASSWORD= 17 | DEFAULT_FROM_EMAIL= 18 | 19 | SENTRY_DSN= 20 | HTTPS_REDIRECT=n 21 | HTTPS_PROXY_HEADER=X_SCHEME 22 | CSP_ENABLED=n 23 | CSP_REPORT_ONLY=n 24 | CSP_REPORT_URL= 25 | CSP_DEFAULT_SRC="'none'" 26 | CSP_SCRIPT_SRC="'self'" 27 | CSP_STYLE_SRC="'self'" 28 | CSP_FONT_SRC="'self'" 29 | CSP_IMG_SRC="'self'" 30 | CSP_MEDIA_SRC="'self'" 31 | CSP_OBJECT_SRC="'self'" 32 | CSP_FRAME_SRC="'self'" 33 | CSP_CONNECT_SRC="'self'" 34 | CSP_CHILD_SRC="'self'" 35 | CSP_MANIFEST_SRC="'self'" 36 | CSP_WORKER_SRC="'self'" 37 | CSP_BLOCK_ALL_MIXED_CONTENT=y 38 | CSP_EXCLUDE_URL_PREFIXES= 39 | BACKUP_LOCAL_ROTATE_KEEP_LAST= 40 | BACKUP_B2_BUCKET= 41 | BACKUP_B2_FOLDER= 42 | BACKUP_B2_APPLICATION_KEY_ID= 43 | BACKUP_B2_APPLICATION_KEY= 44 | DATABASE_POOL_URL= 45 | CHANNELS_BACKEND_URL=redis://redis:6379/1 -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/nginx/config_helpers/brotli.conf: -------------------------------------------------------------------------------- 1 | brotli {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 2 | brotli_static {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 3 | 4 | brotli_comp_level 6; 5 | brotli_types 6 | # text/html is always in brotli_types 7 | text/richtext 8 | text/plain 9 | text/css 10 | text/x-script 11 | text/x-component 12 | text/x-java-source 13 | text/x-markdown 14 | application/javascript 15 | application/x-javascript 16 | text/javascript 17 | text/js 18 | image/x-icon 19 | application/x-perl 20 | application/x-httpd-cgi 21 | text/xml 22 | application/xml 23 | application/xml+rss 24 | application/json 25 | multipart/bag 26 | multipart/mixed 27 | application/xhtml+xml 28 | font/ttf 29 | font/otf 30 | font/x-woff 31 | image/svg+xml 32 | application/vnd.ms-fontobject 33 | application/ttf 34 | application/x-ttf 35 | application/otf 36 | application/x-otf 37 | application/truetype 38 | application/opentype 39 | application/x-opentype 40 | application/font-woff 41 | application/eot 42 | application/font 43 | application/font-sfnt 44 | application/wasm; 45 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/domain.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "self" { 2 | name = var.base_domain_name 3 | } 4 | 5 | resource "aws_route53_record" "a" { 6 | zone_id = 
data.aws_route53_zone.self.zone_id 7 | name = var.domain_name 8 | type = "A" 9 | 10 | alias { 11 | name = aws_lb.self.dns_name 12 | zone_id = aws_lb.self.zone_id 13 | evaluate_target_health = true 14 | } 15 | } 16 | 17 | resource "aws_acm_certificate" "self" { 18 | domain_name = var.domain_name 19 | validation_method = "DNS" 20 | 21 | tags = { 22 | Project = var.name 23 | Env = var.env 24 | } 25 | 26 | lifecycle { 27 | create_before_destroy = true 28 | } 29 | } 30 | 31 | resource "aws_route53_record" "cert-validation" { 32 | for_each = { 33 | for dvo in aws_acm_certificate.self.domain_validation_options: dvo.domain_name => { 34 | name = dvo.resource_record_name 35 | record = dvo.resource_record_value 36 | type = dvo.resource_record_type 37 | } 38 | } 39 | 40 | allow_overwrite = true 41 | name = each.value.name 42 | records = [each.value.record] 43 | ttl = 60 44 | type = each.value.type 45 | zone_id = data.aws_route53_zone.self.zone_id 46 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/nginx/config_helpers/brotli.conf: -------------------------------------------------------------------------------- 1 | brotli {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 2 | brotli_static {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 3 | 4 | brotli_comp_level 6; 5 | brotli_types 6 | # text/html is always in brotli_types 7 | text/richtext 8 | text/plain 9 | text/css 10 | text/x-script 11 | text/x-component 12 | text/x-java-source 13 | text/x-markdown 14 | application/javascript 15 | application/x-javascript 16 | text/javascript 17 | text/js 18 | image/x-icon 19 | application/x-perl 20 | application/x-httpd-cgi 21 | text/xml 22 | application/xml 23 | application/xml+rss 24 | application/json 25 | multipart/bag 26 | multipart/mixed 27 | application/xhtml+xml 28 | font/ttf 29 | font/otf 30 | font/x-woff 31 | image/svg+xml 32 | application/vnd.ms-fontobject 33 | application/ttf 34 | application/x-ttf 35 | application/otf 36 | application/x-otf 37 | application/truetype 38 | application/opentype 39 | application/x-opentype 40 | application/font-woff 41 | application/eot 42 | application/font 43 | application/font-sfnt 44 | application/wasm; 45 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/prod/terraform.tfvars: -------------------------------------------------------------------------------- 1 | # each of this vars can be overridden by adding ENVIRONMENT variable with name: 2 | # TF_VAR_var_name="value" 3 | 4 | name = "{{ cookiecutter.aws_project_name }}" 5 | region = "{{ cookiecutter.aws_region }}" 6 | env = "prod" 7 | 8 | # VPC and subnet CIDR settings, change them if you need to pair 9 | # multiple CIDRs (i.e. 
with different component) 10 | vpc_cidr = "10.2.0.0/16" 11 | subnet_cidrs = ["10.2.1.0/24", "10.2.2.0/24"] 12 | azs = ["{{ cookiecutter.aws_region}}c", "{{ cookiecutter.aws_region}}d"] 13 | 14 | # By default, we have an ubuntu image 15 | base_ami_image = "{{ cookiecutter.aws_ami_image}}" 16 | base_ami_image_owner = "{{ cookiecutter.aws_ami_image_owner }}" 17 | 18 | # domain setting 19 | base_domain_name = "{{ cookiecutter.aws_base_domain_name }}" 20 | domain_name = "{{ cookiecutter.aws_domain_name }}" 21 | 22 | # default ssh key 23 | ec2_ssh_key = "{{ cookiecutter.aws_ec2_ssh_key }}" 24 | 25 | instance_type = "t3.medium" 26 | rds_instance_type = "db.t3.small" 27 | 28 | # defines if we use EC2-only healthcheck or ELB healthcheck 29 | # EC2 healthcheck reacts only on internal EC2 checks (i.e. if machine cannot be reached) 30 | # recommended for staging = EC2, for prod = ELB 31 | autoscaling_health_check_type = "ELB" 32 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/staging/terraform.tfvars: -------------------------------------------------------------------------------- 1 | # each of this vars can be overridden by adding ENVIRONMENT variable with name: 2 | # TF_VAR_var_name="value" 3 | 4 | name = "{{ cookiecutter.aws_project_name }}" 5 | region = "{{ cookiecutter.aws_region }}" 6 | env = "staging" 7 | 8 | # VPC and subnet CIDR settings, change them if you need to pair 9 | # multiple CIDRs (i.e. with different component) 10 | vpc_cidr = "10.20.0.0/16" 11 | subnet_cidrs = ["10.20.1.0/24", "10.20.2.0/24"] 12 | azs = ["{{ cookiecutter.aws_region }}a", "{{ cookiecutter.aws_region }}b"] 13 | 14 | # By default, we have an ubuntu image 15 | base_ami_image = "{{ cookiecutter.aws_ami_image }}" 16 | base_ami_image_owner = "{{ cookiecutter.aws_ami_image_owner }}" 17 | 18 | # domain setting 19 | base_domain_name = "{{ cookiecutter.aws_base_domain_name }}" 20 | domain_name = "{{ cookiecutter.aws_staging_domain_name }}" 21 | 22 | # default ssh key 23 | ec2_ssh_key = "{{ cookiecutter.aws_ec2_ssh_key }}" 24 | 25 | instance_type = "t3.medium" 26 | rds_instance_type = "db.t3.small" 27 | 28 | # defines if we use EC2-only healthcheck or ELB healthcheck 29 | # EC2 healthcheck reacts only on internal EC2 checks (i.e. if machine cannot be reached) 30 | # recommended for staging = EC2, for prod = ELB 31 | autoscaling_health_check_type = "EC2" 32 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Copyright 2024, Reef Technologies (reef.pl), All rights reserved. 3 | set -eux 4 | 5 | if [ ! 
-f ".env" ]; then 6 | echo "\e[31mPlease setup the environment first!\e[0m"; 7 | exit 1; 8 | fi 9 | 10 | docker compose build 11 | 12 | # Tag the first image from multi-stage app Dockerfile to mark it as not dangling 13 | BASE_IMAGE=$(docker images --quiet --filter="label=builder=true" | head -n1) 14 | docker image tag "${BASE_IMAGE}" {{cookiecutter.django_project_name}}/app-builder 15 | 16 | # collect static files to external storage while old app is still running 17 | # docker compose run --rm app sh -c "python manage.py collectstatic --no-input" 18 | 19 | SERVICES=$(docker compose ps --services 2>/dev/null \ 20 | | grep -v -e 'is not set' -e db -e redis) 21 | 22 | # shellcheck disable=2086 23 | docker compose stop $SERVICES 24 | 25 | docker compose up -d db # in case it hasn't been launched before 26 | # backup db before any database changes 27 | docker compose run --rm backups ./backup-db.sh 28 | # start the app container only in order to perform migrations 29 | docker compose run --rm app sh -c "python manage.py wait_for_database --timeout 10; python manage.py migrate" 30 | 31 | # start everything 32 | docker compose up -d 33 | 34 | # Clean all dangling images 35 | docker images --quiet --filter=dangling=true \ 36 | | xargs --no-run-if-empty docker rmi \ 37 | || true 38 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/nginx/config_helpers/gzip.conf: -------------------------------------------------------------------------------- 1 | gzip {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 2 | gzip_static {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 3 | gzip_proxied {% if cookiecutter.nginx_compression_enabled == 'y'%}any{% else %}off{% endif %}; 4 | 5 | gzip_vary on; 6 | gzip_comp_level 6; 7 | gzip_buffers 16 8k; 8 | gzip_http_version 1.1; 9 | gzip_types 10 | # text/html is always in gzip_types 11 | text/richtext 12 | text/plain 13 | text/css 14 | text/x-script 15 | text/x-component 16 | text/x-java-source 17 | text/x-markdown 18 | application/javascript 19 | application/x-javascript 20 | text/javascript 21 | text/js 22 | image/x-icon 23 | application/x-perl 24 | application/x-httpd-cgi 25 | text/xml 26 | application/xml 27 | application/xml+rss 28 | application/json 29 | multipart/bag 30 | multipart/mixed 31 | application/xhtml+xml 32 | font/ttf 33 | font/otf 34 | font/x-woff 35 | image/svg+xml 36 | application/vnd.ms-fontobject 37 | application/ttf 38 | application/x-ttf 39 | application/otf 40 | application/x-otf 41 | application/truetype 42 | application/opentype 43 | application/x-opentype 44 | application/font-woff 45 | application/eot 46 | application/font 47 | application/font-sfnt 48 | application/wasm; 49 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/nginx/config_helpers/gzip.conf: -------------------------------------------------------------------------------- 1 | gzip {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 2 | gzip_static {% if cookiecutter.nginx_compression_enabled == 'y'%}on{% else %}off{% endif %}; 3 | gzip_proxied {% if cookiecutter.nginx_compression_enabled == 'y'%}any{% else %}off{% endif %}; 4 | 5 | gzip_vary on; 6 | gzip_comp_level 6; 7 | gzip_buffers 16 8k; 8 | gzip_http_version 1.1; 9 | gzip_types 10 | # text/html is always in gzip_types 11 | text/richtext 12 | text/plain 13 | text/css 14 | 
text/x-script 15 | text/x-component 16 | text/x-java-source 17 | text/x-markdown 18 | application/javascript 19 | application/x-javascript 20 | text/javascript 21 | text/js 22 | image/x-icon 23 | application/x-perl 24 | application/x-httpd-cgi 25 | text/xml 26 | application/xml 27 | application/xml+rss 28 | application/json 29 | multipart/bag 30 | multipart/mixed 31 | application/xhtml+xml 32 | font/ttf 33 | font/otf 34 | font/x-woff 35 | image/svg+xml 36 | application/vnd.ms-fontobject 37 | application/ttf 38 | application/x-ttf 39 | application/otf 40 | application/x-otf 41 | application/truetype 42 | application/opentype 43 | application/x-opentype 44 | application/font-woff 45 | application/eot 46 | application/font 47 | application/font-sfnt 48 | application/wasm; 49 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Continuous Integration 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | pull_request: 7 | branches: [master] 8 | 9 | env: 10 | PYTHON_DEFAULT_VERSION: "3.11" 11 | 12 | jobs: 13 | lint: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v3 17 | with: 18 | fetch-depth: 0 19 | - name: Set up Python ${{ env.PYTHON_DEFAULT_VERSION }} 20 | uses: actions/setup-python@v4 21 | with: 22 | python-version: ${{ env.PYTHON_DEFAULT_VERSION }} 23 | - name: Install uv 24 | uses: astral-sh/setup-uv@v5 25 | with: 26 | version: "0.5.x" 27 | - name: Install dependencies 28 | run: python -m pip install --upgrade nox 29 | - name: Run linters 30 | run: nox -vs lint 31 | check_crufted_project: 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v3 35 | with: 36 | fetch-depth: 0 37 | - name: Set up Python ${{ env.PYTHON_DEFAULT_VERSION }} 38 | uses: actions/setup-python@v4 39 | with: 40 | python-version: ${{ env.PYTHON_DEFAULT_VERSION }} 41 | - name: Install uv 42 | uses: astral-sh/setup-uv@v5 43 | with: 44 | version: "0.5.x" 45 | - name: Install dependencies 46 | run: python -m pip install --upgrade nox 47 | - name: Run checks on project created from template 48 | run: nox -vt crufted_project 49 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/envs/prod/{% if cookiecutter.use_celery == 'y' %}celery-entrypoint.sh{% endif %}: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | ./prometheus-cleanup.sh 5 | 6 | # below we define two workers types (each may have any concurrency); 7 | # each worker may have its own settings 8 | WORKERS="master worker" 9 | OPTIONS="-A {{cookiecutter.django_project_name}} -E -l ERROR --pidfile=/var/run/celery-%n.pid --logfile=/var/log/celery-%n.log" 10 | 11 | # set up settings for workers and run the latter; 12 | # here events from "celery" queue (default one, will be used if queue not specified) 13 | # will go to "master" workers, and events from "worker" queue go to "worker" workers; 14 | # by default there are no workers, but each type of worker may scale up to 4 processes 15 | # Since celery runs in root of the docker, we also need to allow it to. 
16 | # shellcheck disable=2086 17 | C_FORCE_ROOT=1 nice celery multi start $WORKERS $OPTIONS \ 18 | -Q:master celery --autoscale:master=$CELERY_MASTER_CONCURRENCY,0 \ 19 | -Q:worker worker --autoscale:worker=$CELERY_WORKER_CONCURRENCY,0 20 | 21 | # shellcheck disable=2064 22 | trap "celery multi stop $WORKERS $OPTIONS; exit 0" INT TERM 23 | 24 | tail -f /var/log/celery-*.log & 25 | 26 | # check celery status periodically to exit if it crashed 27 | while true; do 28 | sleep 120 29 | echo "Checking celery status" 30 | celery -A {{cookiecutter.django_project_name}} status -t 30 > /dev/null 2>&1 || exit 1 31 | echo "Celery status OK" 32 | done 33 | -------------------------------------------------------------------------------- /.github/workflows/publish-docker-node-exporter.yml: -------------------------------------------------------------------------------- 1 | name: RT node-exporter docker image 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'master' 7 | - 'docker-release' 8 | - 'docker-releases/*' 9 | 10 | tags: 11 | - '*' 12 | 13 | pull_request: 14 | branches: 15 | - 'master' 16 | 17 | env: 18 | REGISTRY: ghcr.io 19 | IMAGE_NAME: node-exporter-aws-ec2 20 | 21 | jobs: 22 | build-and-push-image: 23 | runs-on: ubuntu-latest 24 | permissions: 25 | contents: read 26 | packages: write 27 | 28 | steps: 29 | - name: Checkout repository 30 | uses: actions/checkout@v2 31 | 32 | - name: Setup Docker Buildx 33 | uses: docker/setup-buildx-action@v1 34 | 35 | - name: Log in to the Container registry 36 | uses: docker/login-action@v1 37 | with: 38 | registry: ${{ env.REGISTRY }} 39 | username: ${{ github.actor }} 40 | password: ${{ secrets.GITHUB_TOKEN }} 41 | 42 | - name: Extract metadata (tags, labels) for Docker 43 | id: meta 44 | uses: docker/metadata-action@v3 45 | with: 46 | images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/${{ env.IMAGE_NAME }} 47 | 48 | - name: Build and push Docker image 49 | uses: docker/build-push-action@v2 50 | with: 51 | context: docker/node-exporter 52 | push: true 53 | tags: ${{ steps.meta.outputs.tags }} 54 | labels: ${{ steps.meta.outputs.labels }} 55 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "cookiecutter-rt-django" 3 | description = "RT Django project template - don't try to install this as a package, use it as a cruft template" 4 | version = "0" 5 | license = {file = "LICENSE"} 6 | requires-python = ">=3.11" 7 | 8 | dependencies = [ 9 | "cruft", 10 | "nox", 11 | ] 12 | 13 | [project.urls] 14 | homepage = "https://github.com/reef-technologies/cookiecutter-rt-django" 15 | changelog = "https://github.com/reef-technologies/cookiecutter-rt-django/{{cookiecutter.repostory_name}}/docs/3rd_party/cookiecutter-rt-django/CHANGELOG.md" 16 | 17 | [tool.setuptools] 18 | packages = [] 19 | 20 | [project.optional-dependencies] 21 | format = ["ruff"] 22 | lint = ["ruff", "codespell[toml]"] 23 | 24 | [tool.ruff] 25 | line-length = 120 26 | exclude = [ 27 | "\\{\\{cookiecutter.repostory_name\\}\\}", 28 | ] 29 | 30 | [tool.ruff.lint] 31 | # TODO add D 32 | select = [ 33 | "E", "F", "I", "UP", "S", 34 | "TC005", 35 | ] 36 | # TODO: remove E501 once docstrings are formatted 37 | ignore = [ 38 | "D100", "D105", "D107", "D200", "D202", "D203", "D205", "D212", "D400", "D401", "D415", 39 | "D101", "D102","D103", "D104", # TODO remove once we have docstring for all public methods 40 | "E501", # TODO: remove E501 once
docstrings are formatted 41 | "S101", "S108", "S603", "S607", 42 | ] 43 | 44 | [tool.ruff.lint.per-file-ignores] 45 | "__init__.py" = ["F401"] 46 | "**/test*/**.py" = ["D", "F403", "F405", "S106", "S311"] 47 | 48 | [tool.codespell] 49 | skip = '*.min.js,*.lock' 50 | ignore-words-list = 'datas' 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2018, Reef Technologies 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/ec2-autoscale.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name_env = "${var.name}-${var.env}" 3 | cloudinit = templatefile("../../files/cloud-init.yml", { 4 | name = var.name 5 | env = var.env 6 | region = var.region 7 | }) 8 | } 9 | 10 | resource "aws_launch_template" "self" { 11 | name = local.name_env 12 | image_id = var.base_ami_id 13 | instance_type = var.instance_type 14 | 15 | iam_instance_profile { 16 | name = aws_iam_instance_profile.self.name 17 | } 18 | 19 | disable_api_termination = false 20 | key_name = aws_key_pair.self.key_name 21 | 22 | user_data = base64encode(local.cloudinit) 23 | 24 | block_device_mappings { 25 | device_name = "/dev/sda1" 26 | 27 | ebs { 28 | delete_on_termination = true 29 | encrypted = true 30 | volume_size = 20 31 | } 32 | } 33 | 34 | credit_specification { 35 | cpu_credits = "standard" 36 | } 37 | 38 | vpc_security_group_ids = [ 39 | aws_security_group.internal.id 40 | ] 41 | } 42 | 43 | resource "aws_autoscaling_group" "self" { 44 | name = local.name_env 45 | desired_capacity = 1 46 | max_size = 1 47 | min_size = 1 48 | vpc_zone_identifier = [var.subnets[0]] 49 | 50 | launch_template { 51 | id = aws_launch_template.self.id 52 | version = "$Latest" 53 | } 54 | 55 | tag { 56 | key = "Name" 57 | propagate_at_launch = true 58 | value = local.name_env 59 | } 60 | 61 | target_group_arns = [ 62 | aws_lb_target_group.self.arn 63 | ] 64 | 65 | health_check_type = var.health_check_type 66 | } 67 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/{% if cookiecutter.monitoring == "y" %}metrics.py{% endif %}: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | 4 | import prometheus_client 5 | {% if cookiecutter.use_celery == "y" %} 6 | from django.conf import settings 7 | {% endif %} 8 | from django.http import HttpResponse 9 | from django_prometheus.exports import ExportToDjangoView 10 | from prometheus_client import REGISTRY, multiprocess 11 | 12 | {% if cookiecutter.use_celery == "y" %} 13 | from ..celery import get_num_tasks_in_queue, num_tasks_in_queue 14 | {% endif %} 15 | 16 | 17 | class RecursiveMultiProcessCollector(multiprocess.MultiProcessCollector): 18 | """A multiprocess collector that scans the directory recursively""" 19 | 20 | def collect(self): 21 | files = glob.glob(os.path.join(self._path, "**/*.db"), recursive=True) 22 | return self.merge(files, accumulate=True) 23 | 24 | 25 | if is_multiprocess := bool(os.environ.get("PROMETHEUS_MULTIPROC_DIR")): 26 | registry = prometheus_client.CollectorRegistry() 27 | RecursiveMultiProcessCollector(registry) 28 | else: 29 | registry = REGISTRY 30 | 31 | 32 | def metrics_view(request): 33 | """Exports metrics as a Django view""" 34 | 35 | {% if cookiecutter.use_celery == "y" %} 36 | for queue in settings.CELERY_TASK_QUEUES: 37 | num_tasks_in_queue.labels(queue.name).set(get_num_tasks_in_queue(queue.name)) 38 | {% endif %} 39 | 40 | if is_multiprocess: 41 | return HttpResponse( 42 | prometheus_client.generate_latest(registry), 43 | content_type=prometheus_client.CONTENT_TYPE_LATEST, 44 | ) 45 | 46 | return ExportToDjangoView(request) 47 | 
-------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_tf/core/vultr-cloud-init.tftpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # shell variables have to have doubled dollar sign, otherwise Terraform will try to interpolate them 3 | # the only variable with single dollar is `DEPLOY_SSH_KEY` in the block below 4 | echo "starting custom cloud-init" 5 | 6 | # add deploy ssh key to clone repo 7 | DEPLOY_KEY_FILE="/root/.ssh/id_ed25519" 8 | echo "${DEPLOY_SSH_KEY}" > $${DEPLOY_KEY_FILE} 9 | chmod 600 $${DEPLOY_KEY_FILE} 10 | 11 | DEPLOY_DIR="/root/domains/{{ cookiecutter.repostory_name }}/" 12 | REPO_DIR="/root/repos/{{ cookiecutter.repostory_name }}.git" 13 | REPO_ORIGIN="git@github.com:reef-technologies/{{ cookiecutter.django_project_name }}.git" 14 | 15 | mkdir -p /root/repos/ 16 | mkdir -p $${DEPLOY_DIR} 17 | mkdir -p /root/volumes/{{ cookiecutter.django_project_name }}-mount/ 18 | 19 | # repo init script for Vultr server 20 | ssh-keyscan github.com >> /root/.ssh/known_hosts 21 | apt install -y git 22 | GIT_SSH_COMMAND="ssh -i $${DEPLOY_KEY_FILE}" git clone --depth=1 --bare --no-checkout $${REPO_ORIGIN} $${REPO_DIR} 23 | 24 | # 1st time deploy and setup 25 | git --work-tree=$${DEPLOY_DIR} --git-dir=$${REPO_DIR} checkout -f main 26 | cp $${DEPLOY_DIR}/bin/post-receive $${REPO_DIR}/hooks/post-receive 27 | 28 | $${DEPLOY_DIR}/bin/prepare-os.sh 29 | $${DEPLOY_DIR}/setup-prod.sh 30 | 31 | # add env variables to .env file 32 | cat <> $${DEPLOY_DIR}/.env 33 | POSTGRES_HOST=${POSTGRES_HOST} 34 | POSTGRES_USER=${POSTGRES_USER} 35 | POSTGRES_PASSWORD=${POSTGRES_PASSWORD} 36 | DATABASE_POOL_URL=${DATABASE_POOL_URL} 37 | DATABASE_URL=${DATABASE_URL} 38 | SENTRY_DSN=${SENTRY_DSN} 39 | SECRET_KEY=${SECRET_KEY} 40 | 41 | EOF 42 | 43 | cd $${DEPLOY_DIR} && docker compose up --build --detach 44 | echo "finishing custom cloud-init" 45 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/vultr_tf/core/main.tf: -------------------------------------------------------------------------------- 1 | provider "vultr" { 2 | api_key = var.vultr_api_key 3 | } 4 | 5 | resource "vultr_instance" "worker" { 6 | count = 1 7 | hostname = "instance-{{ cookiecutter.django_project_name }}-${count.index + 1}" 8 | region = var.region 9 | plan = "vc2-1c-1gb" // via `vultr-cli plans list` 10 | os_id = 1743 // ubuntu 22-04, via `vultr-cli os list` 11 | ssh_key_ids = [ 12 | // uuid-4 of ssh keys added in Vultr 13 | ] 14 | enable_ipv6 = true 15 | activation_email = false 16 | label = "instance-{{ cookiecutter.django_project_name }}" 17 | backups = "disabled" 18 | 19 | user_data = templatefile("vultr-cloud-init.tftpl", { 20 | DEPLOY_SSH_KEY = var.DEPLOY_SSH_KEY 21 | SECRET_KEY = var.DOTENV_SECRET_KEY 22 | POSTGRES_HOST = var.DOTENV_POSTGRES_HOST 23 | POSTGRES_USER = var.DOTENV_POSTGRES_USER 24 | POSTGRES_PASSWORD = var.DOTENV_POSTGRES_PASSWORD 25 | DATABASE_POOL_URL = var.DOTENV_DATABASE_POOL_URL 26 | DATABASE_URL = var.DOTENV_DATABASE_URL 27 | SENTRY_DSN = var.DOTENV_SENTRY_DSN 28 | }) 29 | } 30 | 31 | resource "vultr_load_balancer" "loadbalancer" { 32 | region = var.region 33 | 34 | forwarding_rules { 35 | frontend_protocol = "https" 36 | frontend_port = 443 37 | backend_protocol = "https" 38 | backend_port = 443 39 | } 40 | 41 | health_check { 42 | path = "/alive/" 43 | port = "443" 44 | protocol = "https" 45 | response_timeout = 5 46 
| unhealthy_threshold = 2 47 | check_interval = 15 48 | healthy_threshold = 4 49 | } 50 | 51 | attached_instances = [for instance in vultr_instance.worker : instance.id] 52 | } 53 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/parameters.nginx.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | cert_dir = "../../files/nginx/monitoring_certs" 3 | cert_files = fileset(local.cert_dir, "*.txt") 4 | 5 | certs = length(local.cert_files) > 0 ? [for cert_file in local.cert_files : { 6 | name: replace(cert_file, ".txt", "") 7 | content: "${local.cert_dir}/${cert_file}" 8 | }] : [] 9 | 10 | helper_dir = "../../files/nginx/config_helpers" 11 | helper_files = fileset(local.helper_dir, "*") 12 | 13 | helpers = length(local.helper_files) > 0 ? [for helper_file in local.helper_files : { 14 | name: helper_file, 15 | content: "${local.helper_dir}/${helper_file}" 16 | }] : [] 17 | 18 | template_dir = "../../files/nginx/templates" 19 | template_files = fileset(local.template_dir, "*") 20 | 21 | templates = length(local.template_files) > 0 ? [for template_file in local.template_files : { 22 | name: template_file, 23 | content: "${local.template_dir}/${template_file}" 24 | }] : [] 25 | } 26 | 27 | resource "aws_ssm_parameter" "certs" { 28 | count = length(local.certs) 29 | name = "/application/${var.name}/${var.env}/nginx/monitoring_certs/${local.certs[count.index].name}" 30 | type = "SecureString" 31 | value = file(local.certs[count.index].content) 32 | } 33 | 34 | resource "aws_ssm_parameter" "helpers" { 35 | count = length(local.helpers) 36 | name = "/application/${var.name}/${var.env}/nginx/config_helpers/${local.helpers[count.index].name}" 37 | type = "SecureString" 38 | value = file(local.helpers[count.index].content) 39 | } 40 | 41 | resource "aws_ssm_parameter" "templates" { 42 | count = length(local.templates) 43 | name = "/application/${var.name}/${var.env}/nginx/templates/${local.templates[count.index].name}" 44 | type = "SecureString" 45 | value = file(local.templates[count.index].content) 46 | } -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/backend/security.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "public" { 2 | name = "${var.name}-${var.env}-public-sg" 3 | vpc_id = var.vpc_id 4 | 5 | ingress { 6 | description = "allow traffic between load-balancer and EC2 instances within VPC" 7 | from_port = 80 8 | to_port = 80 9 | protocol = "tcp" 10 | cidr_blocks = ["0.0.0.0/0"] 11 | } 12 | 13 | ingress { 14 | description = "allow traffic between load-balancer and EC2 instances within VPC" 15 | from_port = 443 16 | to_port = 443 17 | protocol = "tcp" 18 | cidr_blocks = ["0.0.0.0/0"] 19 | } 20 | 21 | egress { 22 | from_port = 0 23 | to_port = 0 24 | protocol = "-1" 25 | cidr_blocks = ["0.0.0.0/0"] 26 | } 27 | } 28 | 29 | resource "aws_security_group" "internal" { 30 | name = "${var.name}-internal-sg" 31 | vpc_id = var.vpc_id 32 | 33 | ingress { 34 | description = "allow traffic to ssh from internet" 35 | from_port = 22 36 | to_port = 22 37 | protocol = "tcp" 38 | cidr_blocks = ["51.254.203.61/32"] 39 | } 40 | 41 | ingress { 42 | description = "allow monitoring" 43 | from_port = 10443 44 | to_port = 10443 45 | protocol = "tcp" 46 | cidr_blocks = ["138.68.147.48/32", "95.179.202.73/32"] 47 | } 48 | 
49 | ingress { 50 | description = "allow traffic between load-balancer and EC2 instances within VPC" 51 | from_port = 8000 52 | to_port = 8000 53 | protocol = "tcp" 54 | cidr_blocks = [var.vpc_cidr] 55 | } 56 | 57 | egress { 58 | from_port = 0 59 | to_port = 0 60 | protocol = "-1" 61 | cidr_blocks = ["0.0.0.0/0"] 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/docs/3rd_party/cookiecutter-rt-django/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # cookiecutter-rt-django Changelog 2 | 3 | The main purpose of this file is to provide a changelog for the template itself. 4 | It is not intended to be used as a changelog for the generated project. 5 | 6 | This changelog will document any known **BREAKING** changes between versions of the template. 7 | Please review these new entries carefully after applying `cruft update`, before committing the changes. 8 | 9 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). 10 | 11 | Currently, `cookiecutter-rt-django` has no explicit versioning and we rely purely on `cruft` diff. 12 | 13 | ## [Unreleased] 14 | 15 | * **BREAKING** Switched from `pdm` to [`uv`](https://docs.astral.sh/uv/) for Python dependency management. 16 | * **BREAKING** Switched from `docker-compose` v1 script to `docker compose` v2 plugin (https://docs.docker.com/compose/cli-command/) 17 | * **BREAKING** Added formatting with ruff. 18 | * **BREAKING** Started using [pdm](https://github.com/pdm-project/pdm) for managing Python dependencies. 19 | * **BREAKING** root of repository is used as docker build context instead of just `./app/`. 20 | * **BREAKING** Updated django-environ from 0.4.5 to 0.10 (https://django-environ.readthedocs.io/en/latest/changelog.html) 21 | * **BREAKING** Updated redis python package from 3.5.3 to 4.6 (breaking changes listed in https://github.com/redis/redis-py/releases/tag/v4.0.0b1) 22 | * **BREAKING** Updated Python from 3.9 to 3.11 23 | * **BREAKING** Updated Django from 3.2 to 4.2 (https://docs.djangoproject.com/en/4.2/releases/4.0/#backwards-incompatible-changes-in-4-0) 24 | * **BREAKING** Updated django-cors-headers from 3.7 to 4.0 (https://github.com/adamchainz/django-cors-headers/blob/main/CHANGELOG.rst#400-2023-05-12) 25 | * **BREAKING** Updated django-environ from 0.7 to 0.10 (https://django-environ.readthedocs.io/en/latest/changelog.html) 26 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/{% if cookiecutter.use_channels == "y" %}consumers.py{% endif %}: -------------------------------------------------------------------------------- 1 | from collections.abc import Callable 2 | from typing import Annotated, ClassVar, Union 3 | 4 | import structlog 5 | from channels.generic.websocket import AsyncWebsocketConsumer 6 | from pydantic import BaseModel, Field, TypeAdapter, ValidationError 7 | from structlog.contextvars import bound_contextvars 8 | 9 | from .schemas import Heartbeat 10 | 11 | log = structlog.get_logger(__name__) 12 | 13 | 14 | class DefaultConsumer(AsyncWebsocketConsumer): 15 | async def connect(self) -> None: 16 | await super().connect() 17 | log.info("connected", scope=self.scope) 18 | 19 | async def disconnect(self, code: int | str) -> None: 20 | log.info("disconnected", scope=self.scope, code=code) 21 | 22 | async def receive(self,
text_data: str | None = None, bytes_data: bytes | None = None) -> None: 23 | with bound_contextvars(text_data=text_data, bytes_data=bytes_data): 24 | log.debug("message received") 25 | try: 26 | message: BaseModel = self.MESSAGE_MODEL.validate_json(text_data) 27 | except ValidationError as exc: 28 | errors = exc.errors() 29 | log.debug("message parsing failed", errors=errors) 30 | return 31 | 32 | handler = self.MESSAGE_HANDLERS[type(message)] 33 | log.debug("selected message handler", handler=handler) 34 | await handler(self, message) 35 | 36 | async def handle_heartbeat(self, message: Heartbeat) -> None: 37 | await self.send(text_data=message.json()) 38 | 39 | MESSAGE_HANDLERS: ClassVar[dict[BaseModel, Callable]] = { 40 | Heartbeat: handle_heartbeat, 41 | } 42 | MESSAGE_MODEL = TypeAdapter(Annotated[Union[*MESSAGE_HANDLERS.keys()], Field(discriminator="type")]) 43 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/envs/dev/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | services: 4 | redis: 5 | {% if cookiecutter.use_valkey == "y" %} 6 | image: mirror.gcr.io/valkey/valkey:8.1-alpine 7 | command: valkey-server --appendonly yes 8 | healthcheck: 9 | test: valkey-cli ping 10 | {% else %} 11 | image: mirror.gcr.io/redis:6-alpine 12 | command: redis-server --appendonly yes 13 | healthcheck: 14 | test: redis-cli ping 15 | {% endif %} 16 | volumes: 17 | - ./redis/data:/data 18 | ports: 19 | - ${REDIS_PORT}:6379 20 | 21 | db: 22 | image: mirror.gcr.io/postgres:14.0-alpine 23 | healthcheck: 24 | test: pg_isready -U ${POSTGRES_USER} || exit 1 25 | environment: 26 | - POSTGRES_DB=${POSTGRES_DB} 27 | - POSTGRES_USER=${POSTGRES_USER} 28 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} 29 | volumes: 30 | - ./db/data:/var/lib/postgresql/data 31 | ports: 32 | - ${POSTGRES_PORT}:5432 33 | 34 | backups: 35 | build: 36 | context: backups/ 37 | dockerfile: Dockerfile 38 | init: true 39 | restart: unless-stopped 40 | environment: 41 | - DATABASE_URL=${DATABASE_URL} 42 | - BACKUP_LOCAL_DIR=/var/backups 43 | - BACKUP_LOCAL_ROTATE_KEEP_LAST=${BACKUP_LOCAL_ROTATE_KEEP_LAST} 44 | - B2_BUCKET=${BACKUP_B2_BUCKET} 45 | - B2_FOLDER=${BACKUP_B2_FOLDER} 46 | - B2_APPLICATION_KEY_ID=${BACKUP_B2_APPLICATION_KEY_ID} 47 | - B2_APPLICATION_KEY=${BACKUP_B2_APPLICATION_KEY} 48 | - EMAIL_HOST=${EMAIL_HOST} 49 | - EMAIL_PORT=${EMAIL_PORT} 50 | - EMAIL_HOST_USER=${EMAIL_HOST_USER} 51 | - EMAIL_HOST_PASSWORD=${EMAIL_HOST_PASSWORD} 52 | - EMAIL_TARGET=${EMAIL_TARGET} 53 | - DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL} 54 | - SENTRY_DSN=${SENTRY_DSN} 55 | volumes: 56 | - backups:/var/backups 57 | depends_on: 58 | - db 59 | 60 | volumes: 61 | backups: 62 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{{cookiecutter.django_default_app_name}}/email.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from collections.abc import Callable 4 | from email.mime.base import MIMEBase 5 | from email.mime.image import MIMEImage 6 | from functools import lru_cache 7 | from pathlib import Path 8 | from typing import TypeVar 9 | 10 | from django.conf import settings 11 | from django.contrib.staticfiles import finders 12 | from django.core.mail import EmailMessage 13 | from django.template import loader 14 | 15 | MIMEType = 
TypeVar("MIMEType", bound=MIMEBase) 16 | 17 | 18 | @lru_cache(maxsize=10) 19 | def create_attachment( 20 | path: str, 21 | mime_type: Callable[[bytes], MIMEType] = MIMEImage, # type: ignore[assignment] # https://github.com/python/mypy/issues/3737 22 | ) -> MIMEType: 23 | real_path = finders.find(path) 24 | if not real_path: 25 | raise FileNotFoundError(f"File {path} not found") 26 | content = Path(real_path).read_bytes() 27 | attachment = mime_type(content) 28 | 29 | file_name = path.rsplit("/", maxsplit=1)[-1] 30 | attachment.add_header("Content-ID", file_name) 31 | return attachment 32 | 33 | 34 | def send_mail( 35 | template_name: str, 36 | subject: str, 37 | to: list[str], 38 | from_: str = f"<{settings.DEFAULT_FROM_EMAIL}>", 39 | context: dict | None = None, 40 | attachments: list[str] | None = None, 41 | cc: list[str] | None = None, 42 | ): 43 | context = context or {} 44 | attachments = attachments or [] 45 | 46 | html = loader.render_to_string(template_name, context) 47 | 48 | message = EmailMessage( 49 | subject=subject, 50 | body=html, 51 | from_email=from_, 52 | to=to, 53 | cc=cc, 54 | attachments=[create_attachment(file) for file in attachments], 55 | ) 56 | message.content_subtype = "html" 57 | message.mixed_subtype = "related" 58 | message.send() 59 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Run linter and tests 2 | 3 | on: 4 | push: 5 | branches: [master, main] 6 | pull_request: 7 | branches: [master, main] 8 | 9 | env: 10 | PYTHON_DEFAULT_VERSION: "3.11" 11 | ENV_FILL_MISSING_VALUES: 1 12 | 13 | jobs: 14 | {% if cookiecutter.ci_use_linter == "y" %} 15 | linter: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v3 19 | with: 20 | fetch-depth: 0 21 | - name: Set up Python {% raw %}${{ env.PYTHON_DEFAULT_VERSION }}{% endraw %} 22 | 23 | uses: actions/setup-python@v4 24 | with: 25 | python-version: {% raw %}${{ env.PYTHON_DEFAULT_VERSION }}{% endraw %} 26 | 27 | - name: Install uv 28 | uses: astral-sh/setup-uv@v5 29 | with: 30 | version: "0.5.x" 31 | - name: Install nox 32 | run: python -m pip install --upgrade nox 33 | - name: Run linters 34 | run: nox -vs lint 35 | {% endif %} 36 | test: 37 | timeout-minutes: 10 38 | runs-on: ubuntu-latest 39 | steps: 40 | - uses: actions/checkout@v3 41 | with: 42 | fetch-depth: 0 43 | - name: Set up Python {% raw %}${{ env.PYTHON_DEFAULT_VERSION }}{% endraw %} 44 | 45 | uses: actions/setup-python@v4 46 | with: 47 | python-version: {% raw %}${{ env.PYTHON_DEFAULT_VERSION }}{% endraw %} 48 | 49 | - name: Install uv 50 | uses: astral-sh/setup-uv@v5 51 | with: 52 | version: "0.5.x" 53 | - name: Prepare environment 54 | run: ./setup-dev.sh 55 | - name: Run dockerized services 56 | run: docker compose up -d --wait 57 | - name: Run migrations 58 | run: cd app/src && uv run python manage.py wait_for_database --timeout 120 && uv run python manage.py migrate 59 | - name: Install nox 60 | run: python -m pip install --upgrade nox 61 | - name: Run unit tests 62 | run: nox -vs test 63 | - name: Stop dockerized services 64 | if: success() || failure() 65 | run: docker compose down -v 66 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/envs/common/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = 
var.region 3 | } 4 | 5 | data "aws_caller_identity" "env" {} 6 | 7 | data "aws_ami" "base_ami" { 8 | most_recent = true 9 | 10 | filter { 11 | name = "name" 12 | values = [var.base_ami_image] 13 | } 14 | 15 | filter { 16 | name = "virtualization-type" 17 | values = ["hvm"] 18 | } 19 | 20 | owners = [var.base_ami_image_owner] 21 | } 22 | 23 | locals { 24 | ecr_base_url = "${data.aws_caller_identity.env.account_id}.dkr.ecr.${var.region}.amazonaws.com" 25 | ecr_image = "${var.name}-${var.env}:latest" 26 | } 27 | 28 | module "networking" { 29 | source = "../../modules/networking" 30 | 31 | name = var.name 32 | env = var.env 33 | azs = var.azs 34 | vpc_cidr = var.vpc_cidr 35 | subnet_cidrs = var.subnet_cidrs 36 | } 37 | 38 | module "database" { 39 | source = "../../modules/database" 40 | 41 | name = var.name 42 | env = var.env 43 | vpc_id = module.networking.vpc_id 44 | vpc_cidr = module.networking.vpc_cidr_block 45 | azs = module.networking.azs 46 | subnets = module.networking.subnets 47 | instance_type = var.rds_instance_type 48 | } 49 | 50 | module "backend" { 51 | source = "../../modules/backend" 52 | 53 | depends_on = [ 54 | module.database 55 | ] 56 | 57 | base_ami_id = data.aws_ami.base_ami.image_id 58 | 59 | name = var.name 60 | region = var.region 61 | env = var.env 62 | 63 | ecr_base_url = local.ecr_base_url 64 | ecr_image = local.ecr_image 65 | 66 | base_domain_name = var.base_domain_name 67 | domain_name = var.domain_name 68 | ec2_ssh_key = var.ec2_ssh_key 69 | 70 | vpc_id = module.networking.vpc_id 71 | vpc_cidr = module.networking.vpc_cidr_block 72 | 73 | azs = module.networking.azs 74 | subnets = module.networking.subnets 75 | 76 | instance_type = var.instance_type 77 | health_check_type = var.autoscaling_health_check_type 78 | account_id = data.aws_caller_identity.env.account_id 79 | database = module.database 80 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # cookiecutter-rt-django 2 | 3 | Opinionated CookieCutter template for production-ready Django applications. 4 | 5 | ## Requirements 6 | 7 | ```sh 8 | pip install cruft ruff 9 | ``` 10 | 11 | ## Usage 12 | 13 | - Generate a new Cookiecutter template layout: 14 | ```sh 15 | cruft create https://github.com/reef-technologies/cookiecutter-rt-django 16 | ``` 17 | 18 | - See diff with 19 | ```sh 20 | cruft diff 21 | ``` 22 | 23 | - Update the project by running 24 | ```sh 25 | cruft update 26 | ``` 27 | Before committing make sure to review changes listed in `docs/3rd_party/cookiecutter-rt-django/CHANGELOG.md`. 28 | 29 | - If you have a repo which was initialized without cruft (i.e. with `cookiecutter` command), you can still link the project: 30 | ```sh 31 | cruft link https://github.com/reef-technologies/cookiecutter-rt-django 32 | ``` 33 | 34 | More on cruft: 35 | 36 | 37 | ## Automatic cruft updates 38 | 39 | This template ships with a GitHub Actions workflow that will periodically (once a week) monitor changes in the template and automatically create a pull request with updates using `cruft`. 40 | 41 | ### Setup 42 | 43 | The workflow requires permissions to create pull requests. 44 | You can enable it by going to Repository Settings -> Actions -> General -> Allow GitHub Actions to create and approve pull requests. 45 | 46 | ### Slack notifications (optional) 47 | 48 | The bot can send notifications to a Slack channel when a new pull request with updates is created. 
49 | 50 | To enable this, you need to set two secrets in your repository: 51 | 52 | - `SLACK_BOT_TOKEN` (the token of your Slack app) 53 | - `SLACK_CHANNEL_ID` (the ID of the channel where you want to receive notifications) 54 | 55 | If you don't have a Slack app, follow the [instructions here](https://github.com/slackapi/slack-github-action?tab=readme-ov-file#technique-2-slack-api-method) to create one. 56 | 57 | ## License 58 | 59 | This project is licensed under the terms of the [BSD-3 License](/LICENSE) 60 | 61 | ## Changelog 62 | 63 | Breaking changes are documented in the [CHANGELOG]({{cookiecutter.repostory_name}}/docs/3rd_party/cookiecutter-rt-django/CHANGELOG.md) 64 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Overview 4 | 5 | We value the contributions of the community and welcome any input on potential security issues. 6 | To ensure the safety of our users, we encourage following responsible disclosure practices described in this document. 7 | 8 | ## Supported Versions 9 | 10 | Only the latest release version is supported. 11 | We use SemVer and encourage to pin only to the major version of our software and update to the latest minor and patch versions regularly. 12 | 13 | ## Reporting a Vulnerability 14 | 15 | If you discover a security vulnerability in our repository, we encourage you to report it to us as quickly as possible. 16 | Please do not publicly disclose the details of the vulnerability until we have had a chance to address it. 17 | 18 | ### How to Report 19 | 20 | 1. **Submit vulnerability report**: 21 | 22 | Please use [Report a vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) functionality of this GitHub repository. 23 | 24 | Please use the following template: 25 | 26 | > ## Description of the vulnerability 27 | > 28 | > ## Steps to reproduce 29 | > 30 | > ## Potential impact 31 | > 32 | > ## Any potential fixes or mitigations 33 | > 34 | > ## How would you like to be attributed in the public changelog 35 | > 36 | > e.g., name, email, or GitHub handle 37 | 38 | 2. **Response Time**: 39 | We will acknowledge your report within 10 days and provide a timeline for fixing the vulnerability. 40 | 41 | 3. **Updates**: 42 | We will keep you updated as we work on a fix. 43 | You may be asked to provide additional information or clarification. 44 | 45 | 4. **Disclosure**: 46 | We follow a coordinated disclosure process. 47 | Once a fix is implemented, we will release it and publicly disclose the details of the vulnerability along with credits to the reporter. 48 | 49 | ## Security Updates 50 | 51 | Security updates will be communicated through our repository's release notes. 52 | Please ensure you stay up-to-date with the latest releases to protect your environment. 53 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Overview 4 | 5 | We value the contributions of the community and welcome any input on potential security issues. 
6 | To ensure the safety of our users, we encourage following responsible disclosure practices described in this document. 7 | 8 | ## Supported Versions 9 | 10 | Only the latest release version is supported. 11 | We use SemVer and encourage to pin only to the major version of our software and update to the latest minor and patch versions regularly. 12 | 13 | ## Reporting a Vulnerability 14 | 15 | If you discover a security vulnerability in our repository, we encourage you to report it to us as quickly as possible. 16 | Please do not publicly disclose the details of the vulnerability until we have had a chance to address it. 17 | 18 | ### How to Report 19 | 20 | 1. **Submit vulnerability report**: 21 | 22 | Please use [Report a vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) functionality of this GitHub repository. 23 | 24 | Please use the following template: 25 | 26 | > ## Description of the vulnerability 27 | > 28 | > ## Steps to reproduce 29 | > 30 | > ## Potential impact 31 | > 32 | > ## Any potential fixes or mitigations 33 | > 34 | > ## How would you like to be attributed in the public changelog 35 | > 36 | > e.g., name, email, or GitHub handle 37 | 38 | 2. **Response Time**: 39 | We will acknowledge your report within 10 days and provide a timeline for fixing the vulnerability. 40 | 41 | 3. **Updates**: 42 | We will keep you updated as we work on a fix. 43 | You may be asked to provide additional information or clarification. 44 | 45 | 4. **Disclosure**: 46 | We follow a coordinated disclosure process. 47 | Once a fix is implemented, we will release it and publicly disclose the details of the vulnerability along with credits to the reporter. 48 | 49 | ## Security Updates 50 | 51 | Security updates will be communicated through our repository's release notes. 52 | Please ensure you stay up-to-date with the latest releases to protect your environment. 
53 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/envs/prod/Dockerfile: -------------------------------------------------------------------------------- 1 | {% if cookiecutter.use_alpine_linux == "y" %} 2 | ARG BASE_IMAGE=python:3.11-alpine 3 | {% else %} 4 | ARG BASE_IMAGE=python:3.11-slim 5 | {% endif %} 6 | 7 | FROM $BASE_IMAGE AS base-image 8 | LABEL builder=true 9 | 10 | COPY --from=ghcr.io/astral-sh/uv:0.5 /uv /uvx /bin/ 11 | 12 | WORKDIR /root/src/ 13 | 14 | COPY pyproject.toml uv.lock ./ 15 | 16 | # Enable bytecode compilation 17 | ENV UV_COMPILE_BYTECODE=1 18 | # Copy from the cache instead of linking since it's a mounted volume 19 | ENV UV_LINK_MODE=copy 20 | 21 | {% if cookiecutter.use_alpine_linux == "y" %} 22 | RUN grep uvicorn uv.lock && apk add --no-cache build-base || true 23 | RUN grep psycopg uv.lock && apk add --no-cache postgresql-dev gcc python3-dev musl-dev libffi-dev || true 24 | RUN grep Pillow uv.lock && apk add --no-cache jpeg-dev tiff-dev zlib-dev libwebp-dev gcc python3-dev musl-dev || true 25 | {% endif %} 26 | 27 | {% if cookiecutter.use_alpine_linux == "y" %} 28 | RUN apk add git 29 | {% else %} 30 | RUN apt-get update && apt-get install -y git 31 | {% endif %} 32 | 33 | RUN --mount=type=cache,target=/root/.cache/uv \ 34 | uv sync --frozen --no-install-project --no-dev --no-editable 35 | 36 | COPY ./app/src/ ./app/envs/prod/entrypoint.sh ./app/envs/prod/gunicorn.conf.py ./app/envs/prod/celery-entrypoint.sh ./app/envs/prod/prometheus-cleanup.sh /root/src/ 37 | 38 | RUN --mount=type=cache,target=/root/.cache/uv \ 39 | uv sync --frozen --no-dev --no-editable 40 | 41 | RUN ENV=prod ENV_FILL_MISSING_VALUES=1 SECRET_KEY=dummy uv run python3 manage.py collectstatic --no-input --clear 42 | 43 | 44 | FROM $BASE_IMAGE AS secondary-image 45 | LABEL builder=false 46 | 47 | {% if cookiecutter.use_alpine_linux == "y" %} 48 | RUN apk add wget 49 | {% else %} 50 | RUN apt-get update \ 51 | && apt-get install -y wget \ 52 | && rm -rf /var/lib/apt/lists/* 53 | {% endif %} 54 | 55 | WORKDIR /root/src/ 56 | ENV PYTHONUNBUFFERED=1 57 | ENV PATH="/root/src/.venv/bin:$PATH" 58 | 59 | COPY --from=base-image /root/src/ /root/src/ 60 | 61 | {% if cookiecutter.use_alpine_linux == "y" %} 62 | RUN grep psycopg uv.lock && apk add --no-cache libpq || true 63 | RUN grep Pillow uv.lock && apk add --no-cache jpeg tiff zlib libwebp || true 64 | {% endif %} 65 | 66 | CMD ["./entrypoint.sh"] 67 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/urls.py: -------------------------------------------------------------------------------- 1 | from django.conf import settings 2 | from django.contrib.admin.sites import site 3 | from django.http import HttpResponse 4 | from django.urls import include, path{% if cookiecutter.use_rest_framework == "y" %}, re_path{% endif %} 5 | 6 | {% if cookiecutter.use_rest_framework == "y" %} 7 | from drf_spectacular.views import SpectacularAPIView, SpectacularSwaggerView 8 | {% endif %} 9 | {% if cookiecutter.use_fingerprinting == "y" %} 10 | from fingerprint.views import FingerprintView 11 | {% endif %} 12 | 13 | {% if cookiecutter.use_rest_framework == "y" %} 14 | from .api.routers import router as api_router 15 | {% endif %} 16 | {% if cookiecutter.monitoring == "y" %} 17 | from .{{cookiecutter.django_default_app_name}}.business_metrics import metrics_manager 18 | {% endif %} 19 | {% if 
cookiecutter.use_channels == "y" %} 20 | from .{{cookiecutter.django_default_app_name}}.consumers import DefaultConsumer 21 | {% endif %} 22 | {% if cookiecutter.monitoring == "y" %} 23 | from .{{cookiecutter.django_default_app_name}}.metrics import metrics_view 24 | 25 | {% endif %} 26 | urlpatterns = [ 27 | path("alive/", lambda _: HttpResponse(b"ok")), 28 | path("admin/", site.urls), 29 | {% if cookiecutter.use_rest_framework == "y" %} 30 | re_path(r"^api/(?P<version>v0)/", include(api_router.urls)), 31 | re_path(r"^api/(?P<version>v0)/schema/$", SpectacularAPIView.as_view(), name="schema"), 32 | re_path(r"^api/(?P<version>v0)/schema/swagger-ui/$", SpectacularSwaggerView.as_view(url_name='schema')), 33 | re_path(r"^api/auth/", include("rest_framework.urls", namespace="rest_framework")), 34 | {% endif %} 35 | {% if cookiecutter.use_fingerprinting == "y" %} 36 | path("redirect/", FingerprintView.as_view(), name="fingerprint"), 37 | {% endif %} 38 | {% if cookiecutter.monitoring == "y" %} 39 | path("metrics", metrics_view, name="prometheus-django-metrics"), 40 | path("business-metrics", metrics_manager.view, name="prometheus-business-metrics"), 41 | {% endif %} 42 | {% if cookiecutter.use_allauth == "y" %} 43 | path("accounts/", include("allauth.urls")), 44 | {% else %} 45 | path("", include("django.contrib.auth.urls")), 46 | {% endif %} 47 | ] 48 | 49 | {% if cookiecutter.use_channels == "y" %} 50 | ws_urlpatterns = [ 51 | path("ws/v0/", DefaultConsumer.as_asgi()), 52 | ] 53 | {% endif %} 54 | 55 | if settings.DEBUG_TOOLBAR: 56 | urlpatterns += [ 57 | path("__debug__/", include("debug_toolbar.urls")), 58 | ] 59 | -------------------------------------------------------------------------------- /cookiecutter.json: -------------------------------------------------------------------------------- 1 | { 2 | "repostory_name": "project", 3 | "project_short_description": "I am lazy and haven't written any project description", 4 | "django_project_name": "project", 5 | "django_default_app_name": "core", 6 | "monitoring": "y", 7 | "monitor_view_execution_time_in_djagno": "y", 8 | "monitor_tcpstat": "y", 9 | "log_aggregating": "y", 10 | "log_aggregator_url": "https://loki.reef.pl", 11 | "postgres_user": "postgres", 12 | "use_valkey": "y", 13 | "use_rest_framework": "n", 14 | "use_celery": "y", 15 | "use_flower": "{% if cookiecutter.use_celery == 'y' %}y{% else %}n{% endif %}", 16 | "use_fingerprinting": "n", 17 | "use_channels": "y", 18 | "use_allauth": "n", 19 | "allauth_trust_external_emails": "y", 20 | "allauth_providers": "google", 21 | "vulnerabilities_scanning": "y", 22 | "sentry_dsn": "", 23 | "csp_enabled": "n", 24 | "csp_report_only": "y", 25 | "csp_report_url": "{% if cookiecutter.csp_enabled == 'y' %}https://sentry.io/api/-1/security/?sentry_key=key{% endif %}", 26 | "csp_default_src": "'none'", 27 | "csp_script_src": "'self'", 28 | "csp_style_src": "'self'", 29 | "csp_font_src": "'self'", 30 | "csp_img_src": "'self'", 31 | "csp_media_src": "'self'", 32 | "csp_object_src": "'self'", 33 | "csp_frame_src": "'self'", 34 | "csp_connect_src": "'self'", 35 | "csp_child_src": "'self'", 36 | "csp_manifest_src": "'self'", 37 | "csp_worker_src": "'self'", 38 | "csp_block_all_mixed_content": "y", 39 | "csp_exclude_url_prefixes": "", 40 | "nginx_compression_enabled": "n", 41 | "nginx_tls_early_data_enabled": "n", 42 | "use_alpine_linux": "n", 43 | "aws_use_packer": "n", 44 | "aws_project_name": "{{ cookiecutter.repostory_name }}", 45 | "aws_infra_bucket": "{{ cookiecutter.aws_project_name }}-{{ random_ascii_string(6)|lower }}",
46 | "aws_region": "us-east-1", 47 | "aws_ami_image": "{% if cookiecutter.aws_use_packer == 'n' %}*ubuntu-noble-24.04-amd64-minimal-*{% else %}docker-optimized-*{% endif %}", 48 | "aws_ami_image_owner": "{% if cookiecutter.aws_use_packer == 'n' %}099720109477{% else %}self{% endif %}", 49 | "aws_base_domain_name": "fake-domain.com", 50 | "aws_domain_name": "api.{{ cookiecutter.aws_base_domain_name }}", 51 | "aws_staging_domain_name": "staging.{{ cookiecutter.aws_domain_name }}", 52 | "aws_ec2_ssh_key": "", 53 | "ci_use_linter": "y", 54 | "ci_use_spellchecker": "y", 55 | "_copy_without_render": [ 56 | ".github/workflows/cruft-updates.yml" 57 | ], 58 | "_jinja2_env_vars": { 59 | "trim_blocks": true, 60 | "lstrip_blocks": true 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/modules/database/rds.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "random" { 2 | length = 20 3 | special = true 4 | override_special = "$." 5 | } 6 | 7 | resource "aws_db_subnet_group" "self" { 8 | name = "${var.name}-${var.env}" 9 | subnet_ids = var.subnets 10 | 11 | tags = { 12 | Project = var.name 13 | Env = var.env 14 | Name = "DB subnet group" 15 | } 16 | } 17 | 18 | resource "aws_db_parameter_group" "postgres_params" { 19 | name_prefix = "${var.name}-${var.env}-" 20 | family = "postgres16" 21 | 22 | parameter { 23 | name = "shared_buffers" 24 | value = "{DBInstanceClassMemory/32768}" # 1/4 of memory (value in 8kB blocks) 25 | } 26 | 27 | parameter { 28 | name = "effective_cache_size" 29 | value = "{DBInstanceClassMemory/16384}" # 1/2 of memory (value in 8kB blocks) 30 | apply_method = "immediate" 31 | } 32 | 33 | parameter { 34 | name = "work_mem" 35 | value = "{DBInstanceClassMemory/65536}" # 1/64 of memory (value in 1kB blocks) 36 | apply_method = "immediate" 37 | } 38 | 39 | parameter { 40 | name = "maintenance_work_mem" 41 | value = "{DBInstanceClassMemory/16384}" # 1/16 of memory (value in 1kB blocks) 42 | apply_method = "immediate" 43 | } 44 | 45 | parameter { 46 | name = "autovacuum_work_mem" 47 | value = "{DBInstanceClassMemory/16384}" # 1/16 of memory (value in 1kB blocks) 48 | apply_method = "immediate" 49 | } 50 | 51 | parameter { 52 | name = "effective_io_concurrency" 53 | value = "200" 54 | apply_method = "immediate" 55 | } 56 | 57 | parameter { 58 | name = "random_page_cost" 59 | value = "1.1" 60 | apply_method = "immediate" 61 | } 62 | 63 | lifecycle { 64 | create_before_destroy = true 65 | } 66 | } 67 | 68 | resource "aws_db_instance" "self" { 69 | identifier = "${var.name}-${var.env}-db" 70 | allocated_storage = 5 71 | max_allocated_storage = 20 72 | storage_encrypted = true 73 | engine = "postgres" 74 | engine_version = "16.8" 75 | instance_class = var.instance_type 76 | username = "master" 77 | db_name = "backend" 78 | password = random_string.random.result 79 | skip_final_snapshot = true 80 | availability_zone = var.azs[0] 81 | db_subnet_group_name = aws_db_subnet_group.self.name 82 | vpc_security_group_ids = [aws_security_group.db.id] 83 | parameter_group_name = aws_db_parameter_group.postgres_params.name 84 | 85 | tags = { 86 | Project = var.name 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/packer/docker-optimized.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins 
{ 3 | amazon = { 4 | version = ">= 1.0.0" 5 | source = "github.com/hashicorp/amazon" 6 | } 7 | } 8 | } 9 | 10 | local "ts" { 11 | expression = formatdate("YYYYMMDDhhmm", timestamp()) 12 | } 13 | 14 | source "amazon-ebs" "docker-optimized" { 15 | ami_name = "docker-optimized-${local.ts}" 16 | 17 | source_ami_filter { 18 | filters = { 19 | virtualization-type = "hvm" 20 | name = "*ubuntu-noble-24.04-amd64-minimal-*" 21 | root-device-type = "ebs" 22 | } 23 | 24 | owners = [ 25 | "099720109477" 26 | ] 27 | 28 | most_recent = true 29 | } 30 | 31 | instance_type = "t3.medium" 32 | ssh_username = "ubuntu" 33 | force_deregister = true 34 | encrypt_boot = true 35 | 36 | launch_block_device_mappings { 37 | device_name = "/dev/sda1" 38 | encrypted = true 39 | volume_size = 20 40 | volume_type = "gp3" 41 | delete_on_termination = true 42 | } 43 | } 44 | 45 | build { 46 | sources = [ 47 | "source.amazon-ebs.docker-optimized" 48 | ] 49 | 50 | provisioner "shell" { 51 | environment_vars = [ 52 | "DEBIAN_FRONTEND=noninteractive" 53 | ] 54 | 55 | inline = [ 56 | "sleep 15", 57 | 58 | "sudo apt-get clean", 59 | "sudo apt-get update", 60 | "sudo apt-get install -y ca-certificates curl gnupg lsb-release unzip jq rng-tools", 61 | 62 | "curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -o awscliv2.zip", 63 | "unzip awscliv2.zip", 64 | "sudo ./aws/install", 65 | "rm -rf ./aws ./awscliv2.zip", 66 | 67 | "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg", 68 | "echo \"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null", 69 | "sudo apt-get update", 70 | "sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin", 71 | "sudo gpasswd -a ubuntu docker", 72 | "sudo mkdir -p /etc/docker/", 73 | 74 | "if [ ! -f /etc/docker/daemon.json ]; then", 75 | "  echo '{ \"registry-mirrors\": [\"https://mirror.gcr.io\"] }' | sudo tee /etc/docker/daemon.json > /dev/null", 76 | "else", 77 | "  jq '.[\"registry-mirrors\"] += [\"https://mirror.gcr.io\"]' /etc/docker/daemon.json | sudo tee /etc/docker/daemon.tmp > /dev/null && sudo mv /etc/docker/daemon.tmp /etc/docker/daemon.json", 78 | "fi", 79 | 80 | "sudo service docker restart", 81 | ] 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/README_vultr.md: -------------------------------------------------------------------------------- 1 | # Deploying to Vultr 2 | 3 | 4 | Files related to Vultr deployment are in `devops/vultr_scripts/` and `devops/vultr_tf`. 5 | 6 | To use Terraform, you need to: 7 | - create an API key, which you can find in Vultr -> Account -> API: 8 | - allow your IP in the Access Control section on the same page 9 | 10 | - To use SSH keys in Terraform, you need to create them in Vultr -> Account -> SSH Keys: 11 | 12 | 13 | ## Required software 14 | 15 | 16 | *Terraform* You will need Terraform version 1.0.x. It is recommended to use `tfenv` to install the correct Terraform version. 17 | You can download and install it from 18 | 19 | *direnv* To avoid mistakes when switching environments (or regions), it is recommended to use the `direnv` tool, which loads environment variables from a `.envrc` file placed in the directory. 20 | You can read about it here: 21 | 22 | 23 | (recommended) *Vultr CLI* via to interact with Vultr instances post-deployment, e.g.
get their IP addresses, instance IDs, or update their Cloud-Init data. 24 | 25 | ## Configure your environment 26 | 27 | 28 | To deploy via Terraform, you have to fill in all the Cloud-Init variables in `vultr-cloud-init.tftpl`. 29 | These variables can be sourced in various ways; the recommended approach is to use environment variables in combination with `dotenv`. 30 | 31 | To use the Vultr CLI, you need an API key, again ideally provided via an environment variable. 32 | 33 | ## Configuring infra 34 | 35 | You only need to do this if you change anything in the `devops/vultr_tf` directory. 36 | 37 | TODO - the Vultr Terraform setup is currently not configured to use S3 buckets. 38 | 39 | 40 | ``` 41 | terraform init 42 | terraform apply 43 | ``` 44 | 45 | ## Adding secrets to the project 46 | 47 | The project uses a `.env` file in the same directory as `docker-compose.yml`, so any secrets should be sourced via this file. 48 | 49 | Do not commit secrets to the repository; this `.env` file can be updated via the Cloud-Init executed when a new machine is spawned or reinstalled. The Cloud-Init template is located in the Terraform directory: `vultr-cloud-init.tftpl`. 50 | 51 | After spawning the machines, the Cloud-Init data can be updated via the Vultr CLI, see `devops/vultr_scripts/vultr-update-cloudinit.py`. Updating the Cloud-Init data in Terraform would mean destroying & recreating all instances from scratch. 52 | 53 | 54 | ## Deploying apps 55 | 56 | Deployment is executed via a `post-receive` hook in the git repository on each instance. See `devops/vultr_scripts/vultr-deploy.py`. 57 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/cloud-init.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | groups: 3 | - docker 4 | 5 | system_info: 6 | default_user: 7 | groups: [docker] 8 | 9 | write_files: 10 | - path: /home/ubuntu/installer.sh 11 | permissions: '0755' 12 | content: | 13 | apt-get clean && apt-get update && apt-get install -y ca-certificates curl gnupg lsb-release unzip jq rng-tools 14 | 15 | curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -o awscliv2.zip 16 | unzip awscliv2.zip 17 | ./aws/install 18 | rm -rf ./aws ./awscliv2.zip 19 | 20 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg 21 | echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 22 | apt-get update 23 | apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin 24 | gpasswd -a ubuntu docker 25 | mkdir -p /etc/docker/ 26 | 27 | if [ !
-f /etc/docker/daemon.json ]; then 28 | echo '{ "registry-mirrors": ["https://mirror.gcr.io"] }' > /etc/docker/daemon.json 29 | else 30 | jq '.["registry-mirrors"] += ["https://mirror.gcr.io"]' /etc/docker/daemon.json > /etc/docker/daemon.tmp && mv /etc/docker/daemon.tmp /etc/docker/daemon.json 31 | fi 32 | 33 | service docker restart 34 | 35 | - path: /home/ubuntu/cloud-init.sh 36 | permissions: '0755' 37 | content: | 38 | #!/bin/bash 39 | 40 | export APP_NAME=${name} 41 | export APP_ENV=${env} 42 | 43 | aws ssm describe-parameters | jq -r '.Parameters[].Name' | grep "\/application\/$APP_NAME\/$APP_ENV" | sed "s/\/application.*$APP_ENV\///" | while read -r FILE; do 44 | mkdir -p $(dirname "$FILE"); 45 | aws ssm get-parameter --name "/application/$APP_NAME/$APP_ENV/$FILE" --output text --with-decrypt --query 'Parameter.Value' | sed "s/###//g" > "$FILE"; 46 | done 47 | 48 | source .envrc 49 | 50 | export INSTANCE_ID_SUBST=`wget http://169.254.169.254/latest/meta-data/instance-id -O- --timeout=5 --tries=1` 51 | [ -z "$INSTANCE_ID_SUBST" ] && export INSTANCE_ID_SUBST='{% raw %}{{.FullID}}{% endraw %}' 52 | echo "INSTANCE_ID_SUBST=$INSTANCE_ID_SUBST" >> .env 53 | 54 | [ -f secret.env ] && cat secret.env >> .env 55 | 56 | 57 | aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin "$AWS_ECR_BASE_URL" 58 | docker compose up -d 59 | 60 | runcmd: 61 | - chown -R ubuntu:ubuntu /home/ubuntu 62 | - cd /home/ubuntu/ 63 | {% if cookiecutter.aws_use_packer == 'n' %} 64 | - "[ -f ./installer.sh ] && ./installer.sh" 65 | {% endif %} 66 | - sudo -u ubuntu ./cloud-init.sh 67 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/app/src/{{cookiecutter.django_project_name}}/{% if cookiecutter.use_celery == 'y' %}celery.py{% endif %}: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | from celery import Celery 5 | {% if cookiecutter.monitoring == "y" %} 6 | from celery.signals import setup_logging, worker_process_shutdown 7 | {% endif %} 8 | from django.conf import settings 9 | from django_structlog.celery.steps import DjangoStructLogInitStep 10 | from more_itertools import chunked 11 | {% if cookiecutter.monitoring == "y" %} 12 | from prometheus_client import Gauge, multiprocess 13 | {% endif %} 14 | 15 | from .settings import configure_structlog 16 | 17 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "{{cookiecutter.django_project_name}}.settings") 18 | 19 | app = Celery("{{cookiecutter.django_project_name}}") 20 | app.config_from_object("django.conf:settings", namespace="CELERY") 21 | app.steps["worker"].add(DjangoStructLogInitStep) 22 | app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) 23 | 24 | {% if cookiecutter.monitoring == "y" %} 25 | num_tasks_in_queue = Gauge( 26 | "celery_queue_len", 27 | "How many tasks are there in a queue", 28 | labelnames=("queue",), 29 | ) 30 | {% endif %} 31 | 32 | 33 | @setup_logging.connect 34 | def receiver_setup_logging(loglevel, logfile, format, colorize, **kwargs): # pragma: no cover 35 | config = settings.LOGGING 36 | # worker and master have a logfile, beat does not 37 | if logfile: 38 | config["handlers"]["console"]["class"] = "logging.FileHandler" 39 | config["handlers"]["console"]["filename"] = logfile 40 | logging.config.dictConfig(config) 41 | configure_structlog() 42 | 43 | 44 | def get_tasks_in_queue(queue_name: str) -> list[bytes]: 45 | with app.pool.acquire(block=True) as conn: 46 | 
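        # With the Redis broker, Kombu's default channel exposes the underlying Redis
        # client, and each Celery queue is a plain Redis list, so its contents can be
        # read directly with LRANGE (and its length with LLEN below).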
return conn.default_channel.client.lrange(queue_name, 0, -1) 47 | 48 | 49 | def get_num_tasks_in_queue(queue_name: str) -> int: 50 | with app.pool.acquire(block=True) as conn: 51 | return conn.default_channel.client.llen(queue_name) 52 | 53 | 54 | def move_tasks(source_queue: str, destination_queue: str, chunk_size: int = 100) -> None: 55 | with app.pool.acquire(block=True) as conn: 56 | client = conn.default_channel.client 57 | tasks = client.lrange(source_queue, 0, -1) 58 | 59 | for chunk in chunked(tasks, chunk_size): 60 | with client.pipeline() as pipe: 61 | for task in chunk: 62 | client.rpush(destination_queue, task) 63 | client.lrem(source_queue, 1, task) 64 | pipe.execute() 65 | 66 | 67 | def flush_tasks(queue_name: str) -> None: 68 | with app.pool.acquire(block=True) as conn: 69 | conn.default_channel.client.delete(queue_name) 70 | 71 | 72 | {% if cookiecutter.monitoring == "y" %} 73 | @worker_process_shutdown.connect 74 | def child_exit(pid, **kw): 75 | multiprocess.mark_process_dead(pid) 76 | {% endif %} 77 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/.github/workflows/cruft-updates.yml: -------------------------------------------------------------------------------- 1 | name: Update repository with Cruft 2 | permissions: 3 | contents: write 4 | pull-requests: write 5 | actions: write 6 | on: 7 | schedule: 8 | - cron: "0 2 * * 1" # Every Monday at 2am 9 | workflow_dispatch: 10 | 11 | jobs: 12 | update: 13 | runs-on: ubuntu-latest 14 | env: 15 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} 16 | SLACK_CHANNEL_ID: ${{ secrets.SLACK_CHANNEL_ID }} 17 | steps: 18 | - uses: actions/checkout@v4 19 | 20 | - uses: actions/setup-python@v5 21 | with: 22 | python-version: "3.11" 23 | 24 | - name: Install Cruft 25 | run: pip3 install cruft~=2.16.0 ruff~=0.8.4 26 | 27 | - name: Check if update is available 28 | continue-on-error: false 29 | id: check 30 | run: | 31 | CHANGES=0 32 | if [ -f .cruft.json ]; then 33 | if ! cruft check; then 34 | CHANGES=1 35 | fi 36 | else 37 | echo "No .cruft.json file" 38 | fi 39 | 40 | echo "has_changes=$CHANGES" >> "$GITHUB_OUTPUT" 41 | 42 | - name: Run update if available 43 | if: steps.check.outputs.has_changes == '1' 44 | run: | 45 | git config --global user.email "updater@reef.pl" 46 | git config --global user.name "Cruft Updater" 47 | 48 | cruft update --skip-apply-ask --refresh-private-variables 49 | git restore --staged . 50 | 51 | - name: Check for .rej files 52 | if: steps.check.outputs.has_changes == '1' 53 | id: check_rej 54 | run: | 55 | REJ_FILES=$(find . -name "*.rej") 56 | if [ -n "$REJ_FILES" ]; then 57 | echo "has_rej_files=1" >> "$GITHUB_OUTPUT" 58 | else 59 | echo "has_rej_files=0" >> "$GITHUB_OUTPUT" 60 | fi 61 | 62 | - name: Create pull request 63 | if: steps.check.outputs.has_changes == '1' 64 | id: create_pr 65 | uses: peter-evans/create-pull-request@v7 66 | with: 67 | token: ${{ secrets.GITHUB_TOKEN }} 68 | add-paths: . 69 | commit-message: "Apply new Cruft update" 70 | branch: cruft/update 71 | delete-branch: true 72 | title: Cruft cookiecutter update 73 | body: | 74 | This is an autogenerated PR. 75 | 76 | ${{ steps.check_rej.outputs.has_rej_files == '1' && 'IMPORTANT: One or more `.rej` files have been detected, which means that some changes could not be applied automatically. 
Please RESOLVE them manually' || 'No conflicts detected' }} 77 | 78 | - name: Post to a Slack channel 79 | if: steps.create_pr.outputs.pull-request-number && env.SLACK_BOT_TOKEN 80 | uses: slackapi/slack-github-action@v2.0.0 81 | with: 82 | method: chat.postMessage 83 | token: ${{ env.SLACK_BOT_TOKEN }} 84 | payload: | 85 | channel: ${{ env.SLACK_CHANNEL_ID }} 86 | text: | 87 | cruft updates for `${{ github.repository }}`: 88 | ${{ steps.create_pr.outputs.pull-request-url }} 89 | ${{ steps.check_rej.outputs.has_rej_files == '1' && 'IMPORTANT: `.rej` files detected, conflicts need to be resolved' || 'Can be merged safely' }} 90 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "{{cookiecutter.repostory_name}}" 3 | requires-python = "==3.11.*" 4 | version = "0" 5 | dependencies = [ 6 | "Django~=4.2.4", 7 | {% if cookiecutter.csp_enabled == "y" %} 8 | "django-csp==3.7", 9 | {% endif %} 10 | "django-constance[database]==3.1.0", 11 | "django-cors-headers~=4.2.0", 12 | "django-environ~=0.11.2", 13 | "django-extensions==3.2.3", 14 | "django-probes==1.7.0", 15 | "django-debug-toolbar==4.1.0", 16 | "django-structlog{% if cookiecutter.use_celery == "y" %}[celery]{% endif %}==8.0.0", 17 | {% if cookiecutter.use_rest_framework == 'y' %} 18 | "djangorestframework~=3.16.0", 19 | "drf-spectacular[sidecar]~=0.28.0", 20 | {% endif %} 21 | {% if cookiecutter.use_celery == "y" %} 22 | "celery~=5.3.1", 23 | {% if cookiecutter.use_flower == 'y' %} 24 | "flower~=2.0.0", 25 | {% endif %} 26 | {% endif %} 27 | "gunicorn==20.1.0", 28 | "psycopg[binary]~=3.1.19", 29 | "redis~=4.6.0", 30 | "sentry-sdk==1.3.0", 31 | "ipython~=8.14.0", 32 | "nox==2024.10.9", 33 | "more-itertools~=10.3.0", 34 | {% if cookiecutter.monitoring == "y" %} 35 | "psutil>=5.9.8", 36 | "prometheus-client~=0.17.0", 37 | "django-prometheus==2.3.1", 38 | "django-business-metrics>=1.0.1,<2", 39 | {% endif %} 40 | {% if cookiecutter.use_fingerprinting == "y" %} 41 | "django-fingerprint-rt~=0.1.0", 42 | {% endif %} 43 | {% if cookiecutter.use_channels == "y" %} 44 | "channels[daphne]~=4.0", 45 | "channels-redis~=4.2.0", 46 | "uvicorn[standard]==0.29", 47 | "pydantic~=2.0", 48 | {% endif %} 49 | {% if cookiecutter.use_allauth == "y" %} 50 | "django-allauth[socialaccount]~=0.63.1", 51 | {% endif %} 52 | ] 53 | 54 | [build-system] 55 | requires = ["pdm-backend"] 56 | build-backend = "pdm.backend" 57 | 58 | [tool.pdm] 59 | distribution = false 60 | 61 | [dependency-groups] 62 | test = [ 63 | 'pytest', 64 | 'pytest-django', 65 | 'pytest-xdist', 66 | {% if cookiecutter.use_channels == "y" %} 67 | 'pytest-asyncio', 68 | {% endif %} 69 | 'ipdb', 70 | 'freezegun', 71 | ] 72 | lint = [ 73 | "ruff", 74 | {% if cookiecutter.ci_use_spellchecker == "y" %} 75 | "codespell[toml]", 76 | {% endif %} 77 | ] 78 | type_check = [ 79 | "django-stubs[compatible-mypy]", 80 | "djangorestframework-stubs[compatible-mypy]", 81 | "mypy", 82 | "types-freezegun", 83 | "types-python-dateutil", 84 | "types-requests", 85 | ] 86 | 87 | [tool.ruff] 88 | src = ["app/src"] 89 | line-length = 120 90 | 91 | [tool.ruff.lint] 92 | # TODO add D 93 | select = [ 94 | "E", "F", "I", "UP", "S", 95 | "TC005", 96 | ] 97 | # TODO: remove E501 once docstrings are formatted 98 | ignore = [ 99 | "D100", "D105", "D107", "D200", "D202", "D203", "D205", "D212", "D400", "D401", "D415", 100 | "D101", "D102","D103", "D104", 
# TODO remove once we have docstring for all public methods 101 | "E501", # TODO: remove E501 once docstrings are formatted 102 | "S101", "S108", "S603", "S607", 103 | ] 104 | 105 | [tool.ruff.lint.per-file-ignores] 106 | "__init__.py" = ["F401"] 107 | "**/test*/**.py" = ["D", "F403", "F405", "S106", "S311"] 108 | 109 | [tool.codespell] 110 | skip = '*.min.js,*.lock,*/monitoring_certs/*' 111 | ignore-words-list = 'datas' 112 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/envs/dev/.env.template: -------------------------------------------------------------------------------- 1 | ENV=backend-dev 2 | DEBUG=on 3 | DEBUG_TOOLBAR=on 4 | SECRET_KEY=12345 5 | 6 | POSTGRES_DB={{cookiecutter.django_project_name}} 7 | POSTGRES_HOST=localhost 8 | POSTGRES_PORT=8432 9 | POSTGRES_USER={{cookiecutter.postgres_user}} 10 | POSTGRES_PASSWORD=12345 11 | DATABASE_POOL_URL= 12 | # using transaction-based db connection pool as DATABASE_URL instead of DATABASE_POOL_URL will break production 13 | DATABASE_URL=postgres://{{cookiecutter.postgres_user}}:12345@localhost:8432/{{cookiecutter.django_project_name}} 14 | 15 | NGINX_HOST=localhost 16 | 17 | CORS_ENABLED=on 18 | CORS_ALLOWED_ORIGINS= 19 | CORS_ALLOWED_ORIGIN_REGEXES= 20 | CORS_ALLOW_ALL_ORIGINS=0 21 | 22 | REDIS_HOST=localhost 23 | REDIS_PORT=8379 24 | 25 | {% if cookiecutter.use_celery == "y" %} 26 | CELERY_BROKER_URL=redis://localhost:8379/0 27 | CELERY_TASK_ALWAYS_EAGER=1 28 | CELERY_MASTER_CONCURRENCY=1 29 | CELERY_WORKER_CONCURRENCY=1 30 | {% if cookiecutter.use_flower == "y" %} 31 | CELERY_FLOWER_USER=flower 32 | CELERY_FLOWER_PASSWORD=12345 33 | {% endif %} 34 | {% endif %} 35 | 36 | {% if cookiecutter.use_channels == "y" %} 37 | CHANNELS_BACKEND_URL=redis://localhost:8379/1 38 | {% endif %} 39 | 40 | EMAIL_BACKEND=django.core.mail.backends.filebased.EmailBackend 41 | EMAIL_FILE_PATH=/tmp/email 42 | EMAIL_HOST=smtp.sendgrid.net 43 | EMAIL_PORT=587 44 | EMAIL_USE_TLS=1 45 | EMAIL_HOST_USER=apikey 46 | EMAIL_HOST_PASSWORD= 47 | DEFAULT_FROM_EMAIL= 48 | 49 | SENTRY_DSN={{cookiecutter.sentry_dsn}} 50 | 51 | CSP_ENABLED={{cookiecutter.csp_enabled}} 52 | CSP_REPORT_ONLY={{cookiecutter.csp_report_only}} 53 | CSP_REPORT_URL="{{cookiecutter.csp_report_url}}" 54 | CSP_DEFAULT_SRC="{{cookiecutter.csp_default_src}}" 55 | CSP_SCRIPT_SRC="{{cookiecutter.csp_script_src}}" 56 | CSP_STYLE_SRC="{{cookiecutter.csp_style_src}}" 57 | CSP_FONT_SRC="{{cookiecutter.csp_font_src}}" 58 | CSP_IMG_SRC="{{cookiecutter.csp_img_src}}" 59 | CSP_MEDIA_SRC="{{cookiecutter.csp_media_src}}" 60 | CSP_OBJECT_SRC="{{cookiecutter.csp_object_src}}" 61 | CSP_FRAME_SRC="{{cookiecutter.csp_frame_src}}" 62 | CSP_CONNECT_SRC="{{cookiecutter.csp_connect_src}}" 63 | CSP_CHILD_SRC="{{cookiecutter.csp_child_src}}" 64 | CSP_MANIFEST_SRC="{{cookiecutter.csp_manifest_src}}" 65 | CSP_WORKER_SRC="{{cookiecutter.csp_worker_src}}" 66 | CSP_BLOCK_ALL_MIXED_CONTENT={{cookiecutter.csp_block_all_mixed_content}} 67 | CSP_EXCLUDE_URL_PREFIXES={{cookiecutter.csp_exclude_url_prefixes}} 68 | 69 | BACKUP_LOCAL_ROTATE_KEEP_LAST= 70 | BACKUP_B2_BUCKET= 71 | BACKUP_B2_FOLDER= 72 | BACKUP_B2_APPLICATION_KEY_ID= 73 | BACKUP_B2_APPLICATION_KEY= 74 | 75 | {% if cookiecutter.use_allauth == "y" %} 76 | {% if 'apple' in cookiecutter.allauth_providers %} 77 | APPLE_LOGIN_CLIENT_ID= 78 | APPLE_LOGIN_SECRET= 79 | APPLE_LOGIN_KEY= 80 | APPLE_LOGIN_CERTIFICATE_PRIVATE_KEY= 81 | {% endif %} 82 | {% if 'discord' in cookiecutter.allauth_providers %} 83 | 
DISCORD_LOGIN_CLIENT_ID= 84 | DISCORD_LOGIN_SECRET= 85 | {% endif %} 86 | {% if 'facebook' in cookiecutter.allauth_providers %} 87 | FACEBOOK_LOGIN_CLIENT_ID= 88 | FACEBOOK_LOGIN_SECRET= 89 | {% endif %} 90 | {% if 'github' in cookiecutter.allauth_providers %} 91 | GITHUB_LOGIN_CLIENT_ID= 92 | GITHUB_LOGIN_SECRET= 93 | {% endif %} 94 | {% if 'gitlab' in cookiecutter.allauth_providers %} 95 | GITLAB_LOGIN_CLIENT_ID= 96 | GITLAB_LOGIN_SECRET= 97 | {% endif %} 98 | {% if 'google' in cookiecutter.allauth_providers %} 99 | GOOGLE_LOGIN_CLIENT_ID= 100 | GOOGLE_LOGIN_SECRET= 101 | {% endif %} 102 | {% if 'microsoft' in cookiecutter.allauth_providers %} 103 | MICROSOFT_LOGIN_CLIENT_ID= 104 | MICROSOFT_LOGIN_SECRET= 105 | {% endif %} 106 | {% if 'openid_connect' in cookiecutter.allauth_providers %} 107 | OPENID_CONNECT_NICE_NAME= 108 | OPENID_CONNECT_LOGIN_CLIENT_ID= 109 | OPENID_CONNECT_LOGIN_SECRET= 110 | OPENID_CONNECT_SERVER_URL= 111 | {% endif %} 112 | {% if 'twitter' in cookiecutter.allauth_providers %} 113 | TWITTER_LOGIN_CLIENT_ID= 114 | TWITTER_LOGIN_SECRET= 115 | {% endif %} 116 | {% if 'atlassian' in cookiecutter.allauth_providers %} 117 | ATLASSIAN_LOGIN_CLIENT_ID= 118 | ATLASSIAN_LOGIN_SECRET= 119 | {% endif %} 120 | {% endif %} -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/emailhelper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) 2018, Reef Technologies, BSD 3-Clause License 4 | 5 | import argparse 6 | import os 7 | import smtplib 8 | import sys 9 | from collections import namedtuple 10 | from email import encoders 11 | from email.mime.base import MIMEBase 12 | from email.mime.multipart import MIMEMultipart 13 | from email.mime.text import MIMEText 14 | from urllib.parse import urlsplit 15 | 16 | 17 | class GmailSender(namedtuple("SmtpAuthData", "server port user password")): 18 | def send(self, addr_from, addr_to, subject, message, files=tuple()): 19 | msg = MIMEMultipart("alternative") 20 | msg["To"] = addr_to 21 | msg["From"] = addr_from 22 | msg["Subject"] = subject 23 | 24 | text = "view the html version." 
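        # multipart/alternative parts go from least to most preferred: the plain-text
        # fallback is attached first and the HTML body last, so clients that understand
        # HTML render the HTML version.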
25 | msg.attach(MIMEText(text, "plain")) 26 | msg.attach(MIMEText(message, "html")) 27 | 28 | for file in files: 29 | part = MIMEBase("application", "octet-stream") 30 | with open(file, "rb") as stream: 31 | part.set_payload(stream.read()) 32 | encoders.encode_base64(part) 33 | part.add_header( 34 | "Content-Disposition", 35 | f'attachment; filename="{os.path.basename(file)}"', 36 | ) 37 | msg.attach(part) 38 | 39 | s = smtplib.SMTP(self.server, self.port) 40 | s.ehlo() 41 | s.starttls() 42 | if self.password: 43 | s.login(self.user, self.password) 44 | s.sendmail(addr_from, addr_to, msg.as_string()) 45 | s.quit() 46 | 47 | 48 | def parse_arguments(): 49 | parser = argparse.ArgumentParser() 50 | 51 | parser.add_argument( 52 | "-t", 53 | "--to", 54 | required=True, 55 | action="store", 56 | dest="to_email", 57 | help="Destination address", 58 | ) 59 | 60 | parser.add_argument( 61 | "--from", 62 | required=False, 63 | default="", 64 | dest="from_email", 65 | help="Sender address", 66 | ) 67 | 68 | parser.add_argument( 69 | "-f", 70 | "--files", 71 | action="store", 72 | nargs="*", 73 | dest="files", 74 | help="Files to be sent as attachments", 75 | ) 76 | 77 | parser.add_argument( 78 | "-s", 79 | "--subject", 80 | action="store", 81 | dest="subject", 82 | help="Subject of Email", 83 | ) 84 | 85 | result = parser.parse_args() 86 | return result 87 | 88 | 89 | if __name__ == "__main__": 90 | parser_result = parse_arguments() 91 | email_creds = os.environ.get("EMAIL_CREDS") 92 | if not email_creds: 93 | sys.stderr.write("no EMAIL_CREDS environment variable!\nexport EMAIL_CREDS=user:password@server:port") 94 | sys.exit(2) 95 | 96 | try: 97 | email_creds = urlsplit(f"//{email_creds}") 98 | if not all([email_creds.username, email_creds.hostname, email_creds.port]): 99 | raise ValueError 100 | except ValueError: 101 | sys.stderr.write( 102 | "EMAIL_CREDS environment variable has wrong format!\nexport EMAIL_CREDS=user:password@server:port" 103 | ) 104 | sys.exit(2) 105 | 106 | addr_to = parser_result.to_email 107 | files = parser_result.files or [] 108 | if parser_result.from_email: 109 | addr_from = parser_result.from_email 110 | elif "@" in email_creds.username: 111 | addr_from = email_creds.username 112 | else: 113 | addr_from = f"{email_creds.username}@{email_creds.hostname}" 114 | 115 | print("Enter/Paste the message for email. Ctrl-%s to save it."
% (os.name == "nt" and "Z" or "D")) 116 | message_lines = [] 117 | while True: 118 | try: 119 | line = input() 120 | except EOFError: 121 | break 122 | message_lines.append(line) 123 | 124 | subject = parser_result.subject 125 | message = "\n".join(message_lines) 126 | 127 | sender = GmailSender(email_creds.hostname, email_creds.port, email_creds.username, email_creds.password) 128 | print("Sending email...") 129 | sender.send(addr_from, addr_to, subject, message, files=files) 130 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/envs/prod/.env.template: -------------------------------------------------------------------------------- 1 | ENV=backend-prod 2 | DEBUG=off 3 | DEBUG_TOOLBAR=off 4 | SECRET_KEY= 5 | 6 | POSTGRES_DB={{cookiecutter.django_project_name}} 7 | POSTGRES_HOST=db 8 | POSTGRES_PORT=5432 9 | POSTGRES_USER={{cookiecutter.postgres_user}} 10 | POSTGRES_PASSWORD= 11 | DATABASE_POOL_URL= 12 | # using transaction-based db connection pool as DATABASE_URL instead of DATABASE_POOL_URL will break production 13 | DATABASE_URL=postgres://{{cookiecutter.postgres_user}}:@db:5432/{{cookiecutter.django_project_name}} 14 | 15 | NGINX_HOST= 16 | 17 | CORS_ENABLED=on 18 | CORS_ALLOWED_ORIGINS= 19 | CORS_ALLOWED_ORIGIN_REGEXES= 20 | CORS_ALLOW_ALL_ORIGINS=0 21 | 22 | REDIS_HOST=redis 23 | REDIS_PORT=6379 24 | 25 | {% if cookiecutter.use_celery == "y" %} 26 | CELERY_BROKER_URL=redis://redis:6379/0 27 | CELERY_TASK_ALWAYS_EAGER=0 28 | CELERY_MASTER_CONCURRENCY=2 29 | CELERY_WORKER_CONCURRENCY=2 30 | {% if cookiecutter.use_flower == "y" %} 31 | CELERY_FLOWER_USER=flower 32 | CELERY_FLOWER_PASSWORD= 33 | {% endif %} 34 | {% endif %} 35 | 36 | {% if cookiecutter.use_channels == "y" %} 37 | CHANNELS_BACKEND_URL=redis://redis:6379/1 38 | {% endif %} 39 | 40 | {% if cookiecutter.log_aggregating == 'y' %} 41 | LOKI_URL={{cookiecutter.log_aggregator_url}} 42 | LOKI_REFRESH_INTERVAL=5s 43 | LOKI_USER= 44 | LOKI_PASSWORD= 45 | LOKI_CLIENT= 46 | LOKI_CLIENT_SERVER_GROUP= 47 | {% endif %} 48 | 49 | EMAIL_BACKEND=django.core.mail.backends.smtp.EmailBackend 50 | EMAIL_FILE_PATH=/tmp/email 51 | EMAIL_HOST=smtp.sendgrid.net 52 | EMAIL_PORT=587 53 | EMAIL_USE_TLS=1 54 | EMAIL_HOST_USER=apikey 55 | EMAIL_HOST_PASSWORD= 56 | DEFAULT_FROM_EMAIL= 57 | 58 | SENTRY_DSN={{cookiecutter.sentry_dsn}} 59 | 60 | CSP_ENABLED={{cookiecutter.csp_enabled}} 61 | CSP_REPORT_ONLY={{cookiecutter.csp_report_only}} 62 | CSP_REPORT_URL="{{cookiecutter.csp_report_url}}" 63 | CSP_DEFAULT_SRC="{{cookiecutter.csp_default_src}}" 64 | CSP_SCRIPT_SRC="{{cookiecutter.csp_script_src}}" 65 | CSP_STYLE_SRC="{{cookiecutter.csp_style_src}}" 66 | CSP_FONT_SRC="{{cookiecutter.csp_font_src}}" 67 | CSP_IMG_SRC="{{cookiecutter.csp_img_src}}" 68 | CSP_MEDIA_SRC="{{cookiecutter.csp_media_src}}" 69 | CSP_OBJECT_SRC="{{cookiecutter.csp_object_src}}" 70 | CSP_FRAME_SRC="{{cookiecutter.csp_frame_src}}" 71 | CSP_CONNECT_SRC="{{cookiecutter.csp_connect_src}}" 72 | CSP_CHILD_SRC="{{cookiecutter.csp_child_src}}" 73 | CSP_MANIFEST_SRC="{{cookiecutter.csp_manifest_src}}" 74 | CSP_WORKER_SRC="{{cookiecutter.csp_worker_src}}" 75 | CSP_BLOCK_ALL_MIXED_CONTENT={{cookiecutter.csp_block_all_mixed_content}} 76 | CSP_EXCLUDE_URL_PREFIXES={{cookiecutter.csp_exclude_url_prefixes}} 77 | 78 | BACKUP_LOCAL_ROTATE_KEEP_LAST= 79 | BACKUP_B2_BUCKET= 80 | BACKUP_B2_FOLDER= 81 | BACKUP_B2_APPLICATION_KEY_ID= 82 | BACKUP_B2_APPLICATION_KEY= 83 | 84 | {% if cookiecutter.use_allauth == "y" %} 85 | {% if 'apple' in 
cookiecutter.allauth_providers %} 86 | APPLE_LOGIN_CLIENT_ID= 87 | APPLE_LOGIN_SECRET= 88 | APPLE_LOGIN_KEY= 89 | APPLE_LOGIN_CERTIFICATE_PRIVATE_KEY= 90 | {% endif %} 91 | {% if 'discord' in cookiecutter.allauth_providers %} 92 | DISCORD_LOGIN_CLIENT_ID= 93 | DISCORD_LOGIN_SECRET= 94 | {% endif %} 95 | {% if 'facebook' in cookiecutter.allauth_providers %} 96 | FACEBOOK_LOGIN_CLIENT_ID= 97 | FACEBOOK_LOGIN_SECRET= 98 | {% endif %} 99 | {% if 'github' in cookiecutter.allauth_providers %} 100 | GITHUB_LOGIN_CLIENT_ID= 101 | GITHUB_LOGIN_SECRET= 102 | {% endif %} 103 | {% if 'gitlab' in cookiecutter.allauth_providers %} 104 | GITLAB_LOGIN_CLIENT_ID= 105 | GITLAB_LOGIN_SECRET= 106 | {% endif %} 107 | {% if 'google' in cookiecutter.allauth_providers %} 108 | GOOGLE_LOGIN_CLIENT_ID= 109 | GOOGLE_LOGIN_SECRET= 110 | {% endif %} 111 | {% if 'microsoft' in cookiecutter.allauth_providers %} 112 | MICROSOFT_LOGIN_CLIENT_ID= 113 | MICROSOFT_LOGIN_SECRET= 114 | {% endif %} 115 | {% if 'openid_connect' in cookiecutter.allauth_providers %} 116 | OPENID_CONNECT_NICE_NAME= 117 | OPENID_CONNECT_LOGIN_CLIENT_ID= 118 | OPENID_CONNECT_LOGIN_SECRET= 119 | OPENID_CONNECT_SERVER_URL= 120 | {% endif %} 121 | {% if 'twitter' in cookiecutter.allauth_providers %} 122 | TWITTER_LOGIN_CLIENT_ID= 123 | TWITTER_LOGIN_SECRET= 124 | {% endif %} 125 | {% if 'atlassian' in cookiecutter.allauth_providers %} 126 | ATLASSIAN_LOGIN_CLIENT_ID= 127 | ATLASSIAN_LOGIN_SECRET= 128 | {% endif %} 129 | {% endif %} -------------------------------------------------------------------------------- /features.md: -------------------------------------------------------------------------------- 1 | # RT cookiecutter template selling points 2 | 3 | ## Main features 4 | 5 | - Cookiecutter template allowing long term support using [Cruft](https://github.com/cruft/cruft) update mechanism 6 | - [Docker](https://www.docker.com) and [docker compose](https://docs.docker.com/compose/) for easy & simple (c) development and deployment 7 | - Latest [python](https://www.python.org) from 3.11 line 8 | - Latest [Django](https://www.djangoproject.com) LTS release 9 | - [Gunicorn](https://gunicorn.org) for running WSGI instances on prod 10 | - [Uvicorn](https://www.uvicorn.org) for ASGI instances on prod 11 | - [Nginx](https://www.nginx.com) as high-performance reverse proxy with automatic SSL certificate renewal 12 | - [Postgres](https://www.postgresql.org) with [psycopg3](https://www.psycopg.org/psycopg3) for database 13 | - Task management via [celery](https://docs.celeryproject.org) with scheduled tasks support (using celery-beat) 14 | - Multiple workers & queues supported (if you need to divide / prioritize tasks and apply different levels of concurrency) 15 | 16 | ## Self-hosted configuration 17 | 18 | - Persistent [redis](https://redis.io) for task management backend 19 | - Celery task monitoring via [flower](https://flower.readthedocs.io/en/latest/) 20 | 21 | ## Cloud configuration options 22 | 23 | - AWS support: 24 | terraform to deploy RDS, SQS, ELB etc 25 | - Vultr: terraform for deploying application on cheap VPS servers 26 | - Support for transaction-based database connection pooling 27 | 28 | ## Configuration 29 | 30 | - [.env files](https://12factor.net/config) for configuration; preconfigured `.env` for both local and prod environments 31 | - [django-debug-toolbar](https://django-debug-toolbar.readthedocs.io/en/latest/) (enabled for local environment) for debugging app performance 32 | - [ipython](https://ipython.org) included for nice 
interactive django shell 33 | - [django-extensions](https://django-extensions.readthedocs.io/en/latest/) for drawing graph of models and more 34 | 35 | ## Continuous integration 36 | 37 | - Github Actions for CI/CD 38 | - [nox](https://nox.thea.codes) for workflow automation 39 | - [ruff](https://github.com/astral-sh/ruff) for linting & auto fixing python code 40 | - [shellcheck](https://www.shellcheck.net) for linting & auto fixing shell scripts 41 | - [pytest](https://docs.pytest.org) with xdist for efficient parallel testing 42 | 43 | ## Deployment 44 | 45 | - [2-stage docker image build](https://docs.docker.com/develop/develop-images/multistage-build/) for clean app image (both debian-based and alpine-based base images are supported) 46 | - Easy deployment based on `git push production master` 47 | - Redeployment doesn't stop `db`, `redis` and `nginx` containers -> non-interrupted deployments 48 | - Migrations are done during deployment, before application startup -> application won't be run on unmigrated database 49 | 50 | ## Security & performance 51 | 52 | - [CORS headers](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) preconfigured 53 | - [CSP](https://en.wikipedia.org/wiki/Content_Security_Policy) integrated 54 | - [BREACH attack](https://en.wikipedia.org/wiki/BREACH) mitigation 55 | - Brotli compression support 56 | - 0-RTT TLS 1.3 Early Data support 57 | - [http/2](https://en.wikipedia.org/wiki/HTTP/2) support 58 | - [TLS 1.2&1.3](https://en.wikipedia.org/wiki/Transport_Layer_Security) via [letsencrypt](https://letsencrypt.org) with auto-renewal 59 | - Forward secrecy ciphers 60 | - Overall ssllabs security class: 61 | A+ 100/100/90/90 (to keep compatibility with some older clients) 62 | - Optional fingerprinting of users on backend and frontend sides 63 | 64 | ## Reliability 65 | 66 | - Cost-efficient & secure automatic database backups 67 | - [B2](https://www.backblaze.com/b2/cloud-storage.html) cloud storage using a `writeFiles`-only key with and option to store them locally or send them over email 68 | - zstd compression for efficient storage & excellent speed for both backup and restore 69 | - Scripted and repeatable procedure for restoring the system from a backup 70 | - [Sentry](https://sentry.io) error tracking preconfigured 71 | - Grafana for metrics and log aggregation (Grafana Loki) 72 | - Prometheus for data collection 73 | - Grafana Loki for log aggregation with Alloy for log shipping 74 | - Generic host dashboard section optimized for both VM and physical machines 75 | - [Integration of framework allowing easy addition of new application-level metrics](https://github.com/reef-technologies/django-business-metrics) 76 | - nginx-level dashboard section for http/ws statistics 77 | - Active monitoring dashboard (http ping) 78 | - Alert history dashboard 79 | - Alertmanager for detecting issues and alerting 80 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/bin/prepare-os.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Copyright 2020, Reef Technologies (reef.pl), All rights reserved. 
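# Installs the host prerequisites used by this template: Docker with the compose
# plugin, sentry-cli, the Backblaze B2 CLI, the AWS CLI and jq, and opens port 9100
# to the Docker network so nginx can scrape node-exporter metrics.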
3 | set -eux 4 | 5 | sudo ufw allow proto tcp from 172.0.0.0/8 to any port 9100 # nginx getting node-exporter metrics 6 | 7 | DOCKER_BIN="$(command -v docker || true)" 8 | DOCKER_COMPOSE_INSTALLED="$(docker compose version || true)" 9 | SENTRY_CLI="$(command -v sentry-cli || true)" 10 | B2_CLI="$(command -v b2 || true)" 11 | AWS_CLI="$(command -v aws || true)" 12 | JQ_BIN="$(command -v jq || true)" 13 | USER="$(id -un 1000)" 14 | 15 | if [ -x "${DOCKER_BIN}" ] && [ -n "${DOCKER_COMPOSE_INSTALLED}" ] && [ -x "${SENTRY_CLI}" ] && [ -x "${B2_CLI}" ] && [ -x "${AWS_CLI}" ] && [ -x "${JQ_BIN}" ]; then 16 | echo "\e[32mEverything required is already installed\e[0m"; 17 | exit 0; 18 | fi 19 | 20 | PLATFORM="$(uname -i)" 21 | if [ "${PLATFORM}" != "x86_64" ] && [ "${PLATFORM}" != "aarch64" ]; then 22 | echo "Unsupported hardware platform: ${PLATFORM}" 23 | exit 1 24 | fi 25 | 26 | WORK_DIR="$(mktemp -d)" 27 | if [ ! "${WORK_DIR}" ] || [ ! -d "${WORK_DIR}" ]; then 28 | echo "Could not create temp dir" 29 | exit 1 30 | fi 31 | cd "${WORK_DIR}" 32 | cleanup() { 33 | rm -rf "${WORK_DIR}" 34 | } 35 | trap cleanup EXIT 36 | 37 | DEBIAN_FRONTEND=noninteractive 38 | 39 | apt-get update 40 | apt-get install -y apt-transport-https ca-certificates curl software-properties-common python3-pip rng-tools 41 | 42 | if [ ! -x "${SENTRY_CLI}" ]; then 43 | curl -sL https://sentry.io/get-cli/ | bash 44 | fi 45 | 46 | if [ ! -x "${B2_CLI}" ]; then 47 | curl -s --output /usr/local/bin/b2 -L https://github.com/Backblaze/B2_Command_Line_Tool/releases/latest/download/b2-linux 48 | chmod a+x /usr/local/bin/b2 49 | fi 50 | 51 | if [ ! -x "${DOCKER_BIN}" ] || [ ! -x "${DOCKER_COMPOSE_INSTALLED}" ]; then 52 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - 53 | add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 54 | apt-get update 55 | apt-get -y install docker-ce docker-compose-plugin 56 | usermod -aG docker "$USER" 57 | fi 58 | 59 | if [ ! -x "${JQ_BIN}" ]; then 60 | apt-get -y install jq 61 | fi 62 | 63 | if [ ! -f /etc/docker/daemon.json ]; then 64 | echo '{ "registry-mirrors": ["https://mirror.gcr.io"] }' > /etc/docker/daemon.json 65 | else 66 | jq '.["registry-mirrors"] += ["https://mirror.gcr.io"]' /etc/docker/daemon.json > /etc/docker/daemon.tmp && mv /etc/docker/daemon.tmp /etc/docker/daemon.json 67 | fi 68 | 69 | service docker restart 70 | 71 | if [ ! 
-x "${AWS_CLI}" ]; then 72 | apt-get -y install gpg unzip 73 | curl "https://awscli.amazonaws.com/awscli-exe-linux-${PLATFORM}.zip" -o "awscliv2.zip" 74 | curl "https://awscli.amazonaws.com/awscli-exe-linux-${PLATFORM}.zip.sig" -o "awscliv2.sig" 75 | gpg --import < list[Path]: 40 | file_list = [] 41 | for cmd in ( 42 | ["git", "ls-files"], 43 | ["git", "ls-files", "--others", "--exclude-standard"], 44 | ): 45 | cmd_result = subprocess.run(cmd, check=True, text=True, capture_output=True) 46 | file_list.extend(cmd_result.stdout.splitlines()) 47 | file_paths = [Path(p) for p in file_list] 48 | return file_paths 49 | 50 | 51 | def list_files(suffix: str | None = None) -> list[Path]: 52 | """List all non-files not-ignored by git.""" 53 | file_paths = _list_files() 54 | if suffix is not None: 55 | file_paths = [p for p in file_paths if p.suffix == suffix] 56 | return file_paths 57 | 58 | 59 | def run_readable(session, mode="check"): 60 | session.run( 61 | "docker", 62 | "run", 63 | "--platform", 64 | "linux/amd64", 65 | "--rm", 66 | "-v", 67 | f"{ROOT.absolute()}:/data", 68 | "-w", 69 | "/data", 70 | "ghcr.io/bobheadxi/readable:v0.5.0@sha256:423c133e7e9ca0ac20b0ab298bd5dbfa3df09b515b34cbfbbe8944310cc8d9c9", 71 | mode, 72 | "![.]**/*.md", 73 | external=True, 74 | ) 75 | 76 | 77 | def run_shellcheck(session, mode="check"): 78 | shellcheck_cmd = [ 79 | "docker", 80 | "run", 81 | "--platform", 82 | "linux/amd64", # while this image is multi-arch, we cannot use digest with multi-arch images 83 | "--rm", 84 | "-v", 85 | f"{ROOT.absolute()}:/mnt", 86 | "-w", 87 | "/mnt", 88 | "-q", 89 | "koalaman/shellcheck:0.9.0@sha256:a527e2077f11f28c1c1ad1dc784b5bc966baeb3e34ef304a0ffa72699b01ad9c", 90 | ] 91 | 92 | files = list_files(suffix=".sh") 93 | if not files: 94 | session.log("No shell files found") 95 | return 96 | shellcheck_cmd.extend(files) 97 | 98 | if mode == "fmt": 99 | with tempfile.NamedTemporaryFile(mode="w+") as diff_file: 100 | session.run( 101 | *shellcheck_cmd, 102 | "--format=diff", 103 | external=True, 104 | stdout=diff_file, 105 | success_codes=[0, 1], 106 | ) 107 | diff_file.seek(0) 108 | diff = diff_file.read() 109 | if len(diff.splitlines()) > 1: # ignore single-line message 110 | session.log("Applying shellcheck patch:\n%s", diff) 111 | subprocess.run( 112 | ["patch", "-p1"], 113 | input=diff, 114 | text=True, 115 | check=True, 116 | ) 117 | 118 | session.run(*shellcheck_cmd, external=True) 119 | 120 | 121 | @nox.session(name="format", python=PYTHON_DEFAULT_VERSION) 122 | def format_(session): 123 | """Lint the code and apply fixes in-place whenever possible.""" 124 | install(session, "format") 125 | session.run("ruff", "check", "--fix", ".") 126 | run_shellcheck(session, mode="fmt") 127 | run_readable(session, mode="fmt") 128 | session.run("ruff", "format", ".") 129 | 130 | 131 | @nox.session(python=PYTHON_DEFAULT_VERSION) 132 | def lint(session): 133 | """Run linters in readonly mode.""" 134 | install(session, "lint") 135 | session.run("ruff", "check", "--diff", "--unsafe-fixes", ".") 136 | session.run("codespell", ".") 137 | run_shellcheck(session, mode="check") 138 | run_readable(session, mode="check") 139 | session.run("ruff", "format", "--diff", ".") 140 | 141 | 142 | @nox.session(python=PYTHON_DEFAULT_VERSION) 143 | def type_check(session): 144 | install(session, "type_check") 145 | with session.chdir(str(APP_ROOT)): 146 | session.run("mypy", "--config-file", "mypy.ini", ".", *session.posargs) 147 | 148 | 149 | @nox.session(python=PYTHON_VERSIONS) 150 | def test(session): 151 | 
install(session, "test") 152 | with session.chdir(str(APP_ROOT)): 153 | session.run( 154 | "pytest", 155 | "-W", 156 | "ignore::DeprecationWarning", 157 | "-s", 158 | "-x", 159 | "-vv", 160 | "-n", 161 | "auto", 162 | "{{cookiecutter.django_project_name}}", 163 | *session.posargs, 164 | ) 165 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/devops/tf/main/files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | services: 4 | app: 5 | image: ${ecr_base_url}/${ecr_image} 6 | init: true 7 | restart: always 8 | env_file: ./.env 9 | healthcheck: 10 | test: ["CMD", "./healthcheck.py", "/var/run/gunicorn/gunicorn.sock"] 11 | {% if cookiecutter.monitoring == 'y' %} 12 | environment: 13 | # Add this variable to all containers that should dump Prometheus metrics. Each container besides this one 14 | # should use a different subdirectory of /prometheus-multiproc-dir, e.g. 15 | # - PROMETHEUS_MULTIPROC_DIR=/prometheus-multiproc-dir/other-container 16 | # Don't forget to also mount the prometheus-metrics volume in other containers too. 17 | - PROMETHEUS_MULTIPROC_DIR=/prometheus-multiproc-dir 18 | {% endif %} 19 | volumes: 20 | - backend-static:/root/src/static 21 | - gunicorn-socket:/var/run/gunicorn 22 | - ./media:/root/src/media 23 | {% if cookiecutter.monitoring == 'y' %} 24 | # Add this mount to each container that should dump Prometheus metrics. 25 | - ./prometheus-metrics:/prometheus-multiproc-dir 26 | {% endif %} 27 | logging: &app_logging 28 | driver: awslogs 29 | options: 30 | awslogs-region: ${region} 31 | awslogs-group: /aws/ec2/${name}-${env} 32 | tag: '$${INSTANCE_ID_SUBST}-app' 33 | awslogs-create-group: "true" 34 | 35 | backups: 36 | build: 37 | context: backups/ 38 | dockerfile: Dockerfile 39 | init: true 40 | restart: unless-stopped 41 | environment: 42 | - DATABASE_URL=${DATABASE_URL} 43 | - BACKUP_LOCAL_DIR=/var/backups 44 | - BACKUP_LOCAL_ROTATE_KEEP_LAST=${BACKUP_LOCAL_ROTATE_KEEP_LAST} 45 | - B2_BUCKET=${BACKUP_B2_BUCKET} 46 | - B2_FOLDER=${BACKUP_B2_FOLDER} 47 | - B2_APPLICATION_KEY_ID=${BACKUP_B2_APPLICATION_KEY_ID} 48 | - B2_APPLICATION_KEY=${BACKUP_B2_APPLICATION_KEY} 49 | - EMAIL_HOST=${EMAIL_HOST} 50 | - EMAIL_PORT=${EMAIL_PORT} 51 | - EMAIL_HOST_USER=${EMAIL_HOST_USER} 52 | - EMAIL_HOST_PASSWORD=${EMAIL_HOST_PASSWORD} 53 | - EMAIL_TARGET=${EMAIL_TARGET} 54 | - DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL} 55 | - SENTRY_DSN=${SENTRY_DSN} 56 | volumes: 57 | - backups:/var/backups 58 | logging: 59 | <<: *app_logging 60 | 61 | {% if cookiecutter.monitoring == 'y' %} 62 | node-exporter: 63 | image: ghcr.io/reef-technologies/node-exporter-aws-ec2:latest 64 | container_name: node-exporter 65 | restart: unless-stopped 66 | network_mode: host 67 | pid: host 68 | volumes: 69 | - /:/host:ro,rslave 70 | - nodeexporter_collectors:/textfile_collectors 71 | command: 72 | - '--path.rootfs=/host' 73 | - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' 74 | {% if cookiecutter.vulnerabilities_scanning == 'y' %} 75 | - '--collector.textfile.directory=textfile_collectors' 76 | {% endif %} 77 | logging: &exporter_logging 78 | driver: journald 79 | options: 80 | tag: '{% raw %}{###{.Name}###}{% endraw %}' 81 | 82 | cadvisor: 83 | image: gcr.io/cadvisor/cadvisor:v0.40.0 84 | container_name: cadvisor 85 | privileged: true 86 | devices: 87 | - /dev/kmsg:/dev/kmsg 88 | volumes: 89 | - /:/rootfs:ro 90 | - /var/run:/var/run:ro 91 | - 
/sys:/sys:ro 92 | - /var/lib/docker:/var/lib/docker:ro 93 | - /cgroup:/cgroup:ro 94 | restart: unless-stopped 95 | logging: 96 | <<: *exporter_logging 97 | {% endif %} 98 | 99 | nginx: 100 | image: 'ghcr.io/reef-technologies/nginx-rt:v1.2.2' 101 | restart: unless-stopped 102 | healthcheck: 103 | test: wget -q --spider http://0.0.0.0:8000/alive/ || exit 1 104 | depends_on: 105 | - app 106 | {% if cookiecutter.monitoring == 'y' %} 107 | - cadvisor 108 | - node-exporter 109 | {% endif %} 110 | command: nginx -g 'daemon off;' 111 | ports: 112 | {% if cookiecutter.monitoring == 'y' %} 113 | - 10443:10443 114 | {% endif %} 115 | - 8000:8000 116 | volumes: 117 | - ./nginx/templates:/etc/nginx/templates 118 | - ./nginx/config_helpers:/etc/nginx/config_helpers 119 | - backend-static:/srv/static:ro 120 | - ./media:/srv/media:ro 121 | - ./nginx/monitoring_certs:/etc/monitoring_certs 122 | - gunicorn-socket:/var/run/gunicorn:ro 123 | logging: 124 | driver: awslogs 125 | options: 126 | awslogs-region: ${region} 127 | awslogs-group: /aws/ec2/${name}-${env} 128 | tag: '$${INSTANCE_ID_SUBST}-nginx' 129 | awslogs-create-group: "true" 130 | {% if cookiecutter.monitoring == 'y' %} 131 | extra_hosts: 132 | - "host.docker.internal:host-gateway" 133 | {% endif %} 134 | {% if cookiecutter.vulnerabilities_scanning == 'y' %} 135 | vulnrelay: 136 | image: 'ghcr.io/reef-technologies/vulnrelay:latest' 137 | container_name: vulnrelay 138 | restart: unless-stopped 139 | env_file: ./.vuln.env 140 | environment: 141 | - METRICS_DIR=/app/metrics 142 | volumes: 143 | - /var/run/docker.sock:/var/run/docker.sock:ro 144 | - nodeexporter_collectors:/app/metrics 145 | logging: 146 | driver: awslogs 147 | options: 148 | awslogs-region: ${region} 149 | awslogs-group: /aws/ec2/${name}-${env} 150 | tag: '$${INSTANCE_ID_SUBST}-vulnrelay' 151 | awslogs-create-group: "true" 152 | watchtower: 153 | image: containrrr/watchtower 154 | restart: unless-stopped 155 | volumes: 156 | - /var/run/docker.sock:/var/run/docker.sock:ro 157 | command: --interval 7200 vulnrelay 158 | logging: 159 | driver: awslogs 160 | options: 161 | awslogs-region: ${region} 162 | awslogs-group: /aws/ec2/${name}-${env} 163 | tag: '$${INSTANCE_ID_SUBST}-vulnrelay' 164 | awslogs-create-group: "true" 165 | {% endif %} 166 | 167 | volumes: 168 | backend-static: 169 | backups: 170 | gunicorn-socket: 171 | nodeexporter_collectors: 172 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/nginx/templates/default.conf.template: -------------------------------------------------------------------------------- 1 | # 2 | # SSL config below is inspired by websites: 3 | # - https://syslink.pl/cipherlist/ 4 | # - https://ssl-config.mozilla.org/ 5 | # Generated for Intermediate configuration, nginx 1.20.1 or later 6 | # 7 | 8 | upstream gunicorn { 9 | server unix:/var/run/gunicorn/gunicorn.sock fail_timeout=0; 10 | } 11 | 12 | server { 13 | listen 80 default_server; 14 | server_name _; 15 | server_name_in_redirect off; 16 | 17 | return 444; 18 | } 19 | 20 | server { 21 | listen 80; 22 | server_name ${NGINX_HOST}; 23 | return 301 https://${NGINX_HOST}$request_uri; 24 | } 25 | 26 | server { 27 | listen 443 ssl default_server; 28 | server_name _; 29 | server_name_in_redirect off; 30 | 31 | # Load the Diffie-Hellman parameter. 
32 | ssl_dhparam /etc/letsencrypt/dhparams/dhparam.pem; 33 | 34 | ssl_certificate /etc/letsencrypt/live/${NGINX_HOST}/fullchain.pem; 35 | ssl_certificate_key /etc/letsencrypt/live/${NGINX_HOST}/privkey.pem; 36 | ssl_trusted_certificate /etc/letsencrypt/live/${NGINX_HOST}/chain.pem; 37 | 38 | return 444; 39 | } 40 | 41 | server { 42 | listen 443 ssl http2; 43 | server_name ${NGINX_HOST}; 44 | 45 | add_header Strict-Transport-Security "max-age=31536000" always; 46 | add_header X-Content-Type-Options nosniff; 47 | add_header X-XSS-Protection "1; mode=block"; 48 | add_header X-Frame-Options DENY; 49 | 50 | # Load the Diffie-Hellman parameter. 51 | ssl_dhparam /etc/letsencrypt/dhparams/dhparam.pem; 52 | 53 | ssl_certificate /etc/letsencrypt/live/${NGINX_HOST}/fullchain.pem; 54 | ssl_certificate_key /etc/letsencrypt/live/${NGINX_HOST}/privkey.pem; 55 | ssl_trusted_certificate /etc/letsencrypt/live/${NGINX_HOST}/chain.pem; 56 | 57 | include /etc/nginx/config_helpers/brotli.conf; 58 | include /etc/nginx/config_helpers/gzip.conf; 59 | 60 | access_log /dev/stdout; 61 | error_log /dev/stderr info; 62 | 63 | client_max_body_size 100M; 64 | 65 | location /static/ { 66 | root /srv/; 67 | } 68 | 69 | location /media/ { 70 | root /srv/; 71 | } 72 | 73 | {% if cookiecutter.monitoring == 'y' %} 74 | location /metrics { 75 | return 404; 76 | } 77 | 78 | location /business-metrics { 79 | return 404; 80 | } 81 | {% endif %} 82 | 83 | location / { 84 | {% if cookiecutter.nginx_tls_early_data_enabled == 'y' %} 85 | if ($too_early) { 86 | return 425; 87 | } 88 | {% endif %} 89 | proxy_pass_header Server; 90 | proxy_redirect off; 91 | proxy_set_header Host $http_host; 92 | proxy_set_header X-Real-IP $remote_addr; 93 | proxy_set_header X-Forwarded-Proto $scheme; 94 | {% if cookiecutter.nginx_tls_early_data_enabled == 'y' %} 95 | proxy_set_header Early-Data $ssl_early_data; 96 | {% endif %} 97 | proxy_pass http://gunicorn; 98 | } 99 | } 100 | 101 | {% if cookiecutter.monitoring == 'y' %} 102 | upstream node_exporter { 103 | server host.docker.internal:9100; 104 | } 105 | 106 | server { 107 | server_name monitoring; 108 | listen 10443 ssl http2; 109 | 110 | include /etc/nginx/config_helpers/brotli.conf; 111 | include /etc/nginx/config_helpers/gzip.conf; 112 | 113 | ssl_certificate /etc/monitoring_certs/cert.crt; 114 | ssl_certificate_key /etc/monitoring_certs/cert.key; 115 | ssl_trusted_certificate /etc/monitoring_certs/ca.crt; 116 | 117 | ssl_client_certificate /etc/monitoring_certs/ca.crt; 118 | ssl_verify_client on; 119 | 120 | access_log /dev/stdout; 121 | error_log /dev/stderr info; 122 | 123 | location /node-exporter-metrics/ { 124 | proxy_pass_header Server; 125 | proxy_redirect off; 126 | proxy_set_header Host $http_host; 127 | proxy_set_header X-Real-IP $remote_addr; 128 | proxy_set_header X_SCHEME $scheme; 129 | proxy_pass http://node_exporter/metrics; 130 | } 131 | 132 | location /cadvisor-metrics/ { 133 | proxy_pass_header Server; 134 | proxy_redirect off; 135 | proxy_set_header Host $http_host; 136 | proxy_set_header X-Real-IP $remote_addr; 137 | proxy_set_header X_SCHEME $scheme; 138 | proxy_pass http://cadvisor:8080/metrics; 139 | } 140 | 141 | location /nginx-metrics/ { 142 | vhost_traffic_status_display; 143 | vhost_traffic_status_display_format prometheus; 144 | } 145 | 146 | location /application-metrics/ { 147 | proxy_pass_header Server; 148 | proxy_redirect off; 149 | proxy_set_header Host $http_host; 150 | proxy_set_header X-Real-IP $remote_addr; 151 | proxy_set_header X_SCHEME $scheme; 152 | 
proxy_pass http://gunicorn/metrics; 153 | } 154 | 155 | location /business-metrics/ { 156 | proxy_pass_header Server; 157 | proxy_redirect off; 158 | proxy_set_header Host $http_host; 159 | proxy_set_header X-Real-IP $remote_addr; 160 | proxy_set_header X_SCHEME $scheme; 161 | proxy_pass http://gunicorn/business-metrics; 162 | } 163 | 164 | {% if cookiecutter.use_flower == "y" %} 165 | location /celery-metrics/ { 166 | proxy_pass_header Server; 167 | proxy_redirect off; 168 | proxy_set_header Host $http_host; 169 | proxy_set_header X-Real-IP $remote_addr; 170 | proxy_set_header X_SCHEME $scheme; 171 | proxy_pass http://celery-flower:5555/metrics; 172 | } 173 | {% endif %} 174 | 175 | location /backup-metrics/ { 176 | proxy_pass_header Server; 177 | proxy_redirect off; 178 | proxy_set_header Host $http_host; 179 | proxy_set_header X-Real-IP $remote_addr; 180 | proxy_set_header X_SCHEME $scheme; 181 | proxy_pass http://backups:8000/; 182 | } 183 | } 184 | {% endif %} 185 | 186 | {% if cookiecutter.nginx_tls_early_data_enabled == 'y' %} 187 | ssl_early_data on; 188 | 189 | map "$request_method:$ssl_early_data" $too_early { 190 | default 1; 191 | "GET:1" 0; 192 | "~^\S+:$" 0; 193 | } 194 | {% endif %} 195 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/README_AWS.md: -------------------------------------------------------------------------------- 1 | # Deploying to AWS 2 | 3 | The deployment is split into several steps, described below. 4 | 5 | Files related to the AWS deployment have been generated in the `devops/` directory. 6 | 7 | By convention, projects that are meant to be deployed to AWS have a `deploy-to-aws.sh` script in the root dir and a `devops` directory. 8 | The script builds the docker image, uploads it and tells AWS to reload the app (causing a new ec2 machine to be spawned). 9 | In the `devops` directory you will find the terraform configuration as well as the packer files (for building the AMI). 10 | 11 | If you want to deploy your app to an AWS environment, you need to do the following steps: 12 | 13 | - configure your environment 14 | - create an infra s3 bucket 15 | - deploy `tf/core` (contains stuff common to all environments in a given AWS account) 16 | - deploy the chosen `tf/main/envs/` (by default staging and prod are generated) 17 | 18 | ## Required software 19 | 20 | *AWS CLI* 21 | 22 | AWS recommends using profiles when dealing with multiple AWS accounts. 23 | To choose between environments, rather than switching access and secret keys, we just switch our profiles. 24 | We can choose our profile name, which makes it easier to recognize in which environment we operate. 25 | To configure the AWS environment, you need to have the AWS CLI installed. 26 | It is recommended to use AWS CLI v2, which can be downloaded from: 27 | 28 | 29 | *Terraform* You will also need terraform version 1.0.x. It is recommended to use `tfenv` to install terraform with the correct version. 30 | You can download and install it from 31 | 32 | *direnv* To avoid mistakes when switching environments (or regions), it is recommended to use the `direnv` tool, which supports loading environment variables from an `.envrc` file placed in a directory. 33 | You can read about it here: 34 | 35 | 36 | ## Configure your environment 37 | 38 | To configure your AWS profile, please run: 39 | 40 | ``` 41 | $ aws configure --profile 42 | ``` 43 | 44 | And answer the following questions: 45 | 46 | ``` 47 | AWS Access Key ID: ... 48 | AWS Secret Access Key: ...
49 | Default region name: us-east-1 (just an example) 50 | Default output format [None]: (can be left blank) 51 | ``` 52 | 53 | Once configured, you can switch your profile using the `AWS_PROFILE=` env variable or by adding the `--profile` option to your aws cli command. 54 | 55 | It's handy to create an `.envrc` file in the project root directory (where `deploy-to-aws.sh` is located) with the content: 56 | 57 | ``` 58 | export AWS_PROFILE= 59 | export AWS_REGION= 60 | ``` 61 | 62 | And then accept the changes by running: 63 | 64 | ``` 65 | $ direnv allow 66 | ``` 67 | 68 | After doing that, anytime you enter the project directory, the correct profile will be loaded. 69 | 70 | ## Configuring infra 71 | 72 | You only need to do this if you change anything in the `devops` directory (or if you mess something up in the AWS console and want to revert the changes). 73 | 74 | Create the infra bucket 75 | 76 | Before being able to run terraform, we need to create an S3 bucket, which will hold the terraform state. 77 | This bucket is used by all environments and needs to be globally unique. 78 | 79 | To create the bucket, run: 80 | 81 | ``` 82 | aws s3 mb --region {{ cookiecutter.aws_region }} s3://{{ cookiecutter.aws_infra_bucket }} 83 | ``` 84 | 85 | The terraform code has the following structure: 86 | 87 | ``` 88 | |- devops 89 | |- tf 90 | |- core 91 | |- main 92 | |- envs 93 | | |- staging 94 | | |- prod 95 | |- modules 96 | ``` 97 | 98 | You can run terraform from: 99 | 100 | - core 101 | - envs/staging 102 | - envs/prod 103 | 104 | directories. 105 | 106 | The *core* directory contains infrastructure code which needs to be created BEFORE pushing a docker image. 107 | It is responsible for creating the docker registries which you can push docker images to. 108 | 109 | Code placed in *main* is the rest of the infrastructure, which is created after pushing the docker image. 110 | 111 | Each of the environments (and core) can be applied by executing: 112 | 113 | ``` 114 | terraform init 115 | terraform apply 116 | ``` 117 | 118 | IMPORTANT! The env variables for the apps (the `.env` file) and `docker-compose.yml` are defined in terraform files; if you change any of them, you need to run `terraform apply` AND refresh the ec2 instance. 119 | The same goes for the AMI built by packer. 120 | 121 | ## Adding secrets to the projects 122 | 123 | Cloud init is configured to provision EC2 machines spun up as part of this project's infrastructure. 124 | As part of this provisioning, SSM parameters following a specific naming convention are read and saved as files in the EC2 instance's home directory (RDS access details are managed in another way). 125 | SSM parameters can be managed via the AWS console (Systems Manager -> Parameter Store) or via the AWS CLI (`aws ssm`). 126 | The naming convention is `/application/{{ cookiecutter.aws_project_name }}/{env}/{path_of_the_file_to_be_created}`, for example `/application/project/staging/.env`. 127 | A few such parameters are managed by terraform in this project (e.g. `.env`, `docker-compose.yml`) and more can be added. 128 | In case you need to add confidential files (like a GCP credentials file) you can simply create appropriate SSM parameters, as shown in the example below. 129 | These will only be accessible to people that have access to AWS or the EC2 machines, not to people who have access to this repository. 130 | One such parameter, namely `/application/{{ cookiecutter.aws_project_name }}/{env}/secret.env`, is treated specially - if it exists (it doesn't by default) its contents are appended to `.env` during EC2 machine provisioning - this is a convenient way of supplying pieces of confidential information, like external systems' access keys, to `.env`. 131 |
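132 | For example, to add a (hypothetical) GCP credentials file for the staging environment, you could create a parameter along these lines (the file name and environment are only illustrative - adjust them to your needs): 133 | 134 | ``` 135 | # SecureString keeps the value encrypted at rest in Parameter Store 136 | aws ssm put-parameter --type SecureString --name "/application/{{ cookiecutter.aws_project_name }}/staging/gcp-credentials.json" --value file://gcp-credentials.json 137 | ``` 138 |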
140 | ## Vulnerability scanning 141 | If you set up your project with `vulnerabilities_scanning` enabled, you need to create an additional SSM parameter with the name `/application/{{ cookiecutter.aws_project_name }}/{env}/.vuln.env` containing the environment variables required by [vulnrelay](https://github.com/reef-technologies/vulnrelay) prior to deploying the project. Look at the `/envs/prod/.vuln.env.template` file to see the expected file format. 142 | 143 | For variable values, please refer to the [instructions in the internal handbook](https://github.com/reef-technologies/internal-handbook/blob/master/vuln_management.md) 144 | 145 | ## Deploying apps 146 | 147 | The docker containers are built with the code you have locally, including any changes. 148 | Building requires docker. 149 | To successfully run `deploy-to-aws.sh` you first need to run `./setup-prod.sh`. 150 | It uses the aws credentials stored in the `AWS_PROFILE` variable. 151 | If you don't set this variable, the `default` profile will be used. 152 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/backups/bin/serve_metrics.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import subprocess 3 | import time 4 | from argparse import ArgumentParser 5 | from collections.abc import Callable, Iterator 6 | from dataclasses import dataclass, field 7 | from datetime import UTC, datetime, timedelta 8 | from functools import wraps 9 | from os import environ 10 | from pathlib import Path 11 | from tempfile import NamedTemporaryFile 12 | 13 | import structlog 14 | from b2sdk.v2 import B2Api, InMemoryAccountInfo 15 | from prometheus_client import Gauge, Histogram, start_http_server 16 | from structlog.contextvars import bound_contextvars 17 | 18 | MiB = 1 19 | GiB = 1024 * MiB 20 | 21 | backup_size = Histogram( 22 | "backup_size", 23 | "Size of the backup", 24 | unit="MiB", 25 | buckets=( 26 | 16 * MiB, 27 | 64 * MiB, 28 | 256 * MiB, 29 | 1 * GiB, 30 | 2 * GiB, 31 | 4 * GiB, 32 | 8 * GiB, 33 | 16 * GiB, 34 | 32 * GiB, 35 | 64 * GiB, 36 | 128 * GiB, 37 | 256 * GiB, 38 | 512 * GiB, 39 | 1024 * GiB, 40 | ), 41 | ) 42 | total_backup_size = Gauge("total_backup_size", "Total size of all backups", unit="MiB") 43 | backup_count = Gauge("backup_count", "Number of backups") 44 | first_backup_time = Gauge("first_backup_time", "Timestamp of the first backup") 45 | last_backup_time = Gauge("last_backup_time", "Timestamp of the last backup") 46 | last_backup_is_operational = Gauge("last_backup_is_operational", "Last backup is checked") 47 | 48 | DATABASE_URL = environ["DATABASE_URL"] 49 | LOCAL_BACKUP_PATH = Path(environ["BACKUP_LOCAL_DIR"]) 50 | LOCAL_ROTATE_KEEP_LAST = (keep_last := environ.get("BACKUP_LOCAL_ROTATE_KEEP_LAST")) and int(keep_last) 51 | 52 | B2_BUCKET = environ.get("B2_BUCKET") 53 | B2_FOLDER = environ.get("B2_FOLDER") 54 | B2_APPLICATION_KEY_ID = environ.get("B2_APPLICATION_KEY_ID") 55 | B2_APPLICATION_KEY = environ.get("B2_APPLICATION_KEY") 56 | 57 | log = structlog.getLogger(__name__) 58 | 59 | 60 | def cached_method(method: Callable) -> Callable: 61 | @wraps(method) 62 | def
wrapper(self, *args, **kwargs): 63 | cache_key = (*args, *sorted(kwargs.items())) 64 | 65 | if not hasattr(self, "_cache"): 66 | self._cache = {} 67 | 68 | try: 69 | return self._cache[cache_key] 70 | except KeyError: 71 | self._cache[cache_key] = result = method(self, *args, **kwargs) 72 | return result 73 | 74 | return wrapper 75 | 76 | 77 | @dataclass 78 | class Backup: 79 | location: Path 80 | created_at: datetime 81 | size_bytes: int 82 | 83 | def __hash__(self) -> int: 84 | return hash(self.location) 85 | 86 | 87 | def check_pg_restore(backup_path: Path, expected_record: str = " TABLE DATA public django_migrations ") -> bool: 88 | """Check whether the backup ToC can be read, and whether it contains specific record.""" 89 | 90 | with bound_contextvars(backup_path=backup_path): 91 | log.debug("Testing readability of the backup") 92 | result = subprocess.run(["pg_restore", "-l", str(backup_path)], capture_output=True) 93 | if result.returncode != 0: 94 | log.error("Backup is not a valid PostgreSQL dump", result=result) 95 | return False 96 | 97 | with bound_contextvars(expected_record=expected_record): 98 | log.debug("Checking if backup contains expected record") 99 | if expected_record not in result.stdout.decode(): 100 | log.error("Backup is missing expected record", result=result) 101 | return False 102 | 103 | return True 104 | 105 | 106 | class BackupManager: 107 | def iter_backups(self) -> Iterator[Backup]: 108 | raise NotImplementedError 109 | 110 | def check_is_operational(self, backup: Backup) -> bool: 111 | raise NotImplementedError 112 | 113 | 114 | @dataclass 115 | class LocalBackupManager(BackupManager): 116 | backups_path: Path 117 | extension: str = "zstd" 118 | 119 | def iter_backups(self) -> Iterator[Backup]: 120 | for backup_path in sorted(self.backups_path.glob(f"*.{self.extension}")): 121 | stat = backup_path.stat() 122 | yield Backup( 123 | location=backup_path, 124 | created_at=datetime.fromtimestamp(stat.st_ctime, tz=UTC), 125 | size_bytes=stat.st_size, 126 | ) 127 | 128 | @cached_method 129 | def check_is_operational(self, backup: Backup) -> bool: 130 | return check_pg_restore(backup.location) 131 | 132 | 133 | @dataclass 134 | class B2BackupManager(BackupManager): 135 | bucket_name: str 136 | application_key_id: str 137 | application_key: str 138 | 139 | b2: B2Api = field(default_factory=lambda: B2Api(InMemoryAccountInfo())) 140 | integrity_check_download_bytes: int = 1024 * 1024 * 10 141 | 142 | def __post_init__(self): 143 | log.debug("Authorizing B2 account") 144 | self.b2.authorize_account("production", self.application_key_id, self.application_key) 145 | 146 | def iter_backups(self) -> Iterator[Backup]: 147 | bucket = self.b2.get_bucket_by_name(B2_BUCKET) 148 | for file_version, _ in bucket.ls(B2_FOLDER): 149 | yield Backup( 150 | location=Path(file_version.file_name), 151 | created_at=datetime.fromtimestamp(file_version.upload_timestamp / 1000, tz=UTC), 152 | size_bytes=file_version.size, 153 | ) 154 | 155 | @cached_method 156 | def check_is_operational(self, backup: Backup) -> bool: 157 | """ 158 | Download only head of the backup file, and use it to retrieve DB ToC. 
159 | N.B.: If it fails, switch to streaming: curl -s | pg_restore -l 160 | """ 161 | 162 | log.debug("Downloading a part of the backup for integrity check", backup=backup) 163 | bucket = self.b2.get_bucket_by_name(self.bucket_name) 164 | downloaded_file = bucket.download_file_by_name( 165 | str(backup.location), range_=(0, self.integrity_check_download_bytes) 166 | ) 167 | with NamedTemporaryFile() as temp_file: 168 | downloaded_file.save(temp_file) 169 | return check_pg_restore(temp_file.name) 170 | 171 | 172 | def update_metrics() -> None: 173 | if B2_BUCKET: 174 | log.debug("Using B2 backup manager") 175 | manager = B2BackupManager( 176 | application_key_id=B2_APPLICATION_KEY_ID, 177 | application_key=B2_APPLICATION_KEY, 178 | bucket_name=B2_BUCKET, 179 | ) 180 | else: 181 | log.debug("Using local backup manager") 182 | manager = LocalBackupManager(LOCAL_BACKUP_PATH) 183 | 184 | backups = manager.iter_backups() 185 | first_backup, last_backup = None, None 186 | num_backups = 0 187 | total_size = 0 188 | for num_backups, backup in enumerate(backups, start=1): # noqa: B007 189 | log.debug("Processing backup", backup=backup) 190 | size_mib = backup.size_bytes / 1024 / 1024 191 | backup_size.observe(size_mib) 192 | total_size += size_mib 193 | 194 | if not first_backup or first_backup.created_at > backup.created_at: 195 | first_backup = backup 196 | 197 | if not last_backup or last_backup.created_at < backup.created_at: 198 | last_backup = backup 199 | 200 | backup_count.set(num_backups) 201 | total_backup_size.set(total_size) 202 | 203 | if first_backup: 204 | first_backup_time.set(first_backup.created_at.timestamp()) 205 | 206 | if last_backup: 207 | last_backup_time.set(last_backup.created_at.timestamp()) 208 | log.debug("Checking newest backup", last_backup=last_backup) 209 | is_operational = manager.check_is_operational(last_backup) 210 | last_backup_is_operational.set(is_operational) 211 | else: 212 | log.debug("No backups found") 213 | 214 | 215 | if __name__ == "__main__": 216 | parser = ArgumentParser() 217 | parser.add_argument("--port", type=int, default=8000) 218 | parser.add_argument( 219 | "--interval", type=int, default=timedelta(minutes=10).total_seconds(), help="Refresh interval (s)" 220 | ) 221 | parser.add_argument("--log-level", type=str, default="INFO", help="Logging level") 222 | args = parser.parse_args() 223 | 224 | level = getattr(logging, args.log_level.upper()) 225 | structlog.configure(wrapper_class=structlog.make_filtering_bound_logger(level)) 226 | 227 | log.info("Starting metrics server", port=args.port, interval=args.interval) 228 | start_http_server(args.port) 229 | while True: 230 | log.debug("Updating metrics") 231 | update_metrics() 232 | time.sleep(args.interval) 233 | -------------------------------------------------------------------------------- /{{cookiecutter.repostory_name}}/envs/prod/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | services: 4 | redis: 5 | {% if cookiecutter.use_valkey == "y" %} 6 | image: mirror.gcr.io/valkey/valkey:8.1-alpine 7 | command: valkey-server --appendonly yes 8 | healthcheck: 9 | test: valkey-cli ping 10 | {% else %} 11 | image: mirror.gcr.io/redis:6-alpine 12 | command: redis-server --appendonly yes 13 | healthcheck: 14 | test: redis-cli ping 15 | {% endif %} 16 | restart: unless-stopped 17 | volumes: 18 | - ./redis/data:/data 19 | logging: &logging 20 | driver: journald 21 | options: 22 | tag: '{% raw %}{{.Name}}{% endraw %}' 23 | 24 | 
db: 25 | image: postgres:14.0-alpine 26 | healthcheck: 27 | test: pg_isready -U ${POSTGRES_USER} || exit 1 28 | restart: unless-stopped 29 | env_file: ./.env 30 | environment: 31 | - POSTGRES_DB=${POSTGRES_DB} 32 | - POSTGRES_USER=${POSTGRES_USER} 33 | - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} 34 | volumes: 35 | - ./db/data:/var/lib/postgresql/data 36 | logging: 37 | <<: *logging 38 | 39 | backups: 40 | build: 41 | context: backups/ 42 | dockerfile: Dockerfile 43 | init: true 44 | restart: unless-stopped 45 | environment: 46 | - DATABASE_URL=${DATABASE_URL} 47 | - BACKUP_LOCAL_DIR=/var/backups 48 | - BACKUP_LOCAL_ROTATE_KEEP_LAST=${BACKUP_LOCAL_ROTATE_KEEP_LAST} 49 | - B2_BUCKET=${BACKUP_B2_BUCKET} 50 | - B2_FOLDER=${BACKUP_B2_FOLDER} 51 | - B2_APPLICATION_KEY_ID=${BACKUP_B2_APPLICATION_KEY_ID} 52 | - B2_APPLICATION_KEY=${BACKUP_B2_APPLICATION_KEY} 53 | - EMAIL_HOST=${EMAIL_HOST} 54 | - EMAIL_PORT=${EMAIL_PORT} 55 | - EMAIL_HOST_USER=${EMAIL_HOST_USER} 56 | - EMAIL_HOST_PASSWORD=${EMAIL_HOST_PASSWORD} 57 | - EMAIL_TARGET=${EMAIL_TARGET} 58 | - DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL} 59 | - SENTRY_DSN=${SENTRY_DSN} 60 | volumes: 61 | - backups:/var/backups 62 | depends_on: 63 | - db 64 | logging: 65 | <<: *logging 66 | 67 | app: 68 | build: 69 | context: . 70 | dockerfile: app/Dockerfile 71 | image: {{cookiecutter.django_project_name}}/app 72 | healthcheck: 73 | test: ["CMD", "./healthcheck.py", "/var/run/gunicorn/gunicorn.sock"] 74 | init: true 75 | restart: unless-stopped 76 | env_file: ./.env 77 | {% if cookiecutter.monitoring == 'y' %} 78 | environment: 79 | # Add this variable to all containers that should dump Prometheus metrics. Each container besides this one 80 | # should use a different subdirectory of /prometheus-multiproc-dir, e.g. 81 | # - PROMETHEUS_MULTIPROC_DIR=/prometheus-multiproc-dir/other-container 82 | # Don't forget to also mount the prometheus-metrics volume in other containers too. 83 | - PROMETHEUS_MULTIPROC_DIR=/prometheus-multiproc-dir 84 | {% endif %} 85 | volumes: 86 | - backend-static:/root/src/static 87 | - gunicorn-socket:/var/run/gunicorn 88 | - ./media:/root/src/media 89 | {% if cookiecutter.monitoring == 'y' %} 90 | # Add this mount to each container that should dump Prometheus metrics. 
91 | - ./prometheus-metrics:/prometheus-multiproc-dir 92 | {% endif %} 93 | depends_on: 94 | - redis 95 | - db 96 | logging: 97 | <<: *logging 98 | 99 | {% if cookiecutter.use_celery == "y" %} 100 | celery-worker: 101 | image: {{cookiecutter.django_project_name}}/app 102 | init: true 103 | healthcheck: 104 | test: celery -A {{cookiecutter.django_project_name}} status > /dev/null || exit 1 105 | restart: unless-stopped 106 | env_file: ./.env 107 | environment: 108 | - DEBUG=off 109 | {% if cookiecutter.monitoring == 'y' %} 110 | - PROMETHEUS_MULTIPROC_DIR=/prometheus-multiproc-dir/celery-worker 111 | {% endif %} 112 | command: ./celery-entrypoint.sh 113 | {% if cookiecutter.monitoring == 'y' %} 114 | volumes: 115 | - ./prometheus-metrics:/prometheus-multiproc-dir 116 | {% endif %} 117 | tmpfs: /run 118 | depends_on: 119 | - redis 120 | logging: 121 | <<: *logging 122 | 123 | celery-beat: 124 | image: {{cookiecutter.django_project_name}}/app 125 | init: true 126 | restart: unless-stopped 127 | env_file: ./.env 128 | environment: 129 | - DEBUG=off 130 | command: nice celery -A {{cookiecutter.django_project_name}} beat -l INFO --schedule /tmp/celerybeat-schedule -f /tmp/logs/celery-beat.log 131 | volumes: 132 | - ./logs:/tmp/logs 133 | depends_on: 134 | - redis 135 | logging: 136 | <<: *logging 137 | 138 | {% if cookiecutter.use_flower == "y" %} 139 | celery-flower: 140 | image: {{cookiecutter.django_project_name}}/app 141 | healthcheck: 142 | test: wget --user "${CELERY_FLOWER_USER}" --password "${CELERY_FLOWER_PASSWORD}" -qO- 127.0.0.1:5555 > /dev/null || exit 1 143 | init: true 144 | restart: unless-stopped 145 | env_file: ./.env 146 | environment: 147 | - DEBUG=off 148 | - FLOWER_TASK_RUNTIME_METRIC_BUCKETS=1,2,3,5,10,20,30,45,60,120,180,240,300,600,inf 149 | command: celery --app={{cookiecutter.django_project_name}} --broker="${CELERY_BROKER_URL}" flower --basic_auth="${CELERY_FLOWER_USER}:${CELERY_FLOWER_PASSWORD}" 150 | depends_on: 151 | - celery-worker 152 | ports: 153 | - 127.0.0.1:5555:5555 154 | logging: 155 | <<: *logging 156 | {% endif %} 157 | {% endif %} 158 | 159 | nginx: 160 | image: 'ghcr.io/reef-technologies/nginx-rt:v1.2.2' 161 | restart: unless-stopped 162 | healthcheck: 163 | test: [ 164 | "CMD-SHELL", 165 | "curl 0.0.0.0:80/alive/ -s --fail -H \"Host: $NGINX_HOST\" -H \"User-Agent: docker-compose-healthcheck\" -o /dev/null || exit 1" 166 | ] 167 | interval: 30s 168 | retries: 5 169 | start_period: 20s 170 | timeout: 10s 171 | environment: 172 | - NGINX_HOST=${NGINX_HOST} 173 | volumes: 174 | - ./nginx/templates:/etc/nginx/templates 175 | - ./nginx/config_helpers:/etc/nginx/config_helpers 176 | - backend-static:/srv/static:ro 177 | - ./media:/srv/media:ro 178 | - ./letsencrypt/etc:/etc/letsencrypt 179 | - ./nginx/monitoring_certs:/etc/monitoring_certs 180 | - gunicorn-socket:/var/run/gunicorn:ro 181 | depends_on: 182 | - app 183 | {% if cookiecutter.monitoring == 'y' %} 184 | - cadvisor 185 | - node-exporter 186 | {% endif %} 187 | command: nginx -g 'daemon off;' 188 | ports: 189 | - 80:80 190 | - 443:443 191 | {% if cookiecutter.monitoring == 'y' %} 192 | - 10443:10443 193 | {% endif %} 194 | logging: 195 | <<: *logging 196 | {% if cookiecutter.monitoring == 'y' %} 197 | extra_hosts: 198 | - "host.docker.internal:host-gateway" 199 | {% endif %} 200 | 201 | {% if cookiecutter.monitoring == 'y' %} 202 | node-exporter: 203 | image: prom/node-exporter:latest 204 | container_name: node-exporter 205 | restart: unless-stopped 206 | network_mode: host 207 | pid: host 208 | 
volumes: 209 | - /:/host:ro,rslave 210 | - nodeexporter_collectors:/textfile_collectors 211 | 212 | command: 213 | - '--path.rootfs=/host' 214 | - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc|run|boot|var/.+)($$|/)' 215 | {% if cookiecutter.vulnerabilities_scanning == 'y' %} 216 | - '--collector.textfile.directory=textfile_collectors' 217 | {% endif %} 218 | {% if cookiecutter.monitor_tcpstat == 'y' %} 219 | - '--collector.tcpstat' 220 | {% endif %} 221 | logging: 222 | <<: *logging 223 | 224 | cadvisor: 225 | image: gcr.io/cadvisor/cadvisor:v0.40.0 226 | container_name: cadvisor 227 | devices: 228 | - /dev/kmsg:/dev/kmsg 229 | volumes: 230 | - /:/rootfs:ro 231 | - /var/run:/var/run:ro 232 | - /sys:/sys:ro 233 | - /var/lib/docker:/var/lib/docker:ro 234 | - /cgroup:/cgroup:ro 235 | restart: unless-stopped 236 | logging: 237 | <<: *logging 238 | {% endif %} 239 | 240 | {% if cookiecutter.log_aggregating == 'y' %} 241 | alloy: 242 | image: grafana/alloy:v1.8.3 243 | restart: unless-stopped 244 | environment: 245 | - LOKI_URL=${LOKI_URL} 246 | - LOKI_USER=${LOKI_USER} 247 | - LOKI_PASSWORD=${LOKI_PASSWORD} 248 | volumes: 249 | - ./alloy:/etc/alloy 250 | - /var/run/docker.sock:/var/run/docker.sock:ro 251 | - alloy-data:/var/lib/alloy 252 | command: 253 | - run 254 | - --storage.path=/var/lib/alloy/data 255 | - /etc/alloy/config.alloy 256 | logging: 257 | <<: *logging 258 | {% endif %} 259 | {% if cookiecutter.vulnerabilities_scanning == 'y' %} 260 | vulnrelay: 261 | image: 'ghcr.io/reef-technologies/vulnrelay:latest' 262 | container_name: vulnrelay 263 | restart: unless-stopped 264 | env_file: ./.vuln.env 265 | environment: 266 | - METRICS_DIR=/app/metrics 267 | volumes: 268 | - /var/run/docker.sock:/var/run/docker.sock:ro 269 | - nodeexporter_collectors:/app/metrics 270 | logging: 271 | <<: *logging 272 | watchtower: 273 | image: containrrr/watchtower 274 | restart: unless-stopped 275 | volumes: 276 | - /var/run/docker.sock:/var/run/docker.sock:ro 277 | command: --interval 7200 vulnrelay 278 | logging: 279 | <<: *logging 280 | {% endif %} 281 | 282 | volumes: 283 | backend-static: 284 | backups: 285 | gunicorn-socket: 286 | nodeexporter_collectors: 287 | alloy-data: 288 | -------------------------------------------------------------------------------- /noxfile.py: -------------------------------------------------------------------------------- 1 | """ 2 | nox configuration for cookiecutter project template. 
3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import contextlib 8 | import functools 9 | import hashlib 10 | import json 11 | import os 12 | import subprocess 13 | import tempfile 14 | from pathlib import Path 15 | 16 | import nox 17 | 18 | CI = os.environ.get("CI") is not None 19 | 20 | ROOT = Path(".") 21 | PYTHON_VERSIONS = ["3.11"] 22 | PYTHON_DEFAULT_VERSION = PYTHON_VERSIONS[-1] 23 | 24 | # tested default config overrides 25 | CRUFT_TESTED_CONFIG_MATRIX = { 26 | "default": {}, 27 | } 28 | CRUFT_TESTED_CONFIGS = os.getenv("CRUFT_TESTED_CONFIGS", ",".join(CRUFT_TESTED_CONFIG_MATRIX)).split(",") 29 | 30 | nox.options.default_venv_backend = "uv" 31 | nox.options.reuse_existing_virtualenvs = True 32 | 33 | MD_PATHS = ["*.md"] 34 | 35 | 36 | def get_cruft_config(config_name="default", **kw): 37 | with (Path(__file__).parent / "cookiecutter.json").open() as f: 38 | cruft_config = json.load(f) 39 | overrides = CRUFT_TESTED_CONFIG_MATRIX[config_name] 40 | complete_config = {**cruft_config, **overrides, **kw} 41 | config_hash = hashlib.sha256(json.dumps(complete_config, sort_keys=True).encode()).hexdigest() 42 | complete_config["django_project_name"] = f"{config_name}_{config_hash[:8]}" 43 | complete_config["repostory_name"] = complete_config["django_project_name"].replace("_", "-") 44 | return complete_config 45 | 46 | 47 | @contextlib.contextmanager 48 | def with_dirty_commit(session): 49 | """ 50 | Returned context manager will commit changes to the git repository if it is dirty. 51 | 52 | This is needed because tools like `cruft` only use committed changes. 53 | """ 54 | is_dirty = not CI and subprocess.run(["git", "diff", "--quiet"], check=False).returncode 55 | if is_dirty: 56 | with tempfile.TemporaryDirectory(prefix="rt_tmpl_repo") as tmpdir: 57 | session.log(f"Found dirty git repository, temporarily committing changes in {tmpdir}") 58 | subprocess.run(["cp", "-r", ".", tmpdir], check=True) 59 | with session.chdir(tmpdir): 60 | subprocess.run(["git", "add", "-A"], check=True) 61 | subprocess.run(["git", "commit", "-m", "nox: dirty commit"], check=True) 62 | yield 63 | else: 64 | yield 65 | 66 | 67 | @functools.lru_cache 68 | def _list_files() -> list[Path]: 69 | file_list = [] 70 | for cmd in ( 71 | ["git", "ls-files"], 72 | ["git", "ls-files", "--others", "--exclude-standard"], 73 | ): 74 | cmd_result = subprocess.run(cmd, check=True, text=True, capture_output=True) 75 | file_list.extend(cmd_result.stdout.splitlines()) 76 | return [Path(p) for p in file_list] 77 | 78 | 79 | def list_files(suffix: str | None = None) -> list[Path]: 80 | """List all non-files not-ignored by git.""" 81 | file_paths = _list_files() 82 | if suffix is not None: 83 | file_paths = [p for p in file_paths if p.suffix == suffix] 84 | return file_paths 85 | 86 | 87 | def run_readable(session, mode="check"): 88 | session.run( 89 | "docker", 90 | "run", 91 | "--platform", 92 | "linux/amd64", 93 | "--rm", 94 | "-v", 95 | f"{ROOT.absolute()}:/data", 96 | "-w", 97 | "/data", 98 | "ghcr.io/bobheadxi/readable:v0.5.0@sha256:423c133e7e9ca0ac20b0ab298bd5dbfa3df09b515b34cbfbbe8944310cc8d9c9", 99 | mode, 100 | *MD_PATHS, 101 | external=True, 102 | ) 103 | 104 | 105 | def run_shellcheck(session, mode="check"): 106 | shellcheck_cmd = [ 107 | "docker", 108 | "run", 109 | "--platform", 110 | "linux/amd64", # while this image is multi-arch, we cannot use digest with multi-arch images 111 | "--rm", 112 | "-v", 113 | f"{ROOT.absolute()}:/mnt", 114 | "-w", 115 | "/mnt", 116 | "-q", 117 | 
"koalaman/shellcheck:0.9.0@sha256:a527e2077f11f28c1c1ad1dc784b5bc966baeb3e34ef304a0ffa72699b01ad9c", 118 | ] 119 | 120 | files = list_files(suffix=".sh") 121 | if not files: 122 | session.log("No shell files found") 123 | return 124 | shellcheck_cmd.extend(files) 125 | 126 | if mode == "fmt": 127 | with tempfile.NamedTemporaryFile(mode="w+") as diff_file: 128 | session.run( 129 | *shellcheck_cmd, 130 | "--format=diff", 131 | external=True, 132 | stdout=diff_file, 133 | success_codes=[0, 1], 134 | ) 135 | diff_file.seek(0) 136 | diff = diff_file.read() 137 | if len(diff.splitlines()) > 1: # ignore single-line message 138 | session.log("Applying shellcheck patch:\n%s", diff) 139 | subprocess.run( 140 | ["patch", "-p1"], 141 | input=diff, 142 | text=True, 143 | check=True, 144 | ) 145 | 146 | session.run(*shellcheck_cmd, external=True) 147 | 148 | 149 | @nox.session(name="format", python=PYTHON_DEFAULT_VERSION) 150 | def format_(session): 151 | """Lint the code and apply fixes in-place whenever possible.""" 152 | uv_env = getattr(session.virtualenv, "location", os.getenv("VIRTUAL_ENV")) 153 | session.run_install( 154 | "uv", 155 | "sync", 156 | "--locked", 157 | "--extra", 158 | "format", 159 | env={"UV_PROJECT_ENVIRONMENT": uv_env}, 160 | ) 161 | 162 | session.run("ruff", "check", "--fix", ".") 163 | run_shellcheck(session, mode="fmt") 164 | run_readable(session, mode="fmt") 165 | session.run("ruff", "format", ".") 166 | 167 | 168 | @nox.session(python=PYTHON_DEFAULT_VERSION) 169 | def lint(session): 170 | """Run linters in readonly mode.""" 171 | uv_env = getattr(session.virtualenv, "location", os.getenv("VIRTUAL_ENV")) 172 | session.run_install( 173 | "uv", 174 | "sync", 175 | "--locked", 176 | "--extra", 177 | "lint", 178 | env={"UV_PROJECT_ENVIRONMENT": uv_env}, 179 | ) 180 | 181 | session.run("ruff", "check", "--diff", "--unsafe-fixes", ".") 182 | session.run("codespell", ".") 183 | run_shellcheck(session, mode="check") 184 | run_readable(session, mode="check") 185 | session.run("ruff", "format", "--diff", ".") 186 | 187 | 188 | @contextlib.contextmanager 189 | def crufted_project(session, cruft_config): 190 | uv_env = getattr(session.virtualenv, "location", os.getenv("VIRTUAL_ENV")) 191 | session.run_install( 192 | "uv", 193 | "sync", 194 | "--locked", 195 | "--extra", 196 | "format", # ruff is needed for the formatter hook 197 | env={"UV_PROJECT_ENVIRONMENT": uv_env}, 198 | ) 199 | 200 | tmpdir = crufted_project.tmpdir 201 | if not tmpdir: 202 | session.notify("cleanup_crufted_project") 203 | crufted_project.tmpdir = tmpdir = tempfile.TemporaryDirectory(prefix="rt_crufted_") 204 | tmpdir_path = Path(tmpdir.name) 205 | tmpdir_path.mkdir(exist_ok=True) 206 | 207 | project_path = tmpdir_path / cruft_config["repostory_name"] 208 | if not project_path.exists(): 209 | session.log("Creating project in %s", tmpdir.name) 210 | with with_dirty_commit(session): 211 | session.run( 212 | "cruft", 213 | "create", 214 | ".", 215 | "--output-dir", 216 | str(tmpdir_path), 217 | "--no-input", 218 | "--extra-context", 219 | json.dumps(cruft_config), 220 | ) 221 | with session.chdir(project_path): 222 | session.run("git", "init", external=True) 223 | session.run("./setup-dev.sh", external=True) 224 | 225 | with session.chdir(project_path): 226 | yield project_path 227 | 228 | 229 | crufted_project.tmpdir = None 230 | 231 | 232 | def rm_root_owned(session, dirpath): 233 | assert not ROOT.is_relative_to(dirpath) # sanity check before we nuke dirpath 234 | children = sorted(dirpath.iterdir()) 235 | 
session.run( 236 | "docker", 237 | "run", 238 | "--rm", 239 | "-v", 240 | f"{dirpath}:/tmpdir/", 241 | "alpine:3.18.0", 242 | "rm", 243 | "-rf", 244 | *[f"/tmpdir/{f.name}" for f in children], 245 | external=True, 246 | ) 247 | 248 | 249 | @contextlib.contextmanager 250 | def docker_up(session): 251 | session.run("docker", "compose", "up", "-d") 252 | try: 253 | yield 254 | finally: 255 | session.run("docker", "compose", "down", "-v", "--remove-orphans") 256 | 257 | 258 | @nox.session(python=PYTHON_DEFAULT_VERSION, tags=["crufted_project"]) 259 | @nox.parametrize("cruft_config_name", CRUFT_TESTED_CONFIGS) 260 | def lint_crufted_project(session, cruft_config_name): 261 | cruft_config = get_cruft_config(cruft_config_name) 262 | with crufted_project(session, cruft_config): 263 | session.run("nox", "-s", "lint") # TODO: RT-49 re-enable 'type_check' 264 | 265 | 266 | @nox.session(python=PYTHON_DEFAULT_VERSION, tags=["crufted_project"]) 267 | @nox.parametrize("cruft_config_name", CRUFT_TESTED_CONFIGS) 268 | def test_crufted_project(session, cruft_config_name): 269 | cruft_config = get_cruft_config(cruft_config_name) 270 | with crufted_project(session, cruft_config): 271 | with docker_up(session): 272 | session.run("nox", "-s", "test") 273 | 274 | 275 | @nox.session(python=PYTHON_DEFAULT_VERSION) 276 | def cleanup_crufted_project(session): 277 | if crufted_project.tmpdir: 278 | # workaround for docker compose creating root-owned files 279 | rm_root_owned(session, Path(crufted_project.tmpdir.name)) 280 | crufted_project.tmpdir.cleanup() 281 | crufted_project.tmpdir = None 282 | --------------------------------------------------------------------------------