├── scalable_airflow
│   ├── dags
│   │   ├── __init__.py
│   │   ├── bash_operator_flow.py
│   │   └── kube_pod_operator_flow.py
│   ├── config
│   │   ├── __init__.py
│   │   ├── kube
│   │   │   ├── git-credentials.secret.yaml
│   │   │   ├── airflow-role-binding.yaml
│   │   │   └── cwagent-configmap.yaml
│   │   ├── docker
│   │   │   └── Dockerfile
│   │   ├── helm
│   │   │   └── charts
│   │   │       ├── airflow-celery.yaml
│   │   │       └── airflow-kubernetes.yaml
│   │   └── airflow
│   │       └── airflow.cfg
│   ├── scripts
│   │   └── entrypoint.sh
│   ├── requirements.txt
│   ├── .gitignore
│   ├── dockerignore
│   ├── Makefile
│   └── README.md
├── cookiecutter.json
├── .gitignore
└── README.md
/scalable_airflow/dags/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/scalable_airflow/config/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/scalable_airflow/requirements.txt:
--------------------------------------------------------------------------------
1 | apache-airflow==1.10.9
--------------------------------------------------------------------------------
/scalable_airflow/config/kube/git-credentials.secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 |   name: git-credentials
5 | data:
6 |   GIT_SYNC_USERNAME: {{ cookiecutter.git_username_in_base_64 }}
7 |   GIT_SYNC_PASSWORD: {{ cookiecutter.git_password_in_base_64 }}
--------------------------------------------------------------------------------
/scalable_airflow/config/kube/airflow-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 |   name: admin-rbac
5 | subjects:
6 |   - kind: ServiceAccount
7 |     name: airflow-service-account
8 |     namespace: airflow
9 | roleRef:
10 |   kind: ClusterRole
11 |   name: cluster-admin
12 |   apiGroup: rbac.authorization.k8s.io
--------------------------------------------------------------------------------
/cookiecutter.json:
--------------------------------------------------------------------------------
1 | {
2 | "airflow_executor": [
3 | "Kubernetes",
4 | "Celery"
5 | ],
6 | "local_airflow_image_name": "xxxxx/airflow",
7 | "airflow_image_repository": "puckel/docker-airflow",
8 | "git_repo_to_sync_dags": "https://github.com/xxxxxx/flows.git",
9 | "git_username_in_base_64": "2NTTi6bOy",
10 | "git_password_in_base_64": "laTQudhlZe2NTTi6bOyV",
11 | "fernet_key": "sS_tNRk3JhlaTQudhlZe2NTTi6bOyVkZTJSHsz4Z13Q="
12 | }
--------------------------------------------------------------------------------
/scalable_airflow/config/kube/cwagent-configmap.yaml:
--------------------------------------------------------------------------------
1 | # Create the ConfigMap for the CloudWatch agent configuration
2 | apiVersion: v1
3 | data:
4 |   # Configuration is in JSON format. Whatever configuration changes you make,
5 |   # please keep the JSON blob valid.
6 |   cwagentconfig.json: |
7 |     {
8 |       "logs": {
9 |         "metrics_collected": {
10 |           "kubernetes": {
11 |             "cluster_name": "test-2",
12 |             "metrics_collection_interval": 60
13 |           }
14 |         },
15 |         "force_flush_interval": 5
16 |       }
17 |     }
18 | kind: ConfigMap
19 | metadata:
20 |   name: cwagentconfig
21 |   namespace: amazon-cloudwatch
--------------------------------------------------------------------------------
/scalable_airflow/dags/bash_operator_flow.py:
--------------------------------------------------------------------------------
1 | """
2 | Code that goes along with the Airflow located at:
3 | http://airflow.readthedocs.org/en/latest/tutorial.html
4 | """
5 | from airflow import DAG
6 | from airflow.operators.bash_operator import BashOperator
7 | from datetime import datetime, timedelta
8 |
9 |
10 | default_args = {
11 | "owner": "airflow",
12 | "depends_on_past": False,
13 | "start_date": datetime(2015, 6, 1),
14 | "email": ["airflow@airflow.com"],
15 | "email_on_failure": False,
16 | "email_on_retry": False,
17 | "retries": 1,
18 | "retry_delay": timedelta(minutes=5),
19 | }
20 |
21 | dag = DAG("tutorial", default_args=default_args, schedule_interval=timedelta(1))
22 |
23 | # t1, t2 and t3 are examples of tasks created by instantiating operators
24 | t1 = BashOperator(task_id="print_date", bash_command="date", dag=dag)
25 |
26 | t2 = BashOperator(task_id="sleep", bash_command="sleep 5", retries=3, dag=dag)
27 |
28 | templated_command = """
29 | {% for i in range(5) %}
30 | echo "{{ ds }}"
31 | echo "{{ macros.ds_add(ds, 7)}}"
32 | echo "{{ params.my_param }}"
33 | {% endfor %}
34 | """
35 |
36 | t3 = BashOperator(
37 | task_id="templated",
38 | bash_command=templated_command,
39 | params={"my_param": "Parameter I passed in"},
40 | dag=dag,
41 | )
42 |
43 | t2.set_upstream(t1)
44 | t3.set_upstream(t1)
--------------------------------------------------------------------------------
/scalable_airflow/.gitignore:
--------------------------------------------------------------------------------
1 | resources/*
2 | staticfiles/*
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | .hypothesis/
50 | .pytest_cache/
51 |
52 | # Translations
53 | *.mo
54 | *.pot
55 |
56 | # Django stuff:
57 | *.log
58 | local_settings.py
59 | db.sqlite3
60 |
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 |
65 | # Scrapy stuff:
66 | .scrapy
67 |
68 | # Sphinx documentation
69 | docs/_build/
70 |
71 | # PyBuilder
72 | target/
73 |
74 | # Jupyter Notebook
75 | .ipynb_checkpoints
76 |
77 | # pyenv
78 | .python-version
79 |
80 | # celery beat schedule file
81 | celerybeat-schedule
82 |
83 | # SageMath parsed files
84 | *.sage.py
85 |
86 | # Environments
87 | .env
88 | .venv
89 | env/
90 | venv/
91 | ENV/
92 | env.bak/
93 | venv.bak/
94 |
95 | # Spyder project settings
96 | .spyderproject
97 | .spyproject
98 |
99 | # Rope project settings
100 | .ropeproject
101 |
102 | # mkdocs documentation
103 | /site
104 |
105 | # mypy
106 | .mypy_cache/
107 |
108 | # DS Store
109 | .DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/*
2 | resources/*
3 | staticfiles/*
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 | .pytest_cache/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 |
62 | # Flask stuff:
63 | instance/
64 | .webassets-cache
65 |
66 | # Scrapy stuff:
67 | .scrapy
68 |
69 | # Sphinx documentation
70 | docs/_build/
71 |
72 | # PyBuilder
73 | target/
74 |
75 | # Jupyter Notebook
76 | .ipynb_checkpoints
77 |
78 | # pyenv
79 | .python-version
80 |
81 | # celery beat schedule file
82 | celerybeat-schedule
83 |
84 | # SageMath parsed files
85 | *.sage.py
86 |
87 | # Environments
88 | .env
89 | .venv
90 | env/
91 | venv/
92 | ENV/
93 | env.bak/
94 | venv.bak/
95 |
96 | # Spyder project settings
97 | .spyderproject
98 | .spyproject
99 |
100 | # Rope project settings
101 | .ropeproject
102 |
103 | # mkdocs documentation
104 | /site
105 |
106 | # mypy
107 | .mypy_cache/
108 |
109 | # DS Store
110 | .DS_Store
111 | /.idea/dbnavigator.xml
112 |
--------------------------------------------------------------------------------
/scalable_airflow/dockerignore:
--------------------------------------------------------------------------------
1 |
2 | ### Python ###
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 |
9 | *.json
10 | **/virtualenv/
11 |
12 |
13 | # C extensions
14 | *.so
15 |
16 | # Distribution / packaging
17 | .git
18 | *.zip
19 | .Python
20 | env/
21 | build/
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | *.egg-info/
33 | .installed.cfg
34 | *.egg
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .coverage
50 | .coverage.*
51 | .cache
52 | nosetests.xml
53 | coverage.xml
54 | *.cover
55 | .hypothesis/
56 |
57 | # Translations
58 | *.mo
59 | *.pot
60 |
61 | # Django stuff:
62 | *.log
63 | local_settings.py
64 |
65 | # Flask stuff:
66 | instance/
67 | .webassets-cache
68 |
69 | # Scrapy stuff:
70 | .scrapy
71 |
72 | # Sphinx documentation
73 | docs/_build/
74 |
75 | # PyBuilder
76 | target/
77 |
78 | # IPython Notebook
79 | .ipynb_checkpoints
80 |
81 | # pyenv
82 | .python-version
83 |
84 | # celery beat schedule file
85 | celerybeat-schedule
86 |
87 | # dotenv
88 | .env
89 |
90 | # virtualenv
91 | virtualenv/
92 | .venv/
93 | venv/
94 | ENV/
95 |
96 | # Spyder project settings
97 | .spyderproject
98 |
99 | # Rope project settings
100 | .ropeproject
101 |
102 |
103 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
104 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
105 |
106 | # User-specific stuff:
107 | .idea
108 | .idea/workspace.xml
109 | .idea/tasks.xml
110 |
111 | # Sensitive or high-churn files:
112 | .idea/dataSources.ids
113 | .idea/dataSources.xml
114 | .idea/dataSources.local.xml
115 | .idea/sqlDataSources.xml
116 | .idea/dynamic.xml
117 | .idea/uiDesigner.xml
118 |
119 | # Gradle:
120 | .idea/gradle.xml
121 | .idea/libraries
122 |
123 | # Mongo Explorer plugin:
124 | .idea/mongoSettings.xml
125 |
126 | ## File-based project format:
127 | *.iws
128 |
129 | ## Plugin-specific files:
130 |
131 | # IntelliJ
132 | /out/
133 |
134 | # mpeltonen/sbt-idea plugin
135 | .idea_modules/
136 |
137 | # JIRA plugin
138 | atlassian-ide-plugin.xml
139 |
140 | # Crashlytics plugin (for Android Studio and IntelliJ)
141 | com_crashlytics_export_strings.xml
142 | crashlytics.properties
143 | crashlytics-build.properties
144 | fabric.properties
--------------------------------------------------------------------------------
/scalable_airflow/config/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7-slim-buster
2 | LABEL maintainer="Puckel_"
3 |
4 | # Never prompt the user for choices on installation/configuration of packages
5 | ENV DEBIAN_FRONTEND noninteractive
6 | ENV TERM linux
7 |
8 | # Airflow
9 | ARG AIRFLOW_VERSION=1.10.9
10 | ARG AIRFLOW_USER_HOME=/usr/local/airflow/
11 | ARG AIRFLOW_DEPS="async,aws,crypto,google_auth,postgres,slack,kubernetes"
12 | ARG PYTHON_DEPS=""
13 | ENV AIRFLOW_HOME=${AIRFLOW_USER_HOME}
14 |
15 | # Define en_US.
16 | ENV LANGUAGE en_US.UTF-8
17 | ENV LANG en_US.UTF-8
18 | ENV LC_ALL en_US.UTF-8
19 | ENV LC_CTYPE en_US.UTF-8
20 | ENV LC_MESSAGES en_US.UTF-8
21 |
22 | # Disable noisy "Handling signal" log messages:
23 | ENV GUNICORN_CMD_ARGS --log-level WARNING
24 |
25 | WORKDIR ${AIRFLOW_USER_HOME}
26 | RUN set -ex \
27 | && buildDeps=' \
28 | freetds-dev \
29 | libkrb5-dev \
30 | libsasl2-dev \
31 | libssl-dev \
32 | libffi-dev \
33 | libpq-dev \
34 | git \
35 | ' \
36 | && apt-get update -yqq \
37 | && apt-get upgrade -yqq \
38 | && apt-get install -yqq --no-install-recommends \
39 | $buildDeps \
40 | freetds-bin \
41 | build-essential \
42 | default-libmysqlclient-dev \
43 | apt-utils \
44 | curl \
45 | rsync \
46 | netcat \
47 | locales \
48 | && sed -i 's/^# en_US.UTF-8 UTF-8$/en_US.UTF-8 UTF-8/g' /etc/locale.gen \
49 | && locale-gen \
50 | && update-locale LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 \
51 | && useradd -ms /bin/bash -d ${AIRFLOW_USER_HOME} airflow \
52 | && pip install -U pip setuptools wheel \
53 | && pip install pytz \
54 | && pip install pyOpenSSL \
55 | && pip install ndg-httpsclient \
56 | && pip install pyasn1 \
57 | && pip install apache-airflow[crypto,celery,postgres,hive,jdbc,mysql,ssh${AIRFLOW_DEPS:+,}${AIRFLOW_DEPS}]==${AIRFLOW_VERSION} \
58 | && pip install 'redis==3.2' \
59 | && if [ -n "${PYTHON_DEPS}" ]; then pip install ${PYTHON_DEPS}; fi \
60 | && apt-get purge --auto-remove -yqq $buildDeps \
61 | && apt-get autoremove -yqq --purge \
62 | && apt-get clean \
63 | && rm -rf \
64 | /var/lib/apt/lists/* \
65 | /tmp/* \
66 | /var/tmp/* \
67 | /usr/share/man \
68 | /usr/share/doc \
69 | /usr/share/doc-base
70 |
71 | COPY scripts/entrypoint.sh /entrypoint.sh
72 | COPY requirements.txt ./
73 | RUN mkdir -p dags
74 | COPY dags $AIRFLOW_HOME/dags
75 | RUN pip install -r requirements.txt
76 | #COPY config/airflow/airflow.cfg ${AIRFLOW_USER_HOME}/airflow.cfg
77 |
78 | RUN chown -R airflow: ${AIRFLOW_USER_HOME}
79 |
80 | EXPOSE 8080 5555 8793
81 |
82 | USER airflow
83 | WORKDIR ${AIRFLOW_USER_HOME}
84 | ENTRYPOINT ["/entrypoint.sh"]
85 | CMD ["webserver"]
--------------------------------------------------------------------------------
/scalable_airflow/dags/kube_pod_operator_flow.py:
--------------------------------------------------------------------------------
1 | from datetime import datetime, timedelta
2 |
3 | from airflow import DAG
4 | from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
5 |
6 | default_args = {
7 | "owner": "airflow",
8 | "depends_on_past": False,
9 | "start_date": datetime(2015, 6, 1),
10 | "email": ["airflow@airflow.com"],
11 | "email_on_failure": False,
12 | "email_on_retry": False,
13 | "retries": 1,
14 | "retry_delay": timedelta(minutes=5),
15 | }
16 |
17 | company_onboarding = DAG('kube-operator',
18 | default_args=default_args,
19 | schedule_interval=timedelta(days=1))
20 | with company_onboarding:
21 |     t1 = KubernetesPodOperator(namespace='airflow',
22 |                                image="ubuntu:16.04",
23 |                                cmds=["bash", "-cx"],
24 |                                arguments=["echo hello world"],
25 |                                labels={'runner': 'airflow'},
26 |                                name="pod1",
27 |                                task_id='pod1',
28 |                                is_delete_operator_pod=True,
29 |                                hostnetwork=False,
30 |                                )
31 | 
32 |     t2 = KubernetesPodOperator(namespace='airflow',
33 |                                image="ubuntu:16.04",
34 |                                cmds=["bash", "-cx"],
35 |                                arguments=["echo hello world"],
36 |                                labels={'runner': 'airflow'},
37 |                                name="pod2",
38 |                                task_id='pod2',
39 |                                is_delete_operator_pod=True,
40 |                                hostnetwork=False,
41 |                                )
42 | 
43 |     t3 = KubernetesPodOperator(namespace='airflow',
44 |                                image="ubuntu:16.04",
45 |                                cmds=["bash", "-cx"],
46 |                                arguments=["echo hello world"],
47 |                                labels={'runner': 'airflow'},
48 |                                name="pod3",
49 |                                task_id='pod3',
50 |                                is_delete_operator_pod=True,
51 |                                hostnetwork=False,
52 |                                )
53 | 
54 |     t4 = KubernetesPodOperator(namespace='airflow',
55 |                                image="ubuntu:16.04",
56 |                                cmds=["bash", "-cx"],
57 |                                arguments=["echo hello world"],
58 |                                labels={'runner': 'airflow'},
59 |                                name="pod4",
60 |                                task_id='pod4',
61 |                                is_delete_operator_pod=True,
62 |                                hostnetwork=False,
63 |                                )
64 | 
65 | company_onboarding.doc_md = __doc__
66 | 
67 | t1 >> [t2, t3] >> t4
68 |
--------------------------------------------------------------------------------
/scalable_airflow/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: build push deploy ns cwagent fluentd destroy restart kubedash ui clean
2 |
3 | #################################################################################
4 | # GLOBALS #
5 | #################################################################################
6 |
7 | #################################################################################
8 | # COMMANDS #
9 | #################################################################################
10 |
11 | build:
12 | docker build -t {{cookiecutter.local_airflow_image_name}}:latest -f config/docker/Dockerfile .
13 |
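# logs in to ECR (region hard-coded to us-west-1), then tags and pushes the custom Airflow image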
14 | push:
15 | eval $$(aws ecr get-login --no-include-email --region us-west-1 --no-verify-ssl)
16 | docker tag {{cookiecutter.local_airflow_image_name}}:latest {{cookiecutter.airflow_image_repository}}
17 | docker push {{cookiecutter.airflow_image_repository}}
18 |
19 | ns:
20 | kubectl create namespace airflow;
21 |
22 | deploy:
23 | kubectl apply -f config/kube/git-credentials.secret.yaml --namespace airflow
24 | kubectl apply -f config/kube/airflow-role-binding.yaml --namespace airflow
25 | helm install -f config/helm/charts/airflow-{{ cookiecutter.airflow_executor.lower() }}.yaml --namespace airflow airflow stable/airflow
26 |
27 | cwagent:
28 | kubectl apply -f https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/cloudwatch-namespace.yaml
29 | kubectl apply -f https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/cwagent/cwagent-serviceaccount.yaml
30 | kubectl apply -f config/kube/cwagent-configmap.yaml
31 | kubectl apply -f https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/cwagent/cwagent-daemonset.yaml
32 |
33 | fluentd:
34 | kubectl apply -f https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/cloudwatch-namespace.yaml
35 | kubectl create configmap cluster-info --from-literal=cluster.name=test-2 --from-literal=logs.region=us-west-2 -n amazon-cloudwatch
36 | kubectl apply -f https://raw.githubusercontent.com/aws-samples/amazon-cloudwatch-container-insights/latest/k8s-deployment-manifest-templates/deployment-mode/daemonset/container-insights-monitoring/fluentd/fluentd.yaml
37 |
38 | destroy:
39 | kubectl delete namespace airflow
40 |
41 | restart:
42 | make destroy
43 | make ns
44 | make deploy
45 |
46 | kubedash:
47 | kubectl -n kube-system describe secret $$(kubectl -n kube-system get secret | grep eks-admin | awk '{print $$1}')
48 | kubectl proxy
49 |
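# usage: make ui pod=<webserver-pod-name>; port-forwards the Airflow webserver to localhost:8080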
50 | ui:
51 | kubectl -n airflow port-forward $(pod) 8080:8080
52 |
53 | clean:
54 | docker stop $$(docker ps -a -q)
55 | docker rm -v $$(docker ps -a -q)
56 | docker rmi $$(docker images -a -q)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Scalable Airflow Setup Template
2 | ##### This repo's goal is to get you up and running quickly with a scalable Airflow setup on Kubernetes.
3 |
4 | # Features
5 |
6 | :baby: **Easy Setup**: Using cookiecutter to fill in the blanks.
7 |
8 | :fire: **Disposable Infrastructure**: Using helm and some premade commands, we can destroy and re-deploy the entire infrastructure easily.
9 |
10 | :rocket: **Cost-Efficient**: We use Kubernetes as the task engine. The Airflow scheduler runs each task in a new pod and deletes it upon completion, letting us scale with the workload while using a minimal amount of resources.
11 |
12 | :nut_and_bolt: **Decoupled Executor**: Another great advantage of using Kubernetes as the task runner is that it decouples orchestration from execution. You can read more about it in [We're All Using Airflow Wrong and How to Fix It](https://medium.com/bluecore-engineering/were-all-using-airflow-wrong-and-how-to-fix-it-a56f14cb0753).
13 |
14 | :runner: **Dynamically Updated Workflows**: We use Git-Sync containers, which let us update workflows through git alone. There is no need to redeploy Airflow on each workflow change.
15 |
16 |
17 | ## Installation
18 |
19 | ```console
20 | $ cookiecutter https://github.com/talperetz/scalable-airflow-template
21 |
22 | ```
23 |
24 | ### Cookiecutter Options Explained
25 | * airflow_executor: Tasks run on Kubernetes with either Celery or Kubernetes as the executor; this choice selects which helm values file `make deploy` uses (see the example at the end of this list). To learn more, check out [Scale Your Data Pipelines with Airflow and Kubernetes](https://medium.com/@talperetz24)
26 | * local_airflow_image_name: Image name; required only if you want to build your own Airflow image.
27 | * airflow_image_repository: ECR repository link; required only if you want to build your own Airflow image.
28 | * git_repo_to_sync_dags: Link to the GitHub repository that holds your workflows (DAGs).
29 | * git_username_in_base_64:
30 | You can convert strings to base64 via shell with:
31 | ```console
32 | $ echo -n "github_username" | base64
33 | ```
34 | * git_password_in_base_64:
35 | You can convert strings to base64 via shell with:
36 | ```console
37 | $ echo -n "github_password" | base64
38 | ```
39 | * fernet_key:
40 | You can fill the fernet_key option with the output of this command:
41 | ```console
42 | $ python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
43 | ```
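
The executor you pick determines which helm values file `make deploy` passes to `helm install`; for example, choosing the Kubernetes executor makes the deploy target run roughly:
```console
$ helm install -f config/helm/charts/airflow-kubernetes.yaml --namespace airflow airflow stable/airflow
```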
44 |
45 |
46 | ## Usage
47 |
48 | ### Prerequisites
49 | ```console
50 | $ brew install kubectl
51 | ```
52 | ```console
53 | $ brew install helm
54 | ```
55 | * make sure your [kubectl context is configured to your EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html).
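
If you haven't configured it yet, one way to do so (assuming the AWS CLI is installed; the cluster name and region below are placeholders) is:
```console
$ aws eks update-kubeconfig --name <your-cluster-name> --region <your-region>
```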
56 |
57 | For a custom Airflow image you'll also need:
58 | * [a Kubernetes cluster set up with the cluster autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html)
59 | * [an ECR repository for the docker image](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-create.html)
60 |
61 | It is also recommended to [set up Kubernetes Dashboard](https://aws.amazon.com/premiumsupport/knowledge-center/eks-cluster-kubernetes-dashboard/)
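
Once the dashboard is installed, the Makefile's `kubedash` target prints the eks-admin login token and starts `kubectl proxy`:
```console
$ make kubedash
```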
62 |
63 | ### Default Airflow Image
64 | ```console
65 | $ make deploy
66 | ```
67 | At this point you should see the stack deployed to Kubernetes.
68 | To see Airflow's UI:
69 |
70 | ```console
71 | $ make ui pod=[webserver-pod-name]
72 | ```
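
You can look up the webserver pod name with kubectl, e.g.:
```console
$ kubectl get pods -n airflow
```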
73 | ### Custom Airflow Image
74 | After changing `config/docker/Dockerfile` and `scripts/entrypoint.sh`,
75 | build your custom Airflow image:
76 | ```console
77 | $ make build
78 | ```
79 | Push to ECR
80 | ```console
81 | $ make push
82 | ```
83 | Deploy to Kubernetes
84 | ```console
85 | $ make deploy
86 | ```
87 | To see Airflow's UI:
88 |
89 | ```console
90 | $ make ui pod=[webserver-pod-name]
91 | ```
92 | ---
93 |
94 | ## Fine Tuning The Setup
95 |
96 | This template uses:
97 |
98 |
99 | **Airflow Helm Chart**: the `stable/airflow` chart (referenced in the Makefile's `deploy` target)
100 |
101 | **Docker Image**: https://github.com/puckel/docker-airflow
102 |
103 | For more details and fine-tuning of the setup, please refer to the links above.
104 |
105 | ---
--------------------------------------------------------------------------------
/scalable_airflow/README.md:
--------------------------------------------------------------------------------
1 | # Scalable Airflow Setup Template
2 | ##### This repo's goal is to get you up and running quickly with a scalable Airflow setup on Kubernetes.
3 |
4 | # Features
5 |
6 | :baby: **Easy Setup**: Using cookiecutter to fill in the blanks.
7 |
8 | :fire: **Disposable Infrastructure**: Using helm and some premade commands, we can destroy and re-deploy the entire infrastructure easily.
9 |
10 | :rocket: **Cost-Efficient**: We use Kubernetes as the task engine. The Airflow scheduler runs each task in a new pod and deletes it upon completion, letting us scale with the workload while using a minimal amount of resources.
11 |
12 | :nut_and_bolt: **Decoupled Executor**: Another great advantage of using Kubernetes as the task runner is that it decouples orchestration from execution. You can read more about it in [We're All Using Airflow Wrong and How to Fix It](https://medium.com/bluecore-engineering/were-all-using-airflow-wrong-and-how-to-fix-it-a56f14cb0753).
13 |
14 | :runner: **Dynamically Updated Workflows**: We use Git-Sync containers, which let us update workflows through git alone. There is no need to redeploy Airflow on each workflow change.
15 |
16 |
17 | ## Installation
18 |
19 | ```console
20 | $ cookiecutter https://github.com/talperetz/airflow-kube-template
21 |
22 | ```
23 |
24 | ### Cookiecutter Options Explained
25 | * airflow_executor: Tasks run on Kubernetes with either Celery or Kubernetes as the executor; this choice selects which helm values file `make deploy` uses (see the example at the end of this list). To learn more, check out [Scale Your Data Pipelines with Airflow and Kubernetes](https://medium.com/@talperetz24)
26 | * local_airflow_image_name: Image name; required only if you want to build your own Airflow image.
27 | * airflow_image_repository: ECR repository link; required only if you want to build your own Airflow image.
28 | * git_repo_to_sync_dags: Link to the GitHub repository that holds your workflows (DAGs).
29 | * git_username_in_base_64:
30 | You can convert strings to base64 via shell with:
31 | ```console
32 | $ echo -n "github_username" | base64
33 | ```
34 | * git_password_in_base_64:
35 | You can convert strings to base64 via shell with:
36 | ```console
37 | $ echo -n "github_password" | base64
38 | ```
39 | * fernet_key:
40 | You can fill the fernet_key option with the output of this command:
41 | ```console
42 | $ python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
43 | ```
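
The executor you pick determines which helm values file `make deploy` passes to `helm install`; for example, choosing the Kubernetes executor makes the deploy target run roughly:
```console
$ helm install -f config/helm/charts/airflow-kubernetes.yaml --namespace airflow airflow stable/airflow
```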
44 |
45 |
46 | ## Usage
47 |
48 | ### Prerequisites
49 | ```console
50 | $ brew install kubectl
51 | ```
52 | ```console
53 | $ brew install helm
54 | ```
55 | * make sure your [kubectl context is configured to your EKS cluster](https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html).
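
If you haven't configured it yet, one way to do so (assuming the AWS CLI is installed; the cluster name and region below are placeholders) is:
```console
$ aws eks update-kubeconfig --name <your-cluster-name> --region <your-region>
```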
56 |
57 | For a custom Airflow image you'll also need:
58 | * [a Kubernetes cluster set up with the cluster autoscaler](https://docs.aws.amazon.com/eks/latest/userguide/cluster-autoscaler.html)
59 | * [an ECR repository for the docker image](https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-create.html)
60 |
61 | It is also recommended to [set up Kubernetes Dashboard](https://aws.amazon.com/premiumsupport/knowledge-center/eks-cluster-kubernetes-dashboard/)
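
Once the dashboard is installed, the Makefile's `kubedash` target prints the eks-admin login token and starts `kubectl proxy`:
```console
$ make kubedash
```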
62 |
63 | ### Default Airflow Image
64 | ```console
65 | $ make deploy
66 | ```
67 | At this point you should see the stack deployed to Kubernetes.
68 | To see Airflow's UI:
69 |
70 | ```console
71 | $ make ui pod=[webserver-pod-name]
72 | ```
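
You can look up the webserver pod name with kubectl, e.g.:
```console
$ kubectl get pods -n airflow
```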
73 | ### Custom Airflow Image
74 | After changing `config/docker/Dockerfile` and `scripts/entrypoint.sh`,
75 | build your custom Airflow image:
76 | ```console
77 | $ make build
78 | ```
79 | Push to ECR
80 | ```console
81 | $ make push
82 | ```
83 | Deploy to Kubernetes
84 | ```console
85 | $ make deploy
86 | ```
87 | To see Airflow's UI:
88 |
89 | ```console
90 | $ make ui pod=[webserver-pod-name]
91 | ```
92 | ---
93 |
94 | ## Fine Tuning The Setup
95 |
96 | This template uses:
97 |
98 |
99 | **Airflow Helm Chart**: the `stable/airflow` chart (referenced in the Makefile's `deploy` target)
100 |
101 | **Docker Image**: https://github.com/puckel/docker-airflow
102 |
103 | For more details and fine-tuning of the setup, please refer to the links above.
104 |
105 | ---
--------------------------------------------------------------------------------
/scalable_airflow/scripts/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # User-provided configuration must always be respected.
4 | #
5 | # Therefore, this script must only derive Airflow AIRFLOW__ variables from other variables
6 | # when the user did not provide their own configuration.
7 |
8 | TRY_LOOP="20"
9 |
10 | # Global defaults and back-compat
11 | : "${AIRFLOW_HOME:="/usr/local/airflow"}"
12 | : "${AIRFLOW__CORE__FERNET_KEY:=${FERNET_KEY:=$(python -c "from cryptography.fernet import Fernet; FERNET_KEY = Fernet.generate_key().decode(); print(FERNET_KEY)")}}"
13 | : "${AIRFLOW__CORE__EXECUTOR:=${EXECUTOR:-Sequential}Executor}"
14 |
15 |
16 | export \
17 | AIRFLOW_HOME \
18 | AIRFLOW__CORE__EXECUTOR \
19 | AIRFLOW__CORE__FERNET_KEY \
20 | AIRFLOW__CORE__LOAD_EXAMPLES \
21 |
22 | # Install custom python package if requirements.txt is present
23 | if [ -e "/requirements.txt" ]; then
24 | $(command -v pip) install --user -r /requirements.txt
25 | fi
26 |
27 | wait_for_port() {
28 | local name="$1" host="$2" port="$3"
29 | local j=0
30 | while ! nc -z "$host" "$port" >/dev/null 2>&1 < /dev/null; do
31 | j=$((j+1))
32 | if [ $j -ge $TRY_LOOP ]; then
33 | echo >&2 "$(date) - $host:$port still not reachable, giving up"
34 | exit 1
35 | fi
36 | echo "$(date) - waiting for $name... $j/$TRY_LOOP"
37 | sleep 5
38 | done
39 | }
40 |
41 | # Other executors than SequentialExecutor drive the need for an SQL database, here PostgreSQL is used
42 | if [ "$AIRFLOW__CORE__EXECUTOR" != "SequentialExecutor" ]; then
43 | # Check if the user has provided explicit Airflow configuration concerning the database
44 | if [ -z "$AIRFLOW__CORE__SQL_ALCHEMY_CONN" ]; then
45 | # Default values corresponding to the default compose files
46 | : "${POSTGRES_HOST:="postgres"}"
47 | : "${POSTGRES_PORT:="5432"}"
48 | : "${POSTGRES_USER:="airflow"}"
49 | : "${POSTGRES_PASSWORD:="airflow"}"
50 | : "${POSTGRES_DB:="airflow"}"
51 | : "${POSTGRES_EXTRAS:-""}"
52 |
53 | AIRFLOW__CORE__SQL_ALCHEMY_CONN="postgresql+psycopg2://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}${POSTGRES_EXTRAS}"
54 | export AIRFLOW__CORE__SQL_ALCHEMY_CONN
55 |
56 | # Check if the user has provided explicit Airflow configuration for the broker's connection to the database
57 | if [ "$AIRFLOW__CORE__EXECUTOR" = "CeleryExecutor" ]; then
58 | AIRFLOW__CELERY__RESULT_BACKEND="db+postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}${POSTGRES_EXTRAS}"
59 | export AIRFLOW__CELERY__RESULT_BACKEND
60 | fi
61 | else
62 | if [[ "$AIRFLOW__CORE__EXECUTOR" == "CeleryExecutor" && -z "$AIRFLOW__CELERY__RESULT_BACKEND" ]]; then
63 | >&2 printf '%s\n' "FATAL: if you set AIRFLOW__CORE__SQL_ALCHEMY_CONN manually with CeleryExecutor you must also set AIRFLOW__CELERY__RESULT_BACKEND"
64 | exit 1
65 | fi
66 |
67 | # Derive useful variables from the AIRFLOW__ variables provided explicitly by the user
68 | POSTGRES_ENDPOINT=$(echo -n "$AIRFLOW__CORE__SQL_ALCHEMY_CONN" | cut -d '/' -f3 | sed -e 's,.*@,,')
69 | POSTGRES_HOST=$(echo -n "$POSTGRES_ENDPOINT" | cut -d ':' -f1)
70 | POSTGRES_PORT=$(echo -n "$POSTGRES_ENDPOINT" | cut -d ':' -f2)
71 | fi
72 |
73 | wait_for_port "Postgres" "$POSTGRES_HOST" "$POSTGRES_PORT"
74 | fi
75 |
76 | # CeleryExecutor drives the need for a Celery broker, here Redis is used
77 | if [ "$AIRFLOW__CORE__EXECUTOR" = "CeleryExecutor" ]; then
78 | # Check if the user has provided explicit Airflow configuration concerning the broker
79 | if [ -z "$AIRFLOW__CELERY__BROKER_URL" ]; then
80 | # Default values corresponding to the default compose files
81 | : "${REDIS_PROTO:="redis://"}"
82 | : "${REDIS_HOST:="redis"}"
83 | : "${REDIS_PORT:="6379"}"
84 | : "${REDIS_PASSWORD:=""}"
85 | : "${REDIS_DBNUM:="1"}"
86 |
87 | # When Redis is secured by basic auth, it does not handle the username part of basic auth, only a token
88 | if [ -n "$REDIS_PASSWORD" ]; then
89 | REDIS_PREFIX=":${REDIS_PASSWORD}@"
90 | else
91 | REDIS_PREFIX=
92 | fi
93 |
94 | AIRFLOW__CELERY__BROKER_URL="${REDIS_PROTO}${REDIS_PREFIX}${REDIS_HOST}:${REDIS_PORT}/${REDIS_DBNUM}"
95 | export AIRFLOW__CELERY__BROKER_URL
96 | else
97 | # Derive useful variables from the AIRFLOW__ variables provided explicitly by the user
98 | REDIS_ENDPOINT=$(echo -n "$AIRFLOW__CELERY__BROKER_URL" | cut -d '/' -f3 | sed -e 's,.*@,,')
99 | REDIS_HOST=$(echo -n "$REDIS_ENDPOINT" | cut -d ':' -f1)
100 | REDIS_PORT=$(echo -n "$REDIS_ENDPOINT" | cut -d ':' -f2)
101 | fi
102 |
103 | wait_for_port "Redis" "$REDIS_HOST" "$REDIS_PORT"
104 | fi
105 |
106 | case "$1" in
107 | webserver)
108 | airflow initdb
109 | if [ "$AIRFLOW__CORE__EXECUTOR" = "LocalExecutor" ] || [ "$AIRFLOW__CORE__EXECUTOR" = "SequentialExecutor" ]; then
110 | # With the "Local" and "Sequential" executors it should all run in one container.
111 | airflow scheduler &
112 | fi
113 | exec airflow webserver
114 | ;;
115 | worker|scheduler)
116 | # Give the webserver time to run initdb.
117 | sleep 10
118 | exec airflow "$@"
119 | ;;
120 | flower)
121 | sleep 10
122 | exec airflow "$@"
123 | ;;
124 | version)
125 | exec airflow "$@"
126 | ;;
127 | *)
128 | # The command is something like bash, not an airflow subcommand. Just run it in the right environment.
129 | exec "$@"
130 | ;;
131 | esac
--------------------------------------------------------------------------------
/scalable_airflow/config/helm/charts/airflow-celery.yaml:
--------------------------------------------------------------------------------
1 | #`dags_volume_claim` or `dags_volume_host` or `dags_in_image` or `git_repo and git_branch and git_dags_folder_mount_point`
2 | airflow:
3 | extraConfigmapMounts: []
4 | extraEnv:
5 | fernetKey: "{{cookiecutter.fernet_key}}"
6 | service:
7 | annotations: {}
8 | sessionAffinity: "None"
9 | sessionAffinityConfig: {}
10 | type: ClusterIP
11 | externalPort: 8080
12 | nodePort:
13 | http:
14 | executor: Celery
15 | initRetryLoop: 3
16 | image:
17 | repository: {{cookiecutter.airflow_image_repository}}
18 | tag: latest
19 | pullPolicy: Always
20 | pullSecret:
21 | schedulerNumRuns: "-1"
22 | schedulerDoPickle: true
23 | webReplicas: 1
24 | config:
25 | AIRFLOW__CORE__EXECUTOR: CeleryExecutor
26 | AIRFLOW__CORE__LOAD_EXAMPLES: true
27 | AIRFLOW__WEBSERVER__EXPOSE_CONFIG: True
28 | AIRFLOW__WEBSERVER__WORKERS: 10
29 | AIRFLOW__KUBERNETES__WORKER_CONTAINER_REPOSITORY: {{cookiecutter.airflow_image_repository}}
30 | AIRFLOW__KUBERNETES__WORKER_CONTAINER_TAG: latest
31 | AIRFLOW__KUBERNETES__WORKER_CONTAINER_IMAGE_PULL_POLICY: Always
32 | AIRFLOW__KUBERNETES__NAMESPACE: airflow
33 | AIRFLOW__KUBERNETES__DAGS_VOLUME_CLAIM: airflow-dags
34 | AIRFLOW__KUBERNETES__GIT_REPO: {{cookiecutter.git_repo_to_sync_dags}}
35 | AIRFLOW__KUBERNETES__GIT_BRANCH: master
36 | AIRFLOW__KUBERNETES__GIT_SUBPATH: dags
37 | AIRFLOW__KUBERNETES__GIT_SYNC_CREDENTIALS_SECRET: git-credentials
38 | AIRFLOW__KUBERNETES__GIT_SYNC_CONTAINER_REPOSITORY: k8s.gcr.io/git-sync
39 | AIRFLOW__KUBERNETES__GIT_SYNC_CONTAINER_TAG: v3.1.1
40 | AIRFLOW__KUBERNETES__GIT_SYNC_INIT_CONTAINER_NAME: git-sync-clone
41 | AIRFLOW__KUBERNETES__GIT_DAGS_FOLDER_MOUNT_POINT: /usr/local/airflow/dags
42 |
43 | podDisruptionBudgetEnabled: true
44 | podDisruptionBudget:
45 | maxUnavailable: 1
46 | connections: []
47 | variables: {}
48 | pools: {}
49 | podAnnotations: {}
50 | extraInitContainers: []
51 | extraContainers:
52 | - name: git-sync-clone
53 | image: k8s.gcr.io/git-sync:v3.1.2
54 | imagePullPolicy: IfNotPresent
55 | envFrom:
56 | - configMapRef:
57 | name: airflow-gitsync
58 | - secretRef:
59 | name: git-credentials
60 | volumeMounts:
61 | - mountPath: /git
62 | name: airflow-dags
63 | extraVolumeMounts:
64 | - name: airflow-dags
65 | mountPath: /usr/local/airflow/dags
66 | readOnly: true
67 | extraVolumes:
68 | - name: airflow-dags
69 | emptyDir: {}
70 | initdb: true
71 |
72 |
73 | scheduler:
74 | resources: {}
75 | labels: {}
76 | podAnnotations: {}
77 | annotations: {}
78 | nodeSelector: {}
79 | affinity: {}
80 | tolerations: []
81 |
82 | flower:
83 | enabled: true
84 | urlPrefix: "/airflow/flower"
85 | resources: {}
86 | labels: {}
87 | annotations: {}
88 | service:
89 | annotations: {}
90 | type: ClusterIP
91 | externalPort: 5555
92 | nodeSelector: {}
93 | affinity: {}
94 | tolerations: []
95 | extraConfigmapMounts: []
96 |
97 | web:
98 | baseUrl: "http://localhost:8080"
99 | resources: {}
100 | labels: {}
101 | annotations: {}
102 | podAnnotations: {}
103 | initialStartupDelay: "30"
104 | initialDelaySeconds: "360"
105 | minReadySeconds: 120
106 | readinessProbe:
107 | periodSeconds: 30
108 | timeoutSeconds: 1
109 | successThreshold: 1
110 | failureThreshold: 5
111 | livenessProbe:
112 | periodSeconds: 60
113 | timeoutSeconds: 1
114 | successThreshold: 1
115 | failureThreshold: 5
116 | nodeSelector: {}
117 | affinity: {}
118 | tolerations: []
119 | secretsDir: /var/airflow/secrets
120 | secrets: []
121 |
122 | workers:
123 | enabled: true
124 | replicas: 2
125 | terminationPeriod: 30
126 | resources: {}
127 | labels: {}
128 | annotations: {}
129 | podAnnotations: {}
130 | celery:
131 | instances: 5
132 | gracefullTermination: true
133 | secretsDir: /var/airflow/secrets
134 | secrets: []
135 | nodeSelector: {}
136 | affinity: {}
137 | tolerations: []
138 |
139 | ingress:
140 | enabled: false
141 | web:
142 | path: "/airflow"
143 | host: ""
144 | annotations: {}
145 | livenessPath:
146 | tls:
147 | enabled: false
148 | precedingPaths:
149 | succeedingPaths:
150 |
151 | flower:
152 | path: ""
153 | livenessPath: /
154 | host: ""
155 | annotations: {}
156 | tls:
157 | enabled: false
158 |
159 | persistence:
160 | enabled: false
161 | accessMode: ReadWriteMany
162 | size: 2Gi
163 |
164 | logsPersistence:
165 | enabled: false
166 | accessMode: ReadWriteMany
167 | size: 2Gi
168 |
169 | dags:
170 | path: /usr/local/airflow/dags
171 | doNotPickle: false
172 | git:
173 | url: {{cookiecutter.git_repo_to_sync_dags}}
174 | ref: master
175 | secret: "generic git-sync-secret"
176 | repoHost: "github.com"
177 | privateKeyName: "id_rsa"
178 | gitSync:
179 | enabled: false
180 | image:
181 | repository: alpine/git
182 | tag: 1.0.7
183 | pullPolicy: IfNotPresent
184 | refreshTime: 60s
185 | initContainer:
186 | enabled: false
187 | image:
188 | repository: alpine/git
189 | tag: 1.0.7
190 | pullPolicy: IfNotPresent
191 | installRequirements: true
192 |
193 | logs:
194 | path: /usr/local/airflow/logs
195 |
196 | rbac:
197 | create: true
198 |
199 | serviceAccount:
200 | create: true
201 | name: airflow-service-account
202 | annotations: {}
203 |
204 |
205 | ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
206 | postgresql:
207 | enabled: true
208 | existingSecret: airflow-secrests
209 | existingSecretKey: postgresql
210 | service:
211 | port: 5432
212 | postgresqlUsername: postgres
213 | postgresqlPassword: airflow
214 | postgresqlDatabase: airflow
215 | persistence:
216 | enabled: true
217 | accessModes:
218 | - ReadWriteOnce
219 |
220 |
221 | ## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md
222 | redis:
223 | enabled: true
224 | existingSecret:
225 | existingSecretKey: redis-password
226 | password: airflow
227 | master:
228 | persistence:
229 | enabled: false
230 | accessModes:
231 | - ReadWriteOnce
232 | cluster:
233 | enabled: false
234 |
235 | # Enable this if you're using https://github.com/coreos/prometheus-operator
236 | # Don't forget you need to install something like https://github.com/epoch8/airflow-exporter in your airflow docker container
237 | serviceMonitor:
238 | enabled: false
239 | interval: "30s"
240 | path: /admin/metrics
241 | selector:
242 | prometheus: kube-prometheus
243 |
244 | prometheusRule:
245 | enabled: false
246 | groups: {}
247 | additionalLabels: {}
248 | extraManifests: []
249 |
--------------------------------------------------------------------------------
/scalable_airflow/config/helm/charts/airflow-kubernetes.yaml:
--------------------------------------------------------------------------------
1 | #`dags_volume_claim` or `dags_volume_host` or `dags_in_image` or `git_repo and git_branch and git_dags_folder_mount_point`
2 | airflow:
3 | extraConfigmapMounts: []
4 | extraEnv:
5 | fernetKey: "{{cookiecutter.fernet_key}}"
6 | service:
7 | annotations: {}
8 | sessionAffinity: "None"
9 | sessionAffinityConfig: {}
10 | type: ClusterIP
11 | externalPort: 8080
12 | nodePort:
13 | http:
14 | executor: Kubernetes
15 | initRetryLoop: 3
16 | image:
17 | repository: {{cookiecutter.airflow_image_repository}}
18 | tag: latest
19 | pullPolicy: Always
20 | pullSecret:
21 | schedulerNumRuns: "-1"
22 | schedulerDoPickle: true
23 | webReplicas: 1
24 | config:
25 | AIRFLOW__CORE__EXECUTOR: KubernetesExecutor
26 | AIRFLOW__CORE__LOAD_EXAMPLES: true
27 | AIRFLOW__WEBSERVER__EXPOSE_CONFIG: True
28 | AIRFLOW__WEBSERVER__WORKERS: 10
29 | AIRFLOW__KUBERNETES__WORKER_CONTAINER_REPOSITORY: {{cookiecutter.airflow_image_repository}}
30 | AIRFLOW__KUBERNETES__WORKER_CONTAINER_TAG: latest
31 | AIRFLOW__KUBERNETES__WORKER_CONTAINER_IMAGE_PULL_POLICY: Always
32 | AIRFLOW__KUBERNETES__NAMESPACE: airflow
33 | AIRFLOW__KUBERNETES__DAGS_VOLUME_CLAIM: airflow-dags
34 | AIRFLOW__KUBERNETES__GIT_REPO: {{cookiecutter.git_repo_to_sync_dags}}
35 | AIRFLOW__KUBERNETES__GIT_BRANCH: master
36 | AIRFLOW__KUBERNETES__GIT_SUBPATH: dags
37 | AIRFLOW__KUBERNETES__GIT_SYNC_CREDENTIALS_SECRET: git-credentials
38 | AIRFLOW__KUBERNETES__GIT_SYNC_CONTAINER_REPOSITORY: k8s.gcr.io/git-sync
39 | AIRFLOW__KUBERNETES__GIT_SYNC_CONTAINER_TAG: v3.1.1
40 | AIRFLOW__KUBERNETES__GIT_SYNC_INIT_CONTAINER_NAME: git-sync-clone
41 | AIRFLOW__KUBERNETES__GIT_DAGS_FOLDER_MOUNT_POINT: /usr/local/airflow/dags
42 |
43 | podDisruptionBudgetEnabled: true
44 | podDisruptionBudget:
45 | maxUnavailable: 1
46 | connections: []
47 | variables: {}
48 | pools: {}
49 | podAnnotations: {}
50 | extraInitContainers: []
51 | extraContainers:
52 | - name: git-sync-clone
53 | image: k8s.gcr.io/git-sync:v3.1.2
54 | imagePullPolicy: IfNotPresent
55 | envFrom:
56 | - configMapRef:
57 | name: airflow-gitsync
58 | - secretRef:
59 | name: git-credentials
60 | volumeMounts:
61 | - mountPath: /git
62 | name: airflow-dags
63 | extraVolumeMounts:
64 | - name: airflow-dags
65 | mountPath: /usr/local/airflow/dags
66 | readOnly: true
67 | extraVolumes:
68 | - name: airflow-dags
69 | emptyDir: {}
70 | initdb: true
71 |
72 |
73 | scheduler:
74 | resources: {}
75 | labels: {}
76 | podAnnotations: {}
77 | annotations: {}
78 | nodeSelector: {}
79 | affinity: {}
80 | tolerations: []
81 |
82 | flower:
83 | enabled: false
84 | urlPrefix: "/airflow/flower"
85 | resources: {}
86 | labels: {}
87 | annotations: {}
88 | service:
89 | annotations: {}
90 | type: ClusterIP
91 | externalPort: 5555
92 | nodeSelector: {}
93 | affinity: {}
94 | tolerations: []
95 | extraConfigmapMounts: []
96 |
97 | web:
98 | baseUrl: "http://localhost:8080"
99 | resources: {}
100 | labels: {}
101 | annotations: {}
102 | podAnnotations: {}
103 | initialStartupDelay: "30"
104 | initialDelaySeconds: "360"
105 | minReadySeconds: 120
106 | readinessProbe:
107 | periodSeconds: 30
108 | timeoutSeconds: 1
109 | successThreshold: 1
110 | failureThreshold: 5
111 | livenessProbe:
112 | periodSeconds: 60
113 | timeoutSeconds: 1
114 | successThreshold: 1
115 | failureThreshold: 5
116 | nodeSelector: {}
117 | affinity: {}
118 | tolerations: []
119 | secretsDir: /var/airflow/secrets
120 | secrets: []
121 |
122 | workers:
123 | enabled: false
124 | replicas: 2
125 | terminationPeriod: 30
126 | resources: {}
127 | labels: {}
128 | annotations: {}
129 | podAnnotations: {}
130 | celery:
131 | instances: 5
132 | gracefullTermination: true
133 | secretsDir: /var/airflow/secrets
134 | secrets: []
135 | nodeSelector: {}
136 | affinity: {}
137 | tolerations: []
138 |
139 | ingress:
140 | enabled: false
141 | web:
142 | path: "/airflow"
143 | host: ""
144 | annotations: {}
145 | livenessPath:
146 | tls:
147 | enabled: false
148 | precedingPaths:
149 | succeedingPaths:
150 |
151 | flower:
152 | path: ""
153 | livenessPath: /
154 | host: ""
155 | annotations: {}
156 | tls:
157 | enabled: false
158 |
159 | persistence:
160 | enabled: false
161 | accessMode: ReadWriteMany
162 | size: 2Gi
163 |
164 | logsPersistence:
165 | enabled: false
166 | accessMode: ReadWriteMany
167 | size: 2Gi
168 |
169 | dags:
170 | path: /usr/local/airflow/dags
171 | doNotPickle: false
172 | git:
173 | url: {{cookiecutter.git_repo_to_sync_dags}}
174 | ref: master
175 | secret: "generic git-sync-secret"
176 | repoHost: "github.com"
177 | privateKeyName: "id_rsa"
178 | gitSync:
179 | enabled: false
180 | image:
181 | repository: alpine/git
182 | tag: 1.0.7
183 | pullPolicy: IfNotPresent
184 | refreshTime: 60s
185 | initContainer:
186 | enabled: false
187 | image:
188 | repository: alpine/git
189 | tag: 1.0.7
190 | pullPolicy: IfNotPresent
191 | installRequirements: true
192 |
193 | logs:
194 | path: /usr/local/airflow/logs
195 |
196 | rbac:
197 | create: true
198 |
199 | serviceAccount:
200 | create: true
201 | name: airflow-service-account
202 | annotations: {}
203 |
204 |
205 | ## ref: https://github.com/kubernetes/charts/blob/master/stable/postgresql/README.md
206 | postgresql:
207 | enabled: true
208 | existingSecret: airflow-secrests
209 | existingSecretKey: postgresql
210 | service:
211 | port: 5432
212 | postgresqlUsername: postgres
213 | postgresqlPassword: airflow
214 | postgresqlDatabase: airflow
215 | persistence:
216 | enabled: true
217 | accessModes:
218 | - ReadWriteOnce
219 |
220 |
221 | ## ref: https://github.com/kubernetes/charts/blob/master/stable/redis/README.md
222 | redis:
223 | enabled: false
224 | existingSecret:
225 | existingSecretKey: redis-password
226 | password: airflow
227 | master:
228 | persistence:
229 | enabled: false
230 | accessModes:
231 | - ReadWriteOnce
232 | cluster:
233 | enabled: false
234 |
235 | # Enable this if you're using https://github.com/coreos/prometheus-operator
236 | # Don't forget you need to install something like https://github.com/epoch8/airflow-exporter in your airflow docker container
237 | serviceMonitor:
238 | enabled: false
239 | interval: "30s"
240 | path: /admin/metrics
241 | selector:
242 | prometheus: kube-prometheus
243 |
244 | prometheusRule:
245 | enabled: false
246 | groups: {}
247 | additionalLabels: {}
248 | extraManifests: []
249 |
--------------------------------------------------------------------------------
/scalable_airflow/config/airflow/airflow.cfg:
--------------------------------------------------------------------------------
1 | [core]
2 | # The folder where your airflow pipelines live, most likely a
3 | # subfolder in a code repository. This path must be absolute.
4 | dags_folder = /usr/local/airflow/dags
5 |
6 | # The folder where airflow should store its log files
7 | # This path must be absolute
8 | base_log_folder = /usr/local/airflow/logs
9 |
10 | # Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
11 | # Set this to True if you want to enable remote logging.
12 | remote_logging = False
13 |
14 | # Users must supply an Airflow connection id that provides access to the storage
15 | # location.
16 | remote_log_conn_id =
17 | remote_base_log_folder =
18 | encrypt_s3_logs = False
19 |
20 | # Logging level
21 | logging_level = INFO
22 |
23 | # Logging level for Flask-appbuilder UI
24 | fab_logging_level = WARN
25 |
26 | # Logging class
27 | # Specify the class that will specify the logging configuration
28 | # This class has to be on the python classpath
29 | # Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
30 | logging_config_class =
31 |
32 | # Flag to enable/disable Colored logs in Console
33 | # Colour the logs when the controlling terminal is a TTY.
34 | colored_console_log = True
35 |
36 | # Log format for when Colored logs is enabled
37 | colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s
38 | colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter
39 |
40 | # Format of Log line
41 | log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s
42 | simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
43 |
44 | # Log filename format
45 | log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
46 | log_processor_filename_template = {{ filename }}.log
47 | dag_processor_manager_log_location = /usr/local/airflow/logs/dag_processor_manager/dag_processor_manager.log
48 |
49 | # Name of handler to read task instance logs.
50 | # Default to use task handler.
51 | task_log_reader = task
52 |
53 | # Hostname by providing a path to a callable, which will resolve the hostname.
54 | # The format is "package:function".
55 | #
56 | # For example, default value "socket:getfqdn" means that result from getfqdn() of "socket"
57 | # package will be used as hostname.
58 | #
59 | # No argument should be required in the function specified.
60 | # If using IP address as hostname is preferred, use value ``airflow.utils.net:get_host_ip_address``
61 | hostname_callable = socket:getfqdn
62 |
63 | # Default timezone in case supplied date times are naive
64 | # can be utc (default), system, or any IANA timezone string (e.g. Europe/Amsterdam)
65 | default_timezone = utc
66 |
67 | # The executor class that airflow should use. Choices include
68 | # SequentialExecutor, LocalExecutor, CeleryExecutor, DaskExecutor, KubernetesExecutor
69 | executor = SequentialExecutor
70 |
71 | # The SqlAlchemy connection string to the metadata database.
72 | # SqlAlchemy supports many different database engines; more information is available
73 | # on their website.
74 | # sql_alchemy_conn = sqlite:////tmp/airflow.db
75 |
76 | # The encoding for the databases
77 | sql_engine_encoding = utf-8
78 |
79 | # If SqlAlchemy should pool database connections.
80 | sql_alchemy_pool_enabled = True
81 |
82 | # The SqlAlchemy pool size is the maximum number of database connections
83 | # in the pool. 0 indicates no limit.
84 | sql_alchemy_pool_size = 5
85 |
86 | # The maximum overflow size of the pool.
87 | # When the number of checked-out connections reaches the size set in pool_size,
88 | # additional connections will be returned up to this limit.
89 | # When those additional connections are returned to the pool, they are disconnected and discarded.
90 | # It follows then that the total number of simultaneous connections the pool will allow
91 | # is pool_size + max_overflow,
92 | # and the total number of "sleeping" connections the pool will allow is pool_size.
93 | # max_overflow can be set to -1 to indicate no overflow limit;
94 | # no limit will be placed on the total number of concurrent connections. Defaults to 10.
95 | sql_alchemy_max_overflow = 10
96 |
97 | # The SqlAlchemy pool recycle is the number of seconds a connection
98 | # can be idle in the pool before it is invalidated. This config does
99 | # not apply to sqlite. If the number of DB connections is ever exceeded,
100 | # a lower config value will allow the system to recover faster.
101 | sql_alchemy_pool_recycle = 1800
102 |
103 | # Check connection at the start of each connection pool checkout.
104 | # Typically, this is a simple statement like "SELECT 1".
105 | # More information here:
106 | # https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic
107 | sql_alchemy_pool_pre_ping = True
108 |
109 | # The schema to use for the metadata database.
110 | # SqlAlchemy supports databases with the concept of multiple schemas.
111 | sql_alchemy_schema =
112 |
113 | # The amount of parallelism as a setting to the executor. This defines
114 | # the max number of task instances that should run simultaneously
115 | # on this airflow installation
116 | parallelism = 32
117 |
118 | # The number of task instances allowed to run concurrently by the scheduler
119 | dag_concurrency = 16
120 |
121 | # Are DAGs paused by default at creation
122 | dags_are_paused_at_creation = True
123 |
124 | # The maximum number of active DAG runs per DAG
125 | max_active_runs_per_dag = 16
126 |
127 | # Whether to load the examples that ship with Airflow. It's good to
128 | # get started, but you probably want to set this to False in a production
129 | # environment
130 | load_examples = True
131 |
132 | # Where your Airflow plugins are stored
133 | plugins_folder = /usr/local/airflow/plugins
134 |
135 | # Secret key to save connection passwords in the db
136 | fernet_key = $FERNET_KEY
137 |
138 | # Whether to disable pickling dags
139 | donot_pickle = False
140 |
141 | # How long before timing out a python file import
142 | dagbag_import_timeout = 30
143 |
144 | # How long before timing out a DagFileProcessor, which processes a dag file
145 | dag_file_processor_timeout = 50
146 |
147 | # The class to use for running task instances in a subprocess
148 | task_runner = StandardTaskRunner
149 |
150 | # If set, tasks without a ``run_as_user`` argument will be run with this user
151 | # Can be used to de-elevate a sudo user running Airflow when executing tasks
152 | default_impersonation =
153 |
154 | # What security module to use (for example kerberos)
155 | security =
156 |
157 | # If set to False, enables some insecure features like Charts and Ad Hoc Queries.
158 | # In 2.0 will default to True.
159 | secure_mode = False
160 |
161 | # Turn unit test mode on (overwrites many configuration options with test
162 | # values at runtime)
163 | unit_test_mode = False
164 |
165 | # Whether to enable pickling for xcom (note that this is insecure and allows for
166 | # RCE exploits). This will be deprecated in Airflow 2.0 (be forced to False).
167 | enable_xcom_pickling = True
168 |
169 | # When a task is killed forcefully, this is the amount of time in seconds that
170 | # it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED
171 | killed_task_cleanup_time = 60
172 |
173 | # Whether to override params with dag_run.conf. If you pass some key-value pairs
174 | # through ``airflow dags backfill -c`` or
175 | # ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params.
176 | dag_run_conf_overrides_params = False
177 |
178 | # Worker initialisation check to validate Metadata Database connection
179 | worker_precheck = False
180 |
181 | # When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``.
182 | dag_discovery_safe_mode = True
183 |
184 | # The number of retries each task is going to have by default. Can be overridden at dag or task level.
185 | default_task_retries = 0
186 |
187 | # Whether to serialise DAGs and persist them in the DB.
188 | # If set to True, Webserver reads from DB instead of parsing DAG files
189 | # More details: https://airflow.apache.org/docs/stable/dag-serialization.html
190 | store_serialized_dags = False
191 |
192 | # Updating serialized DAG can not be faster than a minimum interval to reduce database write rate.
193 | min_serialized_dag_update_interval = 30
194 |
195 | # On each dagrun check against defined SLAs
196 | check_slas = True
197 |
198 | [cli]
199 | # In what way should the cli access the API. The LocalClient will use the
200 | # database directly, while the json_client will use the api running on the
201 | # webserver
202 | api_client = airflow.api.client.local_client
203 |
204 | # If you set web_server_url_prefix, do NOT forget to append it here, ex:
205 | # ``endpoint_url = http://localhost:8080/myroot``
206 | # So api will look like: ``http://localhost:8080/myroot/api/experimental/...``
207 | endpoint_url = http://localhost:8080
208 |
209 | [debug]
210 | # Used only with DebugExecutor. If set to True DAG will fail with first
211 | # failed task. Helpful for debugging purposes.
212 | fail_fast = False
213 |
214 | [api]
215 | # How to authenticate users of the API
216 | auth_backend = airflow.api.auth.backend.default
217 |
218 | [lineage]
219 | # what lineage backend to use
220 | backend =
221 |
222 | [atlas]
223 | sasl_enabled = False
224 | host =
225 | port = 21000
226 | username =
227 | password =
228 |
229 | [operators]
230 | # The default owner assigned to each new operator, unless
231 | # provided explicitly or passed via ``default_args``
232 | default_owner = airflow
233 | default_cpus = 1
234 | default_ram = 512
235 | default_disk = 512
236 | default_gpus = 0
237 |
238 | [hive]
239 | # Default mapreduce queue for HiveOperator tasks
240 | default_hive_mapred_queue =
241 |
242 | [webserver]
243 | # The base url of your website as airflow cannot guess what domain or
244 | # cname you are using. This is used in automated emails that
245 | # airflow sends to point links to the right web server
246 | base_url = http://localhost:8080
247 |
248 | # The ip specified when starting the web server
249 | web_server_host = 0.0.0.0
250 |
251 | # The port on which to run the web server
252 | web_server_port = 8080
253 |
254 | # Paths to the SSL certificate and key for the web server. When both are
255 | # provided SSL will be enabled. This does not change the web server port.
256 | web_server_ssl_cert =
257 |
258 | # Paths to the SSL certificate and key for the web server. When both are
259 | # provided SSL will be enabled. This does not change the web server port.
260 | web_server_ssl_key =
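# A minimal sketch of enabling SSL, assuming a certificate and key have already been
# generated at these (hypothetical) paths; both settings must be provided together:
# web_server_ssl_cert = /etc/ssl/certs/airflow.crt
# web_server_ssl_key = /etc/ssl/private/airflow.key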
261 |
262 | # Number of seconds the webserver waits before killing gunicorn master that doesn't respond
263 | web_server_master_timeout = 120
264 |
265 | # Number of seconds the gunicorn webserver waits before timing out on a worker
266 | web_server_worker_timeout = 120
267 |
268 | # Number of workers to refresh at a time. When set to 0, worker refresh is
269 | # disabled. When nonzero, airflow periodically refreshes webserver workers by
270 | # bringing up new ones and killing old ones.
271 | worker_refresh_batch_size = 1
272 |
273 | # Number of seconds to wait before refreshing a batch of workers.
274 | worker_refresh_interval = 30
275 |
276 | # Secret key used to run your flask app
277 | # It should be as random as possible
278 | secret_key = temporary_key
279 |
280 | # Number of workers to run the Gunicorn web server
281 | workers = 4
282 |
283 | # The worker class gunicorn should use. Choices include
284 | # sync (default), eventlet, gevent
285 | worker_class = sync
286 |
287 | # Log files for the gunicorn webserver. '-' means log to stderr.
288 | access_logfile = -
289 |
290 | # Log files for the gunicorn webserver. '-' means log to stderr.
291 | error_logfile = -
292 |
293 | # Expose the configuration file in the web server
294 | expose_config = True
295 |
296 | # Expose hostname in the web server
297 | expose_hostname = True
298 |
299 | # Expose stacktrace in the web server
300 | expose_stacktrace = True
301 |
302 | # Set to true to turn on authentication:
303 | # https://airflow.apache.org/security.html#web-authentication
304 | authenticate = False
305 |
306 | # Filter the list of dags by owner name (requires authentication to be enabled)
307 | filter_by_owner = False
308 |
309 | # Filtering mode. Choices include user (default) and ldapgroup.
310 | # Ldap group filtering requires using the ldap backend
311 | #
312 | # Note that the ldap server needs the "memberOf" overlay to be set up
313 | # in order to use the ldapgroup mode.
314 | owner_mode = user
315 |
316 | # Default DAG view. Valid values are:
317 | # tree, graph, duration, gantt, landing_times
318 | dag_default_view = tree
319 |
320 | # "Default DAG orientation. Valid values are:"
321 | # LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top)
322 | dag_orientation = LR
323 |
324 | # Puts the webserver in demonstration mode; blurs the names of Operators for
325 | # privacy.
326 | demo_mode = False
327 |
328 | # The amount of time (in secs) webserver will wait for initial handshake
329 | # while fetching logs from other worker machine
330 | log_fetch_timeout_sec = 5
331 |
332 | # Time interval (in secs) to wait before next log fetching.
333 | log_fetch_delay_sec = 2
334 |
335 | # Distance away from page bottom to enable auto tailing.
336 | log_auto_tailing_offset = 30
337 |
338 | # Animation speed for auto tailing log display.
339 | log_animation_speed = 1000
340 |
341 | # By default, the webserver shows paused DAGs. Flip this to hide paused
342 | # DAGs by default
343 | hide_paused_dags_by_default = False
344 |
345 | # Consistent page size across all listing views in the UI
346 | page_size = 100
347 |
348 | # Use FAB-based webserver with RBAC feature
349 | rbac = False
350 |
351 | # Define the color of navigation bar
352 | navbar_color = #007A87
353 |
354 | # Default dagrun to show in UI
355 | default_dag_run_display_number = 25
356 |
357 | # Enable werkzeug ``ProxyFix`` middleware for reverse proxy
358 | enable_proxy_fix = False
359 |
360 | # Number of values to trust for ``X-Forwarded-For``.
361 | # More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/
362 | proxy_fix_x_for = 1
363 |
364 | # Number of values to trust for ``X-Forwarded-Proto``
365 | proxy_fix_x_proto = 1
366 |
367 | # Number of values to trust for ``X-Forwarded-Host``
368 | proxy_fix_x_host = 1
369 |
370 | # Number of values to trust for ``X-Forwarded-Port``
371 | proxy_fix_x_port = 1
372 |
373 | # Number of values to trust for ``X-Forwarded-Prefix``
374 | proxy_fix_x_prefix = 1
375 |
376 | # Set secure flag on session cookie
377 | cookie_secure = False
378 |
379 | # Set samesite policy on session cookie
380 | cookie_samesite =
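# Example (the value must be one Flask/Werkzeug understands, such as Lax or Strict):
# cookie_samesite = Lax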
381 |
382 | # Default setting for wrap toggle on DAG code and TI log views.
383 | default_wrap = False
384 |
385 | # Allow the UI to be rendered in a frame
386 | x_frame_enabled = True
387 |
388 | # Send anonymous user activity to your analytics tool
389 | # choose from google_analytics, segment, or metarouter
390 | # analytics_tool =
391 |
392 | # Unique ID of your account in the analytics tool
393 | # analytics_id =
394 |
395 | # Update FAB permissions and sync security manager roles
396 | # on webserver startup
397 | update_fab_perms = True
398 |
399 | # Minutes of non-activity before logged out from UI
400 | # 0 means never get forcibly logged out
401 | force_log_out_after = 0
402 |
403 | # The UI cookie lifetime in days
404 | session_lifetime_days = 30
405 |
406 | [email]
407 | email_backend = airflow.utils.email.send_email_smtp
408 |
409 | [smtp]
410 |
411 | # If you want airflow to send emails on retries, failure, and you want to use
412 | # the airflow.utils.email.send_email_smtp function, you have to configure an
413 | # smtp server here
414 | smtp_host = localhost
415 | smtp_starttls = True
416 | smtp_ssl = False
417 | # Example: smtp_user = airflow
418 | # smtp_user =
419 | # Example: smtp_password = airflow
420 | # smtp_password =
421 | smtp_port = 25
422 | smtp_mail_from = airflow@example.com
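# A minimal sketch for an external SMTP relay; host, credentials and port below are
# placeholders, not values from this project. Keeping them commented leaves email disabled.
# smtp_host = smtp.example.com
# smtp_starttls = True
# smtp_ssl = False
# smtp_user = airflow@example.com
# smtp_password = replace-me
# smtp_port = 587
# smtp_mail_from = airflow@example.com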
423 |
424 | [sentry]
425 |
426 | # Sentry (https://docs.sentry.io) integration
427 | sentry_dsn =
428 |
429 | [celery]
430 |
431 | # This section only applies if you are using the CeleryExecutor in
432 | # ``[core]`` section above
433 | # The app name that will be used by celery
434 | celery_app_name = airflow.executors.celery_executor
435 |
436 | # The concurrency that will be used when starting workers with the
437 | # ``airflow celery worker`` command. This defines the number of task instances that
438 | # a worker will take, so size up your workers based on the resources on
439 | # your worker box and the nature of your tasks
440 | worker_concurrency = 16
441 |
442 | # The maximum and minimum concurrency that will be used when starting workers with the
443 | # ``airflow celery worker`` command (always keep minimum processes, but grow
444 | # to maximum if necessary). Note the value should be max_concurrency,min_concurrency
445 | # Pick these numbers based on resources on worker box and the nature of the task.
446 | # If autoscale option is available, worker_concurrency will be ignored.
447 | # http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale
448 | # Example: worker_autoscale = 16,12
449 | worker_autoscale = 16,12
450 |
451 | # When you start an airflow worker, airflow starts a tiny web server
452 | # subprocess to serve the worker's local log files to the airflow main
453 | # web server, which then builds pages and sends them to users. This defines
454 | # the port on which the logs are served. It needs to be unused, and open and
455 | # visible from the main web server so that it can connect to the workers.
456 | worker_log_server_port = 8793
457 |
458 | # The Celery broker URL. Celery supports RabbitMQ, Redis and experimentally
459 | # a sqlalchemy database. Refer to the Celery documentation for more
460 | # information.
461 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#broker-settings
462 | broker_url = redis://redis:6379/1
463 |
464 | # The Celery result_backend. When a job finishes, it needs to update the
465 | # metadata of the job. Therefore it will post a message on a message bus,
466 | # or insert it into a database (depending on the backend).
467 | # This status is used by the scheduler to update the state of the task
468 | # The use of a database is highly recommended
469 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-result-backend-settings
470 | result_backend = db+postgresql://airflow:airflow@postgres/airflow
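# If you run RabbitMQ instead of Redis, the equivalent (hypothetical) settings would
# look like this; the URL schemes follow Celery's standard broker/result backend formats:
# broker_url = amqp://guest:guest@rabbitmq:5672//
# result_backend = db+postgresql://airflow:airflow@postgres/airflow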
471 |
472 | # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start
473 | # it ``airflow flower``. This defines the IP that Celery Flower runs on
474 | flower_host = 0.0.0.0
475 |
476 | # The root URL for Flower
477 | # Example: flower_url_prefix = /flower
478 | flower_url_prefix =
479 |
480 | # This defines the port that Celery Flower runs on
481 | flower_port = 5555
482 |
483 | # Securing Flower with Basic Authentication
484 | # Accepts user:password pairs separated by a comma
485 | # Example: flower_basic_auth = user1:password1,user2:password2
486 | flower_basic_auth =
487 |
488 | # Default queue that tasks get assigned to and that workers listen on.
489 | default_queue = default
490 |
491 | # How many processes CeleryExecutor uses to sync task state.
492 | # 0 means to use max(1, number of cores - 1) processes.
493 | sync_parallelism = 0
494 |
495 | # Import path for celery configuration options
496 | celery_config_options = airflow.config_templates.default_celery.DEFAULT_CELERY_CONFIG
497 |
498 | # In case of using SSL
499 | ssl_active = False
500 | ssl_key =
501 | ssl_cert =
502 | ssl_cacert =
503 |
504 | # Celery Pool implementation.
505 | # Choices include: prefork (default), eventlet, gevent or solo.
506 | # See:
507 | # https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency
508 | # https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html
509 | pool = prefork
510 |
511 | # The number of seconds to wait before timing out ``send_task_to_executor`` or
512 | # ``fetch_celery_task_state`` operations.
513 | operation_timeout = 2
514 |
515 | [celery_broker_transport_options]
516 |
517 | # This section is for specifying options which can be passed to the
518 | # underlying celery broker transport. See:
519 | # http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options
520 | # The visibility timeout defines the number of seconds to wait for the worker
521 | # to acknowledge the task before the message is redelivered to another worker.
522 | # Make sure to increase the visibility timeout to match the time of the longest
523 | # ETA you're planning to use.
524 | # visibility_timeout is only supported for Redis and SQS celery brokers.
525 | # See:
526 | # http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options
527 | # Example: visibility_timeout = 21600
528 | # visibility_timeout =
529 |
530 | [dask]
531 |
532 | # This section only applies if you are using the DaskExecutor in
533 | # [core] section above
534 | # The IP address and port of the Dask cluster's scheduler.
535 | cluster_address = 127.0.0.1:8786
536 |
537 | # TLS/ SSL settings to access a secured Dask scheduler.
538 | tls_ca =
539 | tls_cert =
540 | tls_key =
541 |
542 | [scheduler]
543 | # Task instances listen for external kill signal (when you clear tasks
544 | # from the CLI or the UI), this defines the frequency at which they should
545 | # listen (in seconds).
546 | job_heartbeat_sec = 5
547 |
548 | # The scheduler constantly tries to trigger new tasks (look at the
549 | # scheduler section in the docs for more information). This defines
550 | # how often the scheduler should run (in seconds).
551 | scheduler_heartbeat_sec = 5
552 |
553 | # After how much time (in seconds) the scheduler should terminate
554 | # -1 indicates to run continuously (see also num_runs)
555 | run_duration = -1
556 |
557 | # The number of times to try to schedule each DAG file
558 | # -1 indicates unlimited number
559 | num_runs = -1
560 |
561 | # The number of seconds to wait between consecutive DAG file processing
562 | processor_poll_interval = 1
563 |
564 | # After how much time (in seconds) new DAGs should be picked up from the filesystem
565 | min_file_process_interval = 0
566 |
567 | # How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes.
568 | dag_dir_list_interval = 300
569 |
570 | # How often should stats be printed to the logs. Setting to 0 will disable printing stats
571 | print_stats_interval = 30
572 |
573 | # If the last scheduler heartbeat happened more than scheduler_health_check_threshold
574 | # ago (in seconds), scheduler is considered unhealthy.
575 | # This is used by the health check in the "/health" endpoint
576 | scheduler_health_check_threshold = 30
577 | child_process_log_directory = /usr/local/airflow/logs/scheduler
578 |
579 | # Local task jobs periodically heartbeat to the DB. If the job has
580 | # not heartbeat in this many seconds, the scheduler will mark the
581 | # associated task instance as failed and will re-schedule the task.
582 | scheduler_zombie_task_threshold = 300
583 |
584 | # Turn off scheduler catchup by setting this to False.
585 | # Default behavior is unchanged and
586 | # Command Line Backfills still work, but the scheduler
587 | # will not do scheduler catchup if this is False,
588 | # however it can be set on a per DAG basis in the
589 | # DAG definition (catchup)
590 | catchup_by_default = True
591 |
592 | # This changes the batch size of queries in the scheduling main loop.
593 | # If this is too high, SQL query performance may be impacted by one
594 | # or more of the following:
595 | # - reversion to full table scan
596 | # - complexity of query predicate
597 | # - excessive locking
598 | # Additionally, you may hit the maximum allowable query length for your db.
599 | # Set this to 0 for no limit (not advised)
600 | max_tis_per_query = 512
601 |
602 | # Statsd (https://github.com/etsy/statsd) integration settings
603 | statsd_on = False
604 | statsd_host = localhost
605 | statsd_port = 8125
606 | statsd_prefix = airflow
607 |
608 | # If you want to avoid sending all the available metrics to StatsD,
609 | # you can configure an allow list of prefixes to send only the metrics that
610 | # start with the elements of the list (e.g: scheduler,executor,dagrun)
611 | statsd_allow_list =
612 |
613 | # The scheduler can run multiple threads in parallel to schedule dags.
614 | # This defines how many threads will run.
615 | max_threads = 2
616 | authenticate = False
617 |
618 | # Turn off scheduler use of cron intervals by setting this to False.
619 | # DAGs submitted manually in the web UI or with trigger_dag will still run.
620 | use_job_schedule = True
621 |
622 | # Allow externally triggered DagRuns for Execution Dates in the future
623 | # Only has effect if schedule_interval is set to None in DAG
624 | allow_trigger_in_future = False
625 |
626 | [ldap]
627 | # set this to ldaps://<your.ldap.server>:<port>
628 | uri =
629 | user_filter = objectClass=*
630 | user_name_attr = uid
631 | group_member_attr = memberOf
632 | superuser_filter =
633 | data_profiler_filter =
634 | bind_user = cn=Manager,dc=example,dc=com
635 | bind_password = insecure
636 | basedn = dc=example,dc=com
637 | cacert = /etc/ca/ldap_ca.crt
638 | search_scope = LEVEL
639 |
640 | # This setting allows the use of LDAP servers that either return a
641 | # broken schema, or do not return a schema.
642 | ignore_malformed_schema = False
643 |
644 | [mesos]
645 | # Mesos master address which MesosExecutor will connect to.
646 | master = localhost:5050
647 |
648 | # The framework name which Airflow scheduler will register itself as on mesos
649 | framework_name = Airflow
650 |
651 | # Number of cpu cores required for running one task instance using
652 | # 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
653 | # command on a mesos slave
654 | task_cpu = 1
655 |
656 | # Memory in MB required for running one task instance using
657 | # 'airflow run <dag_id> <task_id> <execution_date> --local -p <pickle_id>'
658 | # command on a mesos slave
659 | task_memory = 256
660 |
661 | # Enable framework checkpointing for mesos
662 | # See http://mesos.apache.org/documentation/latest/slave-recovery/
663 | checkpoint = False
664 |
665 | # Failover timeout in milliseconds.
666 | # When checkpointing is enabled and this option is set, Mesos waits
667 | # until the configured timeout for
668 | # the MesosExecutor framework to re-register after a failover. Mesos
669 | # shuts down running tasks if the
670 | # MesosExecutor framework fails to re-register within this timeframe.
671 | # Example: failover_timeout = 604800
672 | # failover_timeout =
673 |
674 | # Enable framework authentication for mesos
675 | # See http://mesos.apache.org/documentation/latest/configuration/
676 | authenticate = False
677 |
678 | # Mesos credentials, if authentication is enabled
679 | # Example: default_principal = admin
680 | # default_principal =
681 | # Example: default_secret = admin
682 | # default_secret =
683 |
684 | # Optional Docker Image to run on slave before running the command
685 | # This image should be accessible from the mesos slave, i.e. the mesos slave
686 | # should be able to pull this docker image before executing the command.
687 | # Example: docker_image_slave = puckel/docker-airflow
688 | # docker_image_slave =
689 |
690 | [kerberos]
691 | ccache = /tmp/airflow_krb5_ccache
692 |
693 | # gets augmented with fqdn
694 | principal = airflow
695 | reinit_frequency = 3600
696 | kinit_path = kinit
697 | keytab = airflow.keytab
698 |
699 | [github_enterprise]
700 | api_rev = v3
701 |
702 | [admin]
703 | # UI to hide sensitive variable fields when set to True
704 | hide_sensitive_variable_fields = True
705 |
706 | [elasticsearch]
707 | # Elasticsearch host
708 | host =
709 |
710 | # Format of the log_id, which is used to query for a given tasks logs
711 | log_id_template = {{dag_id}}-{{task_id}}-{{execution_date}}-{{try_number}}
712 |
713 | # Used to mark the end of a log stream for a task
714 | end_of_log_mark = end_of_log
715 |
716 | # Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id
717 | # Code will construct log_id using the log_id template from the argument above.
718 | # NOTE: The code will prefix the https:// automatically, don't include that here.
719 | frontend =
720 |
721 | # Write the task logs to the stdout of the worker, rather than the default files
722 | write_stdout = False
723 |
724 | # Instead of the default log formatter, write the log lines as JSON
725 | json_format = False
726 |
727 | # Log fields to also attach to the json output, if enabled
728 | json_fields = asctime, filename, lineno, levelname, message
729 |
730 | [elasticsearch_configs]
731 | use_ssl = False
732 | verify_certs = True
733 |
734 | [kubernetes]
735 | # The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run
736 | worker_container_repository =
737 | worker_container_tag =
738 | worker_container_image_pull_policy = IfNotPresent
739 |
740 | # If True (default), worker pods will be deleted upon termination
741 | delete_worker_pods = True
742 |
743 | # Number of Kubernetes Worker Pod creation calls per scheduler loop
744 | worker_pods_creation_batch_size = 1
745 |
746 | # The Kubernetes namespace where airflow workers should be created. Defaults to ``default``
747 | namespace = default
748 |
749 | # The name of the Kubernetes ConfigMap containing the Airflow Configuration (this file)
750 | # Example: airflow_configmap = airflow-configmap
751 | airflow_configmap =
752 |
753 | # The name of the Kubernetes ConfigMap containing ``airflow_local_settings.py`` file.
754 | #
755 | # For example:
756 | #
757 | # ``airflow_local_settings_configmap = "airflow-configmap"`` if you have the following ConfigMap.
758 | #
759 | # ``airflow-configmap.yaml``:
760 | #
761 | # .. code-block:: yaml
762 | #
763 | # ---
764 | # apiVersion: v1
765 | # kind: ConfigMap
766 | # metadata:
767 | # name: airflow-configmap
768 | # data:
769 | # airflow_local_settings.py: |
770 | # def pod_mutation_hook(pod):
771 | # ...
772 | # airflow.cfg: |
773 | # ...
774 | # Example: airflow_local_settings_configmap = airflow-configmap
775 | airflow_local_settings_configmap =
776 |
777 | # If the docker image already contains DAGs, set this to ``True`` and the worker will
778 | # search for DAGs in dags_folder;
779 | # otherwise use git sync or a dags volume claim to mount DAGs
780 | dags_in_image = False
781 |
782 | # For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs
783 | dags_volume_subpath =
784 |
785 | # For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path)
786 | dags_volume_claim =
787 |
788 | # For volume mounted logs, the worker will look in this subpath for logs
789 | logs_volume_subpath =
790 |
791 | # A shared volume claim for the logs
792 | logs_volume_claim =
793 |
794 | # For DAGs mounted via a hostPath volume (mutually exclusive with volume claim and git-sync)
795 | # Useful in local environment, discouraged in production
796 | dags_volume_host =
797 |
798 | # A hostPath volume for the logs
799 | # Useful in local environment, discouraged in production
800 | logs_volume_host =
801 |
802 | # A list of configMapsRefs to envFrom. If more than one configMap is
803 | # specified, provide a comma separated list: configmap_a,configmap_b
804 | env_from_configmap_ref =
805 |
806 | # A list of secretRefs to envFrom. If more than one secret is
807 | # specified, provide a comma separated list: secret_a,secret_b
808 | env_from_secret_ref =
809 |
810 | # Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim)
811 | git_repo =
812 | git_branch =
813 | git_subpath =
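# A minimal git-sync sketch; the repository URL and branch below are placeholders, not
# values from this project. DAG files are assumed to live at the repository root, so
# git_subpath stays empty; credentials come from git_user/git_password or
# git_sync_credentials_secret further down.
# git_repo = https://github.com/example/dags.git
# git_branch = master
# git_subpath =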
814 |
815 | # The specific rev or hash the git_sync init container will checkout
816 | # This becomes GIT_SYNC_REV environment variable in the git_sync init container for worker pods
817 | git_sync_rev =
818 |
819 | # Use git_user and git_password for user authentication or git_ssh_key_secret_name
820 | # and git_ssh_key_secret_key for SSH authentication
821 | git_user =
822 | git_password =
823 | git_sync_root = /git
824 | git_sync_dest = repo
825 |
826 | # Mount point of the volume if git-sync is being used,
827 | # e.g. /usr/local/airflow/dags
828 | git_dags_folder_mount_point =
829 |
830 | # To get Git-sync SSH authentication set up follow this format
831 | #
832 | # ``airflow-secrets.yaml``:
833 | #
834 | # .. code-block:: yaml
835 | #
836 | # ---
837 | # apiVersion: v1
838 | # kind: Secret
839 | # metadata:
840 | # name: airflow-secrets
841 | # data:
842 | # # key needs to be gitSshKey
843 | # gitSshKey: <base64_encoded_data>
844 | # Example: git_ssh_key_secret_name = airflow-secrets
845 | git_ssh_key_secret_name =
846 |
847 | # To get Git-sync SSH authentication set up follow this format
848 | #
849 | # ``airflow-configmap.yaml``:
850 | #
851 | # .. code-block:: yaml
852 | #
853 | # ---
854 | # apiVersion: v1
855 | # kind: ConfigMap
856 | # metadata:
857 | # name: airflow-configmap
858 | # data:
859 | # known_hosts: |
860 | # github.com ssh-rsa <...>
861 | # airflow.cfg: |
862 | # ...
863 | # Example: git_ssh_known_hosts_configmap_name = airflow-configmap
864 | git_ssh_known_hosts_configmap_name =
865 |
866 | # To give the git_sync init container credentials via a secret, create a secret
867 | # with two fields: GIT_SYNC_USERNAME and GIT_SYNC_PASSWORD (example below) and
868 | # add ``git_sync_credentials_secret = <secret_name>`` to your airflow config under the
869 | # ``kubernetes`` section
870 | #
871 | # Secret Example:
872 | #
873 | # .. code-block:: yaml
874 | #
875 | # ---
876 | # apiVersion: v1
877 | # kind: Secret
878 | # metadata:
879 | # name: git-credentials
880 | # data:
881 | # GIT_SYNC_USERNAME: <base64_encoded_git_username>
882 | # GIT_SYNC_PASSWORD: <base64_encoded_git_password>
883 | git_sync_credentials_secret =
884 |
885 | # For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync
886 | git_sync_container_repository = k8s.gcr.io/git-sync
887 | git_sync_container_tag = v3.1.1
888 | git_sync_init_container_name = git-sync-clone
889 | git_sync_run_as_user = 65533
890 |
891 | # The name of the Kubernetes service account to be associated with airflow workers, if any.
892 | # Service accounts are required for workers that require access to secrets or cluster resources.
893 | # See the Kubernetes RBAC documentation for more:
894 | # https://kubernetes.io/docs/admin/authorization/rbac/
895 | worker_service_account_name =
896 |
897 | # Any image pull secrets to be given to worker pods. If more than one secret is
898 | # required, provide a comma separated list: secret_a,secret_b
899 | image_pull_secrets =
900 |
901 | # GCP Service Account Keys to be provided to tasks run on Kubernetes Executors
902 | # Should be supplied in the format: key-name-1:key-path-1,key-name-2:key-path-2
903 | gcp_service_account_keys =
904 |
905 | # Use the service account kubernetes gives to pods to connect to kubernetes cluster.
906 | # It's intended for clients that expect to be running inside a pod running on kubernetes.
907 | # It will raise an exception if called from a process not running in a kubernetes environment.
908 | in_cluster = True
909 |
910 | # When running with in_cluster=False change the default cluster_context or config_file
911 | # options to the Kubernetes client. Leave these blank to use the default behaviour, like ``kubectl`` has.
912 | # cluster_context =
913 | # config_file =
914 |
915 | # Affinity configuration as a single line formatted JSON object.
916 | # See the affinity model for top-level key names (e.g. ``nodeAffinity``, etc.):
917 | # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core
918 | affinity =
919 |
920 | # A list of toleration objects as a single line formatted JSON array
921 | # See:
922 | # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core
923 | tolerations =
924 |
925 | # Keyword parameters to pass while calling a kubernetes client core_v1_api methods
926 | # from Kubernetes Executor provided as a single line formatted JSON dictionary string.
927 | # List of supported params are similar for all core_v1_apis, hence a single config
928 | # variable for all apis.
929 | # See:
930 | # https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py
931 | # Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely
932 | # for kubernetes api responses, which will cause the scheduler to hang.
933 | # The timeout is specified as [connect timeout, read timeout]
934 | kube_client_request_args = {{"_request_timeout" : [60,60] }}
935 |
936 | # Specifies the uid to run the first process of the worker pods containers as
937 | run_as_user =
938 |
939 | # Specifies a gid to associate with all containers in the worker pods
940 | # if using a git_ssh_key_secret_name use an fs_group
941 | # that allows for the key to be read, e.g. 65533
942 | fs_group =
943 |
944 | [kubernetes_node_selectors]
945 |
946 | # The Key-value pairs to be given to worker pods.
947 | # The worker pods will be scheduled to the nodes of the specified key-value pairs.
948 | # Should be supplied in the format: key = value
949 |
950 | [kubernetes_annotations]
951 |
952 | # The Key-value annotations pairs to be given to worker pods.
953 | # Should be supplied in the format: key = value
954 |
955 | [kubernetes_environment_variables]
956 |
957 | # The scheduler sets the following environment variables into your workers. You may define as
958 | # many environment variables as needed and the kubernetes launcher will set them in the launched workers.
959 | # Environment variables in this section are defined as follows
960 | # ``<environment_variable_key> = <environment_variable_value>``
961 | #
962 | # For example if you wanted to set an environment variable with value `prod` and key
963 | # ``ENVIRONMENT`` you would follow the following format:
964 | # ENVIRONMENT = prod
965 | #
966 | # Additionally you may override worker airflow settings with the ``AIRFLOW__<section>__<key>``
967 | # formatting as supported by airflow normally.
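# A sketch following the format described above; both values are illustrative only:
# ENVIRONMENT = prod
# AIRFLOW__CORE__DAG_CONCURRENCY = 32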
968 |
969 | [kubernetes_secrets]
970 |
971 | # The scheduler mounts the following secrets into your workers as they are launched by the
972 | # scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
973 | # defined secrets and mount them as secret environment variables in the launched workers.
974 | # Secrets in this section are defined as follows
975 | # ``<environment_variable_mount> = <kubernetes_secret_object>=<kubernetes_secret_key>``
976 | #
977 | # For example if you wanted to mount a kubernetes secret key named ``postgres_password`` from the
978 | # kubernetes secret object ``airflow-secret`` as the environment variable ``POSTGRES_PASSWORD`` into
979 | # your workers you would follow the following format:
980 | # ``POSTGRES_PASSWORD = airflow-secret=postgres_credentials``
981 | #
982 | # Additionally you may override worker airflow settings with the ``AIRFLOW__<section>__<key>``
983 | # formatting as supported by airflow normally.
984 |
985 | [kubernetes_labels]
986 |
987 | # The Key-value pairs to be given to worker pods.
988 | # The worker pods will be given these static labels, as well as some additional dynamic labels
989 | # to identify the task.
990 | # Should be supplied in the format: ``key = value``
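# For example, to tag every worker pod with a (hypothetical) static label:
# environment = production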
--------------------------------------------------------------------------------