├── .jujuignore
├── requirements.txt
├── tests
├── unit
│ ├── test_config
│ │ ├── test_templates.tmpl
│ │ ├── alertmanager_default.yml
│ │ ├── alertmanager_invalid.yml
│ │ ├── alertmanager.yml
│ │ └── alertmanager_with_templates.yml
│ ├── test_config_dir_watcher.py
│ ├── test_charm_non_leader_unit.py
│ └── test_charm_leader_unit.py
└── integration
│ ├── alertmanager.py
│ └── test_integration.py
├── .gitignore
├── renovate.json
├── src
├── alertmanager.yml
├── config_dir_watcher.py
└── charm.py
├── charmcraft.yaml
├── config.yaml
├── .github
├── pull_request_template.md
├── workflows
│ ├── codeql-analysis.yml
│ ├── promote.yaml
│ └── ci.yaml
└── ISSUE_TEMPLATE
│ └── bug_report.md
├── metadata.yaml
├── pyproject.toml
├── tox.ini
├── CONTRIBUTING.md
├── icon.svg
├── README.md
├── LICENSE
└── lib
└── charms
├── observability_libs
└── v1
│ └── kubernetes_service_patch.py
└── alertmanager_k8s
└── v0
└── alertmanager_remote_configuration.py
/.jujuignore:
--------------------------------------------------------------------------------
1 | /venv
2 | *.py[cod]
3 | *.charm
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | lightkube
2 | lightkube-models
3 | ops
4 | PyYAML
5 | watchdog
6 |
--------------------------------------------------------------------------------
/tests/unit/test_config/test_templates.tmpl:
--------------------------------------------------------------------------------
1 | {{define "myTemplate"}}do something else{{end}}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | venv/
2 | build/
3 | *.charm
4 |
5 | .coverage
6 | __pycache__/
7 | *.py[cod]
8 |
9 | .idea
10 | .mypy_cache
11 | .tox
12 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 | "extends": [
4 | "config:base"
5 | ]
6 | }
7 |
--------------------------------------------------------------------------------
/src/alertmanager.yml:
--------------------------------------------------------------------------------
1 | route:
2 | receiver: null_receiver
3 | group_by:
4 | - alertname
5 | group_wait: 10s
6 | group_interval: 10s
7 | repeat_interval: 1h
8 | receivers:
9 | - name: null_receiver
--------------------------------------------------------------------------------
/tests/unit/test_config/alertmanager_default.yml:
--------------------------------------------------------------------------------
1 | route:
2 | receiver: test_receiver
3 | group_by:
4 | - alertname
5 | group_wait: 1234s
6 | group_interval: 4321s
7 | repeat_interval: 1111h
8 | receivers:
9 | - name: test_receiver
10 |
--------------------------------------------------------------------------------
/charmcraft.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2022 Canonical Ltd.
2 | # See LICENSE file for licensing details.
3 |
4 | type: charm
5 | bases:
6 | - build-on:
7 | - name: "ubuntu"
8 | channel: "22.04"
9 | run-on:
10 | - name: "ubuntu"
11 | channel: "22.04"
12 |
--------------------------------------------------------------------------------
/tests/unit/test_config/alertmanager_invalid.yml:
--------------------------------------------------------------------------------
1 | whatever:
2 | that: makes
3 | this_config:
4 | - invalid
5 | route:
6 | receiver: test_receiver
7 | group_by:
8 | - alertname
9 | group_wait: 1234s
10 | group_interval: 4321s
11 | repeat_interval: 1111h
12 | receivers:
13 | - name: test_receiver
14 |
--------------------------------------------------------------------------------
/tests/unit/test_config/alertmanager.yml:
--------------------------------------------------------------------------------
1 | global:
2 | http_config:
3 | tls_config:
4 | insecure_skip_verify: true
5 | receivers:
6 | - name: dummy
7 | webhook_configs:
8 | - url: http://127.0.0.1:5001/
9 | route:
10 | group_by:
11 | - juju_application
12 | - juju_model
13 | - juju_model_uuid
14 | group_interval: 5m
15 | group_wait: 30s
16 | receiver: dummy
17 | repeat_interval: 1h
18 |
--------------------------------------------------------------------------------
/config.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2022 Canonical Ltd.
2 | # See LICENSE file for licensing details.
3 |
4 | options:
5 | multitenant_label:
6 | type: string
7 | description: |
8 | Alertmanager Configurer has been designed to support multiple tenants. In a multitenant
9 | Alertmanager Configurer setup, each alert is first routed on the tenancy label, and then
10 | the routing tree is distinct for each tenant.
11 | default: ""
12 |
--------------------------------------------------------------------------------
/tests/unit/test_config/alertmanager_with_templates.yml:
--------------------------------------------------------------------------------
1 | global:
2 | http_config:
3 | tls_config:
4 | insecure_skip_verify: true
5 | receivers:
6 | - name: dummy
7 | webhook_configs:
8 | - url: http://127.0.0.1:5001/
9 | route:
10 | group_by:
11 | - juju_application
12 | - juju_model
13 | - juju_model_uuid
14 | group_interval: 5m
15 | group_wait: 30s
16 | receiver: dummy
17 | repeat_interval: 1h
18 | templates:
19 | - ./tests/unit/test_config/test_templates.tmpl
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | # Description
2 |
3 | Please include a summary of the change. Please also include relevant motivation and context. List any dependencies that are required for this change.
4 |
5 | ## Checklist
6 |
7 | - [ ] My code follows the [style guidelines](/CONTRIBUTING.md) of this project.
8 | - [ ] I have performed a self-review of my own code.
9 | - [ ] I have made corresponding changes to the documentation.
10 | - [ ] I have added tests that validate the behaviour of the software.
11 | - [ ] I validated that new and existing unit tests pass locally with my changes.
12 | - [ ] Any dependent changes have been merged and published in downstream modules.
13 | - [ ] I have bumped the version of any required library.
14 |
--------------------------------------------------------------------------------
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | name: "CodeQL"
2 |
3 | on:
4 | push:
5 | schedule:
6 | - cron: '0 0 * * *'
7 |
8 | permissions:
9 | security-events:
10 | write
11 |
12 | jobs:
13 | analyze:
14 | name: Analyze
15 | runs-on: ubuntu-22.04
16 |
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | language: ['python']
21 |
22 | steps:
23 | - name: Checkout repository
24 | uses: actions/checkout@v3
25 |
26 | - name: Initialize CodeQL
27 | uses: github/codeql-action/init@v2
28 | with:
29 | languages: ${{ matrix.language }}
30 |
31 | - name: Autobuild
32 | uses: github/codeql-action/autobuild@v2
33 |
34 | - name: Perform CodeQL Analysis
35 | uses: github/codeql-action/analyze@v2
36 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a bug report to help us improve
4 | title: ""
5 | labels: ["bug"]
6 | assignees: ''
7 | ---
8 |
9 | #### Describe the bug
10 |
11 |
12 | #### To Reproduce
13 |
14 |
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | #### Expected behavior
21 |
22 |
23 | #### Screenshots
24 |
25 |
26 | #### Logs
27 |
28 |
29 | #### Environment
30 |
31 | - Charm / library version (if relevant):
32 | - Juju version (output from `juju --version`):
33 | - Cloud Environment:
34 | - Kubernetes version (output from `kubectl version --short`):
35 |
36 | #### Additional context
37 |
38 |
39 |
--------------------------------------------------------------------------------
/.github/workflows/promote.yaml:
--------------------------------------------------------------------------------
1 | name: Promote Charm
2 |
3 | on:
4 | workflow_dispatch:
5 | inputs:
6 | promotion:
7 | type: choice
8 | description: Channel to promote from
9 | required: true
10 | options:
11 | - edge -> beta
12 | - beta -> candidate
13 | - candidate -> stable
14 |
15 | jobs:
16 | promote:
17 | name: Promote Charm
18 | runs-on: ubuntu-22.04
19 | steps:
20 | - name: Checkout
21 | uses: actions/checkout@v3
22 | - name: Set target channel
23 | env:
24 | PROMOTE_FROM: ${{ github.event.inputs.promotion }}
25 | run: |
26 | if [ "${PROMOTE_FROM}" == "edge -> beta" ]; then
27 | echo "promote-from=edge" >> ${GITHUB_ENV}
28 | echo "promote-to=beta" >> ${GITHUB_ENV}
29 | elif [ "${PROMOTE_FROM}" == "beta -> candidate" ]; then
30 | echo "promote-from=beta" >> ${GITHUB_ENV}
31 | echo "promote-to=candidate" >> ${GITHUB_ENV}
32 | elif [ "${PROMOTE_FROM}" == "candidate -> stable" ]; then
33 | echo "promote-from=candidate" >> ${GITHUB_ENV}
34 | echo "promote-to=stable" >> ${GITHUB_ENV}
35 | fi
36 | - name: Promote Charm
37 | uses: canonical/charming-actions/release-charm@2.2.5
38 | with:
39 | credentials: ${{ secrets.CHARMHUB_TOKEN }}
40 | github-token: ${{ secrets.GITHUB_TOKEN }}
41 | destination-channel: latest/${{ env.promote-to }}
42 | origin-channel: latest/${{ env.promote-from }}
43 | charmcraft-channel: latest/stable
44 |
--------------------------------------------------------------------------------
/metadata.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2022 Canonical Ltd.
2 | # See LICENSE file for licensing details.
3 |
4 | name: alertmanager-configurer-k8s
5 | display-name: Alertmanager Configurer
6 | summary: |
7 | Provides an HTTP-based API for managing Alertmanager configuration.
8 | description: |
9 | The Alertmanager Configurer Charmed Operator provides an HTTP-based API for managing Alertmanager
10 | configuration.
11 |
12 | The Juju charm in this repository has been designed to supplement the alertmanager-k8s charm.
13 | It leverages the alertmanager_remote_configuration interface, provided by the alertmanager-k8s,
14 | to send the configuration over to the Alertmanager inside the Juju relation data bag.
15 |
16 | The full description of the API is available at
17 | https://github.com/facebookarchive/prometheus-configmanager/blob/main/alertmanager/docs/swagger-v1.yml.
18 | website: https://charmhub.io/alertmanager-configurer-k8s
19 | source: https://github.com/canonical/alertmanager-configurer-k8s-operator
20 | issues: https://github.com/canonical/alertmanager-configurer-k8s-operator/issues
21 |
22 | containers:
23 | alertmanager-configurer:
24 | resource: alertmanager-configurer-k8s-image
25 | mounts:
26 | - storage: config
27 | location: /etc/alertmanager
28 | dummy-http-server:
29 | resource: dummy-http-server-image
30 |
31 | provides:
32 | alertmanager-configurer:
33 | interface: alertmanager_configurer
34 | alertmanager:
35 | interface: alertmanager_remote_configuration
36 |
37 | storage:
38 | config:
39 | type: filesystem
40 | location: /etc/alertmanager
41 |
42 | resources:
43 | alertmanager-configurer-k8s-image:
44 | type: oci-image
45 | description: OCI image for alertmanager-configurer
46 | upstream-source: docker.io/facebookincubator/alertmanager-configurer:1.0.4
47 | dummy-http-server-image:
48 | type: oci-image
49 | description: Container image for the dummy HTTP server
50 | upstream-source: ghcr.io/canonical/200-ok:main
51 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | # Copyright 2022 Canonical Ltd.
2 | # See LICENSE file for licensing details.
3 |
4 | # Testing tools configuration
5 | [tool.coverage.run]
6 | branch = true
7 |
8 | [tool.coverage.report]
9 | show_missing = true
10 |
11 | # Formatting tools configuration
12 | [tool.black]
13 | line-length = 99
14 | target-version = ["py38"]
15 |
16 | [tool.isort]
17 | profile = "black"
18 |
19 | # Linting tools configuration
20 | [tool.flake8]
21 | max-line-length = 99
22 | max-doc-length = 99
23 | max-complexity = 10
24 | exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"]
25 | select = ["E", "W", "F", "C", "N", "R", "D", "H"]
26 | # Ignore W503, E501 because using black creates errors with this
27 | # Ignore D107 Missing docstring in __init__
28 | ignore = ["W503", "E501", "D107"]
29 | # D100, D101, D102, D103: Ignore missing docstrings in tests
30 | per-file-ignores = ["tests/*:D100,D101,D102,D103"]
31 | docstring-convention = "google"
32 | # Check for properly formatted copyright header in each file
33 | copyright-check = "True"
34 | copyright-author = "Canonical Ltd."
35 | copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s"
36 |
37 | # Static analysis tools configuration
38 | [tool.mypy]
39 | pretty = true
40 | python_version = 3.8
41 | mypy_path = "src:lib"
42 | follow_imports = "normal"
43 | warn_redundant_casts = true
44 | warn_unused_ignores = true
45 | warn_unused_configs = true
46 | show_traceback = true
47 | show_error_codes = true
48 | namespace_packages = true
49 | explicit_package_bases = true
50 | check_untyped_defs = true
51 | allow_redefinition = true
52 |
53 | # Ignore libraries that do not have type hint nor stubs
54 | [[tool.mypy.overrides]]
55 | module = ["ops.*", "lightkube.*", "git.*", "pytest_operator.*", "validators.*"]
56 | ignore_missing_imports = true
57 |
58 | [[tool.mypy.overrides]]
59 | module = ["charms.alertmanager_k8s.*", "charms.observability_libs.*"]
60 | follow_imports = "silent"
61 | warn_unused_ignores = false
62 |
63 | [tool.pytest.ini_options]
64 | minversion = "6.0"
65 | log_cli_level = "INFO"
66 |
--------------------------------------------------------------------------------
/tests/unit/test_config_dir_watcher.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2023 Canonical Ltd.
3 | # See LICENSE file for licensing details.
4 |
5 | import unittest
6 | from unittest.mock import patch
7 |
8 | from ops import testing
9 |
10 | from charm import AlertmanagerConfigurerOperatorCharm
11 | from config_dir_watcher import AlertmanagerConfigDirWatcher
12 |
13 |
class TestConfigDirWatcher(unittest.TestCase):
    @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
    def setUp(self):
        self.harness = testing.Harness(AlertmanagerConfigurerOperatorCharm)
        self.harness.begin()

    def _start_watchdog_and_capture_popen_args(self, popen_mock, watch_dir):
        """Starts the watchdog for `watch_dir` and returns the args Popen was called with."""
        watcher = AlertmanagerConfigDirWatcher(self.harness.charm, watch_dir)
        watcher.start_watchdog()
        popen_mock.assert_called_once()
        return popen_mock.call_args_list[0].kwargs["args"]

    @patch("pathlib.Path.exists")
    @patch("subprocess.Popen")
    @patch("config_dir_watcher.LOG_FILE_PATH")
    def test_given_config_dir_watcher_and_juju_exec_exists_when_start_watchdog_then_correct_subprocess_is_started(
        self, _, patched_popen, patched_path_exists
    ):
        # When /usr/bin/juju-exec exists, the watchdog dispatches events via juju-exec.
        patched_path_exists.return_value = True

        popen_args = self._start_watchdog_and_capture_popen_args(
            patched_popen, "/whatever/watch/dir"
        )

        self.assertEqual(
            popen_args,
            [
                "/usr/bin/python3",
                "src/config_dir_watcher.py",
                "/whatever/watch/dir",
                "/usr/bin/juju-exec",
                self.harness.charm.unit.name,
                self.harness.charm.charm_dir,
            ],
        )

    @patch("pathlib.Path.exists")
    @patch("subprocess.Popen")
    @patch("config_dir_watcher.LOG_FILE_PATH")
    def test_given_config_dir_watcher_and_juju_exec_does_not_exist_when_start_watchdog_then_correct_subprocess_is_started(
        self, _, patched_popen, patched_path_exists
    ):
        # When juju-exec is absent, the watchdog falls back to the legacy juju-run binary.
        patched_path_exists.return_value = False

        popen_args = self._start_watchdog_and_capture_popen_args(
            patched_popen, "/whatever/watch/dir"
        )

        self.assertEqual(
            popen_args,
            [
                "/usr/bin/python3",
                "src/config_dir_watcher.py",
                "/whatever/watch/dir",
                "/usr/bin/juju-run",
                self.harness.charm.unit.name,
                self.harness.charm.charm_dir,
            ],
        )
65 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # Copyright 2021 Canonical Ltd.
2 | # See LICENSE file for licensing details.
3 |
4 | [tox]
5 | skipsdist=True
6 | skip_missing_interpreters = True
7 | envlist = lint, static, unit
8 |
9 | [vars]
10 | src_path = {toxinidir}/src
11 | tst_path = {toxinidir}/tests
12 | all_path = {[vars]src_path} {[vars]tst_path}
13 |
14 | [testenv]
15 | basepython = python3
16 | setenv =
17 | PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path}
18 | PYTHONBREAKPOINT=ipdb.set_trace
19 | PY_COLORS=1
20 | passenv =
21 | PYTHONPATH
22 | HOME
23 | PATH
24 | CHARM_BUILD_DIR
25 | MODEL_SETTINGS
26 | HTTP_PROXY
27 | HTTPS_PROXY
28 | NO_PROXY
29 |
30 | [testenv:fmt]
31 | description = Apply coding style standards to code
32 | deps =
33 | black
34 | isort
35 | commands =
36 | isort {[vars]all_path}
37 | black {[vars]all_path}
38 |
39 | [testenv:lint]
40 | description = Check code against coding style standards
41 | deps =
42 | black
43 | flake8==4.0.1
44 | flake8-docstrings
45 | flake8-copyright
46 | flake8-builtins
47 | pyproject-flake8
48 | pep8-naming
49 | isort
50 | codespell
51 | commands =
52 | codespell . --skip .git --skip .tox --skip build --skip lib --skip venv --skip .mypy_cache
53 | # pflake8 wrapper supports config from pyproject.toml
54 | pflake8 {[vars]all_path}
55 | isort --check-only --diff {[vars]all_path}
56 | black --check --diff {[vars]all_path}
57 |
58 | [testenv:static]
59 | description = Run static analysis checks
60 | deps =
61 | mypy
62 | types-PyYAML
63 | types-requests
64 | types-setuptools
65 | types-toml
66 | # pip-check-reqs does not yet work with recent pip
67 | pip-check-reqs
68 | pip<=21.1.3
69 | -r{toxinidir}/requirements.txt
70 | commands =
71 | pip-missing-reqs {toxinidir}/src {toxinidir}/lib --requirements-file={toxinidir}/requirements.txt
72 | pip-extra-reqs {toxinidir}/src {toxinidir}/lib --requirements-file={toxinidir}/requirements.txt
73 | mypy {[vars]src_path} {posargs}
74 |
75 | [testenv:unit]
76 | description = Run unit tests
77 | deps =
78 | pytest
79 | coverage[toml]
80 | validators
81 | -r{toxinidir}/requirements.txt
82 | commands =
83 | coverage run \
84 | --source={[vars]src_path} \
85 | -m pytest -v --tb native --log-cli-level=INFO -s {posargs} {[vars]tst_path}/unit
86 | coverage report
87 |
88 | [testenv:integration]
89 | description = Run integration tests
90 | deps =
91 | deepdiff
92 | juju
93 | pytest
94 | pytest-operator
95 | pytest-httpserver
96 | commands =
97 | pytest --asyncio-mode=auto -v --tb native --log-cli-level=INFO -s {posargs} {toxinidir}/tests/integration
98 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to alertmanager-configurer-k8s-operator
2 |
3 | ## Overview
4 |
5 | This document explains the processes and practices recommended for contributing enhancements
6 | or bug fixing to the Alertmanager Configurer Charmed Operator.
7 |
8 | ## Setup
9 |
10 | A typical setup using [snaps](https://snapcraft.io/) can be found in the
11 | [Juju docs](https://juju.is/docs/sdk/dev-setup).
12 |
13 | ## Developing
14 |
15 | - Prior to getting started on a pull request, we first encourage you to open an issue explaining
16 | the use case or bug. This gives other contributors a chance to weigh in early in the process.
17 | - To author PRs you should be familiar with [juju](https://juju.is/#what-is-juju) and
18 | [how operators are written](https://juju.is/docs/sdk).
19 | - All enhancements require review before being merged. Besides the code quality and test coverage,
20 | the review will also take into account the resulting user experience for Juju administrators
21 | using this charm. To be able to merge you would have to rebase onto the `main` branch. We do this
22 | to avoid merge commits and to have a linear Git history.
23 | - We use [`tox`](https://tox.wiki/en/latest/#) to manage all virtualenvs for the development
24 | lifecycle.
25 |
26 | ### Testing
27 | Unit tests are written with the Operator Framework [test harness] and integration tests are written
28 | using [pytest-operator] and [python-libjuju].
29 |
30 | The default test environments - lint, static and unit - will run if you start `tox` without
31 | arguments.
32 |
33 | You can also manually run a specific test environment:
34 |
35 | ```shell
36 | tox -e fmt # update your code according to linting rules
37 | tox -e lint # code style
38 | tox -e static # static analysis
39 | tox -e unit # unit tests
40 | tox -e integration # integration tests
41 | ```
42 |
43 | `tox` creates a virtual environment for every tox environment defined in [tox.ini](tox.ini).
44 | To activate a tox environment for manual testing,
45 |
46 | ```shell
47 | source .tox/unit/bin/activate
48 | ```
49 |
50 | ## Build charm
51 |
52 | Build the charm in this git repository using
53 |
54 | ```shell
55 | charmcraft pack
56 | ```
57 |
58 | which will create a `*.charm` file you can deploy with:
59 |
60 | ```shell
61 | juju deploy ./alertmanager-configurer-k8s_ubuntu-22.04-amd64.charm \
62 | --resource alertmanager-configurer-image=docker.io/facebookincubator/alertmanager-configurer:1.0.4 \
63 | --resource dummy-http-server-image=ghcr.io/canonical/200-ok:main
64 | ```
65 |
66 | [test harness]: https://ops.readthedocs.io/en/latest/#module-ops.testing
67 | [pytest-operator]: https://github.com/charmed-kubernetes/pytest-operator/blob/main/docs/reference.md
68 | [python-libjuju]: https://pythonlibjuju.readthedocs.io/en/latest/
69 |
--------------------------------------------------------------------------------
/tests/integration/alertmanager.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2022 Canonical Ltd.
3 | # See LICENSE file for licensing details.
4 |
5 | import requests
6 |
7 |
class Alertmanager:
    """Minimal HTTP client for querying a running Alertmanager instance."""

    def __init__(self, host="localhost", port=9093):
        """Utility to manage an Alertmanager application.

        Args:
            host: Optional; host address of Alertmanager application.
            port: Optional; port on which Alertmanager service is exposed.
        """
        self.base_url = f"http://{host}:{port}"

    async def is_ready(self) -> bool:
        """Send a GET request to check readiness.

        Returns:
            True if Alertmanager is ready (returned 200 OK); False otherwise.
        """
        url = f"{self.base_url}/-/ready"

        # Bug fix: `requests.get` returns a synchronous `Response`, which is
        # NOT an async context manager (no `__aenter__`), so the previous
        # `async with requests.get(url) as response:` raised at runtime.
        # A plain blocking call inside the coroutine is correct here.
        response = requests.get(url)
        return response.status_code == 200

    async def config(self) -> str:
        """Send a GET request to get Alertmanager configuration.

        Returns:
            str: YAML config in string format or empty string
        """
        url = f"{self.base_url}/api/v2/status"
        # The /api/v2/status response carries the active configuration as a
        # YAML string under config.original, e.g.:
        # {
        #   "cluster": {"peers": [], "status": "disabled"},
        #   "config": {"original": "global:\n  resolve_timeout: 5m\n..."},
        #   "uptime": "2022-08-19T10:12:20.523Z",
        #   "versionInfo": {"version": "0.23.0", ...}
        # }
        response = requests.get(url)
        response.raise_for_status()
        result = response.json()
        return result["config"]["original"]
--------------------------------------------------------------------------------
/icon.svg:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yaml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | schedule:
6 | - cron: '0 0 * * *'
7 |
8 | jobs:
9 | static-analysis:
10 | name: Static analysis
11 | runs-on: ubuntu-22.04
12 | steps:
13 | - uses: actions/checkout@v3
14 | - name: Install tox
15 | run: pip install tox
16 | - name: Run tests using tox
17 | run: tox -e static
18 |
19 | lint:
20 | name: Lint
21 | runs-on: ubuntu-22.04
22 | steps:
23 | - uses: actions/checkout@v3
24 | - name: Install tox
25 | run: pip install tox
26 | - name: Run tests using tox
27 | run: tox -e lint
28 |
29 | unit-test:
30 | name: Unit tests
31 | runs-on: ubuntu-22.04
32 | steps:
33 | - uses: actions/checkout@v3
34 | - name: Install tox
35 | run: pip install tox
36 | - name: Run tests using tox
37 | run: tox -e unit
38 |
39 | integration-test:
40 | name: Integration tests
41 | runs-on: ubuntu-22.04
42 | steps:
43 | - name: Checkout
44 | uses: actions/checkout@v3
45 | - name: Setup operator environment
46 | uses: charmed-kubernetes/actions-operator@main
47 | with:
48 | provider: microk8s
49 | channel: 1.26-strict/stable
50 | juju-channel: 3.1/stable
51 | - name: Run integration tests
52 | run: tox -e integration
53 | - name: Archive Tested Charm
54 | uses: actions/upload-artifact@v3
55 | if: ${{ github.ref_name == 'main' }}
56 | with:
57 | name: tested-charm
58 | path: .tox/**/alertmanager-configurer-k8s_ubuntu-22.04-amd64.charm
59 | retention-days: 5
60 | - name: Archive charmcraft logs
61 | if: failure()
62 | uses: actions/upload-artifact@v3
63 | with:
64 | name: charmcraft-logs
65 | path: /home/runner/.local/state/charmcraft/log/*.log
66 | - name: Archive juju crashdump
67 | if: failure()
68 | uses: actions/upload-artifact@v3
69 | with:
70 | name: juju-crashdump
71 | path: juju-crashdump-*.tar.xz
72 |
73 | publish-charm:
74 | name: Publish Charm
75 | needs: integration-test
76 | runs-on: ubuntu-22.04
77 | if: ${{ github.ref_name == 'main' && github.event.schedule != ''}}
78 | steps:
79 | - name: Checkout
80 | uses: actions/checkout@v3
81 | - name: Install charmcraft
82 | run: sudo snap install charmcraft --classic
83 | - name: Fetch Tested Charm
84 | uses: actions/download-artifact@v3
85 | with:
86 | name: tested-charm
87 | - name: Move charm in current directory
88 | run: find ./ -name alertmanager-configurer-k8s_ubuntu-22.04-amd64.charm -exec mv -t ./ {} \;
89 | - name: Select Charmhub channel
90 | uses: canonical/charming-actions/channel@2.2.5
91 | id: channel
92 | - name: Upload charm to Charmhub
93 | uses: canonical/charming-actions/upload-charm@2.2.5
94 | with:
95 | credentials: "${{ secrets.CHARMHUB_TOKEN }}"
96 | github-token: "${{ secrets.GITHUB_TOKEN }}"
97 | channel: "${{ steps.channel.outputs.name }}"
98 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Alertmanager Configurer Charmed Operator
2 |
3 | ## Description
4 |
5 | The Alertmanager Configurer Charmed Operator provides an HTTP-based API for managing
6 | [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) configuration.
7 |
8 | This charm has been designed to supplement the
9 | [alertmanager-k8s] charm. It leverages the `alertmanager_remote_configuration` interface, provided
10 | by the [alertmanager-k8s], to send the configuration over to the Alertmanager inside the
11 | [Juju](https://juju.is/) relation data bag.
12 |
13 | Full description of the API is available in [github].
14 |
15 | [alertmanager-k8s]: https://github.com/canonical/alertmanager-k8s-operator
16 | [github]: https://github.com/facebookarchive/prometheus-configmanager/blob/main/alertmanager/docs/swagger-v1.yml
17 |
18 | ## Usage
19 |
20 | ### Deployment
21 |
22 | > **NOTE**: This charm is only compatible with Juju 3.x!
23 |
24 | The Alertmanager Configurer Charmed Operator may be deployed using the Juju command line as in:
25 |
26 | ```bash
27 | juju deploy alertmanager-configurer-k8s --trust
28 | ```
29 |
30 | ### Relating to the Alertmanager
31 |
32 | ```bash
33 | juju deploy alertmanager-k8s --channel=edge --trust
34 | juju relate alertmanager-configurer-k8s:alertmanager alertmanager-k8s:remote-configuration
35 | ```
36 |
37 | ### Configuring Alertmanager via alertmanager-configurer
38 |
39 | Alertmanager Configurer exposes an HTTP API which allows managing Alertmanager's configuration.
40 | The API is available at port 9101 on the IP address of the charm unit. This unit and its IP address
41 | may be determined using the `juju status` command.
42 | Full description of Alertmanager Configurer's API is available in
43 | [github](https://github.com/facebookarchive/prometheus-configmanager/blob/main/alertmanager/docs/swagger-v1.yml).
44 |
45 | Alertmanager Configurer has been designed to support multiple tenants. In a multitenant
46 | Alertmanager Configurer setup, each alert is first routed on the tenancy label, and then
47 | the routing tree is distinct for each tenant.
48 |
49 | ### Examples:
50 |
51 | Get tenants:
52 |
53 | ```bash
54 | curl -X GET http://<alertmanager-configurer-ip>:9101/v1/tenants
55 | ```
56 |
57 | Create Alertmanager's global config:
58 |
59 | ```yaml
60 | global:
61 | resolve_timeout: 5m
62 | http_config:
63 | tls_config:
64 | insecure_skip_verify: true
65 | ```
66 |
67 | ```bash
68 | curl -X POST http://<alertmanager-configurer-ip>:9101/v1/global
69 | -H 'Content-Type: application/json'
70 | -d '{"resolve_timeout": "5m", "http_config": {"tls_config": {"insecure_skip_verify": true}}}'
71 | ```
72 |
73 | Get Alertmanager's global config:
74 |
75 | ```bash
76 | curl -X GET http://<alertmanager-configurer-ip>:9101/v1/global
77 | ```
78 |
79 | Create receiver:
80 |
81 | ```yaml
82 | receivers:
83 | - name: _example
84 | webhook_configs:
85 | - send_resolved: false
86 | url: http://receiver_example.com
87 | ```
88 |
89 | ```bash
90 | curl -X POST http://<alertmanager-configurer-ip>:9101/v1/<tenant_id>/receiver
91 | -H 'Content-Type: application/json'
92 | -d '{"name": "example", "webhook_configs": [{"url": "http://receiver_example.com"}]}'
93 | ```
94 |
95 | Delete receiver:
96 |
97 | ```bash
98 | curl -X DELETE http://<alertmanager-configurer-ip>:9101/v1/<tenant_id>/receiver/<receiver_name>
99 | ```
100 |
101 | ## OCI Images
102 |
103 | - [facebookincubator/alertmanager-configurer](https://hub.docker.com/r/facebookincubator/alertmanager-configurer)
104 | - [canonical/200-ok](https://github.com/canonical/200-ok/pkgs/container/200-ok)
105 |
--------------------------------------------------------------------------------
/src/config_dir_watcher.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2022 Canonical Ltd.
3 | # See LICENSE file for licensing details.
4 |
5 | """Alertmanager configuration dir watcher module.
6 |
7 | This module implements custom Juju event (alertmanager_config_changed) fired upon any change
8 | in a given directory mounted to the workload container. It is based on `watchdog`.
9 | In this particular case, it is used by the alertmanager-configurer-k8s-operator charm to detect
10 | changes of the Alertmanager's configuration. Thanks to this mechanism, Alertmanager Configurer
11 | knows when to update the configuration of the Alertmanager.
12 | """
13 |
14 | import logging
15 | import os
16 | import subprocess
17 | import sys
18 | import time
19 | from pathlib import Path
20 |
21 | from ops.charm import CharmBase, CharmEvents
22 | from ops.framework import EventBase, EventSource, Object
23 | from watchdog.events import FileSystemEventHandler
24 | from watchdog.observers import Observer
25 |
26 | logger = logging.getLogger(__name__)
27 |
28 |
class AlertmanagerConfigFileChangedEvent(EventBase):
    """Custom event signalling that the Alertmanager configuration file changed."""
33 |
34 |
class AlertmanagerConfigFileChangedCharmEvents(CharmEvents):
    """Event descriptor for events emitted when Alertmanager config file changes.

    Exposes `alertmanager_config_file_changed`, which the detached watchdog
    process fires (via juju-run/juju-exec dispatch) on config-dir changes.
    """

    alertmanager_config_file_changed = EventSource(AlertmanagerConfigFileChangedEvent)
39 |
40 |
# The watchdog subprocess appends its stdout and stderr here (see start_watchdog).
LOG_FILE_PATH = "/var/log/alertmanager-configurer-watchdog.log"
42 |
43 |
class AlertmanagerConfigDirWatcher(Object):
    """Watches a directory in the workload for Alertmanager config changes.

    Spawns a detached background process running this same module as a script;
    that process dispatches the custom `alertmanager_config_file_changed` Juju
    event whenever a file in the watched directory changes.
    """

    def __init__(self, charm: CharmBase, config_dir: str):
        """Constructs the watcher.

        Args:
            charm: charm on whose behalf events will be dispatched
            config_dir: absolute path of the directory to watch
        """
        super().__init__(charm, None)
        self._charm = charm
        self._config_dir = config_dir

    def start_watchdog(self) -> None:
        """Wraps watchdog in a new background process.

        The subprocess outlives the current hook; its output is appended to
        LOG_FILE_PATH.
        """
        logger.info("Starting alert rules watchdog.")

        # We need to trick Juju into thinking that we are not running
        # in a hook context, as Juju will disallow use of juju-run.
        new_env = os.environ.copy()
        new_env.pop("JUJU_CONTEXT_ID", None)

        # Juju 3.x renamed juju-run to juju-exec; prefer the new name if present.
        juju_bin = (
            "/usr/bin/juju-exec" if Path("/usr/bin/juju-exec").exists() else "/usr/bin/juju-run"
        )
        # Open the log file in a context manager so our copy of the descriptor
        # is closed (no fd leak); the spawned child keeps its own inherited copy.
        with open(LOG_FILE_PATH, "a") as log_file:
            pid = subprocess.Popen(
                args=[
                    "/usr/bin/python3",
                    "src/config_dir_watcher.py",
                    self._config_dir,
                    juju_bin,
                    self._charm.unit.name,
                    str(self._charm.charm_dir),
                ],
                stdout=log_file,
                stderr=subprocess.STDOUT,
                env=new_env,
            ).pid

        logger.info("Started Alertmanager's config watchdog process with PID %s.", pid)
80 |
81 |
def dispatch(run_cmd: str, unit: str, charm_dir: str):
    """Fires alert_rules_changed Juju event."""
    # Run the charm's dispatch script in the given unit's context with the
    # hook path pointing at our custom event.
    hook_cmd = f"JUJU_DISPATCH_PATH=hooks/alertmanager_config_file_changed {charm_dir}/dispatch"
    subprocess.run([run_cmd, "-u", unit, hook_cmd])
86 |
87 |
class Handler(FileSystemEventHandler):
    """Handler for changes in the watched directory."""

    def __init__(self, run_cmd: str, unit: str, charm_dir: str):
        """Stores everything needed to dispatch the custom Juju event later.

        Args:
            run_cmd: path to juju-run/juju-exec binary
            unit: name of the unit to dispatch the event to
            charm_dir: charm directory containing the dispatch script
        """
        # Cooperative init: the base handler's __init__ must run too.
        super().__init__()
        self.run_cmd = run_cmd
        self.unit = unit
        self.charm_dir = charm_dir

    def on_closed(self, event):
        """Watchdog's callback ran on any change in the watched directory.

        `on_closed` fires when a file opened for writing is closed, i.e. after
        a write has completed.
        """
        dispatch(self.run_cmd, self.unit, self.charm_dir)
99 |
100 |
def main():
    """Starts watchdog observing the config dir passed on the command line."""
    config_dir, run_cmd, unit, charm_dir = sys.argv[1:]

    observer = Observer()
    event_handler = Handler(run_cmd, unit, charm_dir)
    observer.schedule(event_handler, config_dir, recursive=True)
    observer.start()
    try:
        # The observer works in a background thread; just keep the process
        # alive until it is interrupted or an error occurs.
        while True:
            time.sleep(5)
    except KeyboardInterrupt:
        # KeyboardInterrupt is not a subclass of Exception, so it must be
        # handled explicitly for the observer to be stopped on Ctrl-C/SIGINT.
        observer.stop()
    except Exception:
        observer.stop()
        logger.exception("Watchdog error! Watchdog stopped!")
    finally:
        # Wait for the observer thread to terminate cleanly.
        observer.join()


if __name__ == "__main__":
    main()
119 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright 2022 Canonical Ltd.
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/tests/integration/test_integration.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2022 Canonical Ltd.
3 | # See LICENSE file for licensing details.
4 |
5 |
6 | import logging
7 | from copy import deepcopy
8 | from pathlib import Path
9 | from typing import cast
10 |
11 | import pytest
12 | import requests
13 | import yaml
14 | from alertmanager import Alertmanager
15 | from deepdiff import DeepDiff
16 | from pytest_operator.plugin import OpsTest # type: ignore[import] # noqa: F401
17 |
18 | logger = logging.getLogger(__name__)
19 |
# Charm metadata drives the app name and image resources used at deploy time.
METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
ALERTMANAGER_CONFIGURER_APP_NAME = METADATA["name"]
# Config shipped with the charm; tests assert Alertmanager ends up running it.
ALERTMANAGER_CONFIGURER_DEFAULT_CONFIG = yaml.safe_load(Path("./src/alertmanager.yml").read_text())
ALERTMANAGER_APP_NAME = "alertmanager-k8s"
WAIT_FOR_STATUS_TIMEOUT = 5 * 60  # seconds
DUMMY_HTTP_SERVER_PORT = 80
TEST_TENANT = "test-tenant"
TEST_RECEIVER_NAME = "example"
28 |
29 |
class TestAlertmanagerConfigurerOperatorCharm:
    """End-to-end tests against a real Juju model (pytest-operator).

    NOTE: tests are order-dependent — the module-scoped `setup` fixture deploys
    both charms once, and later tests build on state left by earlier ones
    (e.g. the relation created in the second test, the receiver created and
    then deleted in the fifth/sixth).
    """

    @pytest.fixture(scope="module")
    @pytest.mark.abort_on_fail
    async def setup(self, ops_test: OpsTest):
        """Deploys alertmanager-k8s and the locally-built configurer charm."""
        # Short update-status interval keeps status transitions fast in tests.
        await ops_test.model.set_config({"update-status-hook-interval": "2s"})
        await self._deploy_alertmanager_k8s(ops_test)
        charm = await ops_test.build_charm(".")
        resources = {
            f"{ALERTMANAGER_CONFIGURER_APP_NAME}-image": METADATA["resources"][
                f"{ALERTMANAGER_CONFIGURER_APP_NAME}-image"
            ]["upstream-source"],
            "dummy-http-server-image": METADATA["resources"]["dummy-http-server-image"][
                "upstream-source"
            ],
        }
        await ops_test.model.deploy(
            charm,
            resources=resources,
            application_name=ALERTMANAGER_CONFIGURER_APP_NAME,
            trust=True,
            series="focal",
        )

    @pytest.mark.abort_on_fail
    async def test_given_alertmanager_configurer_charm_is_not_related_to_alertmanager_when_charm_deployed_then_charm_goes_to_blocked_status(  # noqa: E501
        self, ops_test: OpsTest, setup
    ):
        await ops_test.model.wait_for_idle(
            apps=[ALERTMANAGER_CONFIGURER_APP_NAME],
            status="blocked",
            timeout=WAIT_FOR_STATUS_TIMEOUT,
        )

    @pytest.mark.abort_on_fail
    async def test_given_alertmanager_configurer_charm_in_blocked_status_when_alertmanager_relation_created_then_charm_goes_to_active_status(  # noqa: E501, W505
        self, ops_test: OpsTest, setup
    ):
        await ops_test.model.add_relation(
            relation1=f"{ALERTMANAGER_CONFIGURER_APP_NAME}",
            relation2=f"{ALERTMANAGER_APP_NAME}:remote-configuration",
        )
        await ops_test.model.wait_for_idle(
            apps=[ALERTMANAGER_CONFIGURER_APP_NAME],
            status="active",
            timeout=WAIT_FOR_STATUS_TIMEOUT,
        )

    @pytest.mark.abort_on_fail
    async def test_given_alertmanager_configurer_running_when_post_sent_to_the_dummy_http_server_called_then_server_responds_with_200(  # noqa: E501
        self, ops_test: OpsTest, setup
    ):
        dummy_http_server_ip = await _unit_address(ops_test, ALERTMANAGER_CONFIGURER_APP_NAME, 0)
        dummy_server_response = requests.post(
            f"http://{dummy_http_server_ip}:{DUMMY_HTTP_SERVER_PORT}"
        )
        assert dummy_server_response.status_code == 200

    @pytest.mark.abort_on_fail
    async def test_given_alertmanager_configurer_ready_when_get_alertmanager_config_then_alertmanager_has_config_from_alertmanager_configurer(  # noqa: E501
        self, ops_test: OpsTest, setup
    ):
        # Alertmanager's charm adds Juju topology labels to group_by on top of
        # whatever the configurer pushes; mirror that in the expectation.
        expected_config = deepcopy(ALERTMANAGER_CONFIGURER_DEFAULT_CONFIG)
        expected_config = await _add_juju_topology_to_group_by(expected_config)

        alertmanager_config_raw = await _get_alertmanager_config(
            ops_test, ALERTMANAGER_APP_NAME, 0
        )
        alertmanager_config = yaml.safe_load(alertmanager_config_raw)

        assert await _get_config_difs(expected_config, alertmanager_config) == {}

    @pytest.mark.abort_on_fail
    async def test_given_alertmanager_configurer_ready_when_new_receiver_created_then_alertmanager_config_is_updated_with_the_new_receiver(  # noqa: E501
        self, ops_test: OpsTest, setup
    ):
        test_receiver_json = {
            "name": f"{TEST_RECEIVER_NAME}",
            "webhook_configs": [{"url": "http://receiver_example.com"}],
        }
        expected_config = deepcopy(ALERTMANAGER_CONFIGURER_DEFAULT_CONFIG)
        expected_config = await _add_juju_topology_to_group_by(expected_config)
        expected_config = await _add_new_receiver(expected_config, test_receiver_json)
        alertmanager_configurer_server_ip = await _unit_address(
            ops_test, ALERTMANAGER_CONFIGURER_APP_NAME, 0
        )

        server_response = requests.post(
            f"http://{alertmanager_configurer_server_ip}:9101/v1/{TEST_TENANT}/receiver",
            json=test_receiver_json,
        )
        assert server_response.status_code == 200

        # Wait for Alertmanager to apply new config
        await ops_test.model.wait_for_idle(
            apps=[ALERTMANAGER_APP_NAME],
            status="active",
            timeout=WAIT_FOR_STATUS_TIMEOUT,
            idle_period=5,
        )

        alertmanager_config_raw = await _get_alertmanager_config(
            ops_test, ALERTMANAGER_APP_NAME, 0
        )
        alertmanager_config = yaml.safe_load(alertmanager_config_raw)

        assert await _get_config_difs(expected_config, alertmanager_config) == {}

    @pytest.mark.abort_on_fail
    async def test_given_alertmanager_configurer_ready_when_delete_receiver_then_receiver_is_removed_from_alertmanager_config(  # noqa: E501
        self, ops_test: OpsTest, setup
    ):
        # Deleting the receiver created by the previous test should bring the
        # config back to the (topology-extended) default.
        expected_config = deepcopy(ALERTMANAGER_CONFIGURER_DEFAULT_CONFIG)
        expected_config = await _add_juju_topology_to_group_by(expected_config)
        alertmanager_configurer_server_ip = await _unit_address(
            ops_test, ALERTMANAGER_CONFIGURER_APP_NAME, 0
        )

        server_response = requests.delete(
            f"http://{alertmanager_configurer_server_ip}:9101/v1/{TEST_TENANT}/receiver/{TEST_RECEIVER_NAME}"  # noqa: E501, W505
        )
        assert server_response.status_code == 200

        # Wait for Alertmanager to apply new config
        await ops_test.model.wait_for_idle(
            apps=[ALERTMANAGER_APP_NAME],
            status="active",
            timeout=WAIT_FOR_STATUS_TIMEOUT,
            idle_period=5,
        )

        alertmanager_config_raw = await _get_alertmanager_config(
            ops_test, ALERTMANAGER_APP_NAME, 0
        )
        alertmanager_config = yaml.safe_load(alertmanager_config_raw)

        assert await _get_config_difs(expected_config, alertmanager_config) == {}

    @pytest.mark.abort_on_fail
    async def test_scale_up(self, ops_test: OpsTest, setup):
        await ops_test.model.applications[ALERTMANAGER_CONFIGURER_APP_NAME].scale(2)

        await ops_test.model.wait_for_idle(
            apps=[ALERTMANAGER_CONFIGURER_APP_NAME],
            status="active",
            timeout=WAIT_FOR_STATUS_TIMEOUT,
            idle_period=5,
            wait_for_exact_units=2,
        )

    @pytest.mark.xfail(reason="Bug in Juju: https://bugs.launchpad.net/juju/+bug/1977582")
    async def test_scale_down(self, ops_test: OpsTest, setup):
        await ops_test.model.applications[ALERTMANAGER_CONFIGURER_APP_NAME].scale(1)

        await ops_test.model.wait_for_idle(
            apps=[ALERTMANAGER_CONFIGURER_APP_NAME],
            status="active",
            timeout=60,
            wait_for_exact_units=1,
        )

    @staticmethod
    async def _deploy_alertmanager_k8s(ops_test: OpsTest):
        """Deploys alertmanager-k8s from Charmhub's stable channel."""
        await ops_test.model.deploy(
            ALERTMANAGER_APP_NAME,
            application_name=ALERTMANAGER_APP_NAME,
            channel="stable",
            trust=True,
            series="focal",
        )
199 |
200 |
async def _unit_address(ops_test: OpsTest, app_name: str, unit_num: int) -> str:
    """Find unit address for any application.

    Args:
        ops_test: pytest-operator plugin
        app_name: string name of application
        unit_num: integer number of a juju unit

    Returns:
        str: unit address as a string
    """
    full_status = await ops_test.model.get_status()
    unit_name = f"{app_name}/{unit_num}"
    return full_status["applications"][app_name]["units"][unit_name]["address"]
214 |
215 |
async def _get_alertmanager_config(ops_test: OpsTest, app_name: str, unit_num: int) -> str:
    """Fetch Alertmanager config.

    Args:
        ops_test: pytest-operator plugin
        app_name: string name of the Alertmanager application
        unit_num: integer number of an Alertmanager juju unit

    Returns:
        str: YAML config in string format or empty string
    """
    unit_ip = await _unit_address(ops_test, app_name, unit_num)
    client = Alertmanager(host=unit_ip)
    return await client.config()
231 |
232 |
233 | async def _add_juju_topology_to_group_by(config: dict) -> dict:
234 | route = cast(dict, config.get("route", {}))
235 | route["group_by"] = list(
236 | set(route.get("group_by", [])).union(["juju_application", "juju_model", "juju_model_uuid"])
237 | )
238 | config["route"] = route
239 | return config
240 |
241 |
async def _add_new_receiver(config: dict, receiver_json: dict) -> dict:
    """Appends a tenant-prefixed copy of `receiver_json` to the config.

    Mirrors what alertmanager-configurer does when a receiver is created:
    the receiver name is prefixed with the tenant id and default webhook
    config values are filled in.

    Args:
        config: Alertmanager config to extend (mutated and returned)
        receiver_json: receiver definition as sent to the configurer API

    Returns:
        dict: the updated config
    """
    receiver = deepcopy(receiver_json)
    new_receiver = await _update_receiver_name_with_tenant_id(receiver)
    new_receiver = await _add_default_webhook_configs(new_receiver)
    # Default to an empty list so configs without any receivers don't crash
    # with AttributeError (`None.append`).
    receivers = config.get("receivers", [])
    receivers.append(new_receiver)
    config["receivers"] = receivers
    return config
250 |
251 |
async def _update_receiver_name_with_tenant_id(receiver: dict) -> dict:
    """Prefixes the receiver's name with the test tenant id (in place)."""
    receiver["name"] = f"{TEST_TENANT}_{receiver['name']}"
    return receiver
257 |
258 |
259 | async def _add_default_webhook_configs(receiver: dict) -> dict:
260 | default_webhook_configs = {
261 | "send_resolved": False,
262 | "http_config": {"follow_redirects": True},
263 | "max_alerts": 0,
264 | }
265 | webhook_configs = receiver["webhook_configs"]
266 | webhook_configs[0].update(default_webhook_configs)
267 | receiver["webhook_configs"] = webhook_configs
268 | return receiver
269 |
270 |
async def _get_config_difs(expected_config: dict, actual_config: dict) -> dict:
    """Collects differences between the relevant parts of two Alertmanager configs."""
    difs: dict = {}
    # Receiver lists and label lists may come back in any order.
    difs.update(
        DeepDiff(actual_config["receivers"], expected_config["receivers"], ignore_order=True)
    )
    for unordered_key in ("receiver", "group_by"):
        difs.update(
            DeepDiff(
                actual_config["route"][unordered_key],
                expected_config["route"][unordered_key],
                ignore_order=True,
            )
        )
    # Scalar route settings are compared directly.
    for scalar_key in ("group_wait", "group_interval", "repeat_interval"):
        difs.update(
            DeepDiff(
                actual_config["route"][scalar_key],
                expected_config["route"][scalar_key],
            )
        )
    return difs
304 |
--------------------------------------------------------------------------------
/tests/unit/test_charm_non_leader_unit.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2022 Canonical Ltd.
3 | # See LICENSE file for licensing details.
4 |
5 | import unittest
6 | from unittest.mock import Mock, PropertyMock, patch
7 |
8 | from ops import testing
9 | from ops.model import ActiveStatus, BlockedStatus, WaitingStatus
10 |
11 | from charm import AlertmanagerConfigurerOperatorCharm
12 |
# Opt in to ops' can-connect simulation so container readiness can be driven
# from the tests (set_can_connect / container_pebble_ready).
testing.SIMULATE_CAN_CONNECT = True

TEST_MULTITENANT_LABEL = "some_test_label"
# Minimal charm config (a config.yaml fragment) pinning the multitenant label
# to a known value so tests can assert it ends up in the workload command line.
TEST_CONFIG = f"""options:
  multitenant_label:
    type: string
    description: |
      Alertmanager Configurer has been designed to support multiple tenants. In a multitenant
      Alertmanager Configurer setup, each alert is first routed on the tenancy label, and then
      the routing tree is distinct for each tenant.
    default: {TEST_MULTITENANT_LABEL}
"""
with open("./tests/unit/test_config/alertmanager_default.yml", "r") as default_yaml:
    TEST_ALERTMANAGER_DEFAULT_CONFIG = default_yaml.read()
# Arbitrary path used when mocking the charm's config-file property.
TEST_ALERTMANAGER_CONFIG_FILE = "/test/rules/dir/config_file.yml"
# Dotted path of the charm class, used to build @patch targets.
ALERTMANAGER_CLASS = "charm.AlertmanagerConfigurerOperatorCharm"
29 |
30 |
31 | class TestAlertmanagerConfigurerOperatorCharmNonLeader(unittest.TestCase):
    @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
    def setUp(self):
        """Builds a Harness around the charm with leadership set to False."""
        testing.SIMULATE_CAN_CONNECT = True
        self.harness = testing.Harness(AlertmanagerConfigurerOperatorCharm, config=TEST_CONFIG)
        self.addCleanup(self.harness.cleanup)
        self.harness.set_leader(False)
        self.harness.begin()
        # Cached for convenience: name of the workload container/service.
        self.alertmanager_configurer_container_name = (
            self.harness.charm.ALERTMANAGER_CONFIGURER_SERVICE_NAME
        )
42 |
    @patch("charm.AlertmanagerConfigDirWatcher")
    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_DIR", new_callable=PropertyMock)
    @patch("ops.model.Container.push", Mock())
    def test_given_alertmanager_config_directory_and_can_connect_to_workload_when_start_then_watchdog_starts_watching_alertmanager_config_directory(  # noqa: E501
        self, patched_config_dir, patched_alertmanager_config_dir_watcher
    ):
        """On `start`, the watcher must be created for the configured dir."""
        self.harness.set_can_connect(
            container=self.alertmanager_configurer_container_name, val=True
        )
        test_config_dir = "/test/rules/dir"
        patched_config_dir.return_value = test_config_dir
        self.harness.charm.on.start.emit()

        patched_alertmanager_config_dir_watcher.assert_called_with(
            self.harness.charm, test_config_dir
        )
59 |
    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_alertmanager_relation_not_created_when_pebble_ready_then_charm_goes_to_blocked_state(  # noqa: E501
        self,
    ):
        """Without the alertmanager relation, pebble-ready leads to Blocked."""
        self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)

        assert self.harness.charm.unit.status == BlockedStatus(
            "Waiting for alertmanager relation to be created"
        )
69 |
    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_alertmanager_relation_created_and_alertmanager_configurer_container_ready_but_dummy_http_server_not_yet_ready_when_pebble_ready_then_charm_goes_to_waiting_state(  # noqa: E501
        self,
    ):
        """With the relation but no dummy HTTP server, the charm Waits."""
        self.harness.add_relation("alertmanager", "alertmanager-k8s")
        self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)

        assert self.harness.charm.unit.status == WaitingStatus(
            "Waiting for the dummy HTTP server to be ready"
        )
80 |
    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIGURER_PORT", new_callable=PropertyMock)
    @patch(f"{ALERTMANAGER_CLASS}.DUMMY_HTTP_SERVER_HOST", new_callable=PropertyMock)
    @patch(f"{ALERTMANAGER_CLASS}.DUMMY_HTTP_SERVER_PORT", new_callable=PropertyMock)
    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_prometheus_relation_created_and_prometheus_configurer_container_ready_when_pebble_ready_then_pebble_plan_is_updated_with_correct_pebble_layer(  # noqa: E501
        self,
        patched_dummy_http_server_port,
        patched_dummy_http_server_host,
        patched_alertmanager_configurer_port,
        patched_alertmanager_config_file,
    ):
        """The configurer's pebble layer must reflect the patched config values."""
        test_dummy_http_server_port = 4321
        test_dummy_http_server_host = "testhost"
        test_alertmanager_configurer_port = 1234
        self.maxDiff = None
        patched_dummy_http_server_port.return_value = test_dummy_http_server_port
        patched_dummy_http_server_host.return_value = test_dummy_http_server_host
        patched_alertmanager_configurer_port.return_value = test_alertmanager_configurer_port
        patched_alertmanager_config_file.return_value = TEST_ALERTMANAGER_CONFIG_FILE
        self.harness.add_relation("alertmanager", "alertmanager-k8s")
        # The dummy server must be up first, else the charm stays in Waiting.
        self.harness.container_pebble_ready("dummy-http-server")
        expected_plan = {
            "services": {
                f"{self.alertmanager_configurer_container_name}": {
                    "override": "replace",
                    "startup": "enabled",
                    "command": "alertmanager_configurer "
                    f"-port={test_alertmanager_configurer_port} "
                    f"-alertmanager-conf={TEST_ALERTMANAGER_CONFIG_FILE} "
                    "-alertmanagerURL="
                    f"{test_dummy_http_server_host}:{test_dummy_http_server_port} "
                    f"-multitenant-label={TEST_MULTITENANT_LABEL} "
                    "-delete-route-with-receiver=true ",
                }
            }
        }

        self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)

        updated_plan = self.harness.get_container_pebble_plan(
            self.alertmanager_configurer_container_name
        ).to_dict()
        self.assertEqual(expected_plan, updated_plan)
125 |
126 | def test_given_dummy_http_server_container_ready_when_pebble_ready_then_pebble_plan_is_updated_with_correct_pebble_layer( # noqa: E501
127 | self,
128 | ):
129 | expected_plan = {
130 | "services": {
131 | "dummy-http-server": {
132 | "override": "replace",
133 | "startup": "enabled",
134 | "command": "nginx",
135 | }
136 | }
137 | }
138 | self.harness.container_pebble_ready("dummy-http-server")
139 |
140 | updated_plan = self.harness.get_container_pebble_plan("dummy-http-server").to_dict()
141 | self.assertEqual(expected_plan, updated_plan)
142 |
143 | @patch("charm.AlertmanagerConfigDirWatcher", Mock())
144 | def test_given_alertmanager_relation_created_and_alertmanager_configurer_container_ready_when_pebble_ready_then_charm_goes_to_active_state( # noqa: E501
145 | self,
146 | ):
147 | self.harness.add_relation("alertmanager", "alertmanager-k8s")
148 | self.harness.container_pebble_ready("dummy-http-server")
149 |
150 | self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)
151 |
152 | assert self.harness.charm.unit.status == ActiveStatus()
153 |
154 | @patch("ops.model.Container.push")
155 | @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_DEFAULT_CONFIG", new_callable=PropertyMock)
156 | @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
157 | @patch("charm.AlertmanagerConfigDirWatcher", Mock())
158 | def test_given_alertmanager_default_config_and_can_connect_to_workload_container_when_start_then_alertmanager_config_is_created_using_default_data( # noqa: E501
159 | self, patched_alertmanager_config_file, patched_alertmanager_default_config, patched_push
160 | ):
161 | self.harness.set_can_connect(
162 | container=self.alertmanager_configurer_container_name, val=True
163 | )
164 | patched_alertmanager_config_file.return_value = TEST_ALERTMANAGER_CONFIG_FILE
165 | patched_alertmanager_default_config.return_value = TEST_ALERTMANAGER_DEFAULT_CONFIG
166 |
167 | self.harness.charm.on.start.emit()
168 |
169 | patched_push.assert_any_call(
170 | TEST_ALERTMANAGER_CONFIG_FILE, TEST_ALERTMANAGER_DEFAULT_CONFIG
171 | )
172 |
173 | @patch("ops.model.Container.push")
174 | def test_given_alertmanager_default_config_and_cant_connect_to_workload_container_when_start_then_alertmanager_config_is_not_created( # noqa: E501
175 | self, patched_push
176 | ):
177 | self.harness.set_can_connect(
178 | container=self.alertmanager_configurer_container_name, val=False
179 | )
180 |
181 | self.harness.charm.on.start.emit()
182 |
183 | patched_push.assert_not_called()
184 |
185 | @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIGURER_SERVICE_NAME", new_callable=PropertyMock)
186 | @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIGURER_PORT", new_callable=PropertyMock)
187 | def test_given_alertmanager_configurer_service_when_alertmanager_configurer_relation_joined_then_alertmanager_configurer_service_name_and_port_are_not_pushed_to_the_relation_data_bag( # noqa: E501
188 | self, patched_alertmanager_configurer_port, patched_alertmanager_configurer_service_name
189 | ):
190 | test_alertmanager_configurer_service_name = "whatever"
191 | test_alertmanager_configurer_port = 1234
192 | patched_alertmanager_configurer_service_name.return_value = (
193 | test_alertmanager_configurer_service_name
194 | )
195 | patched_alertmanager_configurer_port.return_value = test_alertmanager_configurer_port
196 | relation_id = self.harness.add_relation(
197 | self.alertmanager_configurer_container_name, self.harness.charm.app.name
198 | )
199 | self.harness.add_relation_unit(relation_id, f"{self.harness.charm.app.name}/0")
200 |
201 | self.assertEqual(
202 | self.harness.get_relation_data(relation_id, f"{self.harness.charm.app.name}"),
203 | {},
204 | )
205 |
206 | @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
207 | @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
208 | def test_given_alertmanager_config_in_config_dir_when_alertmanager_config_file_changed_then_config_is_not_pushed_to_the_data_bag( # noqa: E501
209 | self, patched_alertmanager_config_file
210 | ):
211 | test_config_file = "./tests/unit/test_config/alertmanager.yml"
212 | patched_alertmanager_config_file.return_value = test_config_file
213 | harness = testing.Harness(AlertmanagerConfigurerOperatorCharm, config=TEST_CONFIG)
214 | self.addCleanup(harness.cleanup)
215 | harness.begin()
216 | relation_id = harness.add_relation("alertmanager", "alertmanager-k8s")
217 | harness.add_relation_unit(relation_id, "alertmanager-k8s/0")
218 |
219 | harness.charm.on.alertmanager_config_file_changed.emit()
220 |
221 | with self.assertRaises(KeyError):
222 | _ = harness.get_relation_data(relation_id, "alertmanager-configurer-k8s")[
223 | "alertmanager_config"
224 | ]
225 |
226 | @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
227 | @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
228 | def test_given_non_existent_config_file_when_alertmanager_config_file_changed_then_charm_goes_to_blocked_state( # noqa: E501
229 | self, patched_alertmanager_config_file
230 | ):
231 | test_config_file = "whatever"
232 | patched_alertmanager_config_file.return_value = test_config_file
233 | relation_id = self.harness.add_relation("alertmanager", "alertmanager-k8s")
234 | self.harness.add_relation_unit(relation_id, "alertmanager-k8s/0")
235 |
236 | self.harness.charm.on.alertmanager_config_file_changed.emit()
237 |
238 | assert self.harness.charm.unit.status == BlockedStatus(
239 | "Error reading Alertmanager config file"
240 | )
241 |
--------------------------------------------------------------------------------
/tests/unit/test_charm_leader_unit.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2022 Canonical Ltd.
3 | # See LICENSE file for licensing details.
4 |
5 | import json
6 | import unittest
7 | from unittest.mock import Mock, PropertyMock, patch
8 |
9 | import yaml
10 | from ops import testing
11 | from ops.model import ActiveStatus, BlockedStatus, WaitingStatus
12 |
13 | from charm import AlertmanagerConfigurerOperatorCharm
14 |
# Label used for multi-tenant routing in the test charm config below.
TEST_MULTITENANT_LABEL = "some_test_label"
# Minimal config schema handed to the Harness so `multitenant_label` resolves in tests.
TEST_CONFIG = f"""options:
  multitenant_label:
    type: string
    description: |
      Alertmanager Configurer has been designed to support multiple tenants. In a multitenant
      Alertmanager Configurer setup, each alert is first routed on the tenancy label, and then
      the routing tree is distinct for each tenant.
  default: {TEST_MULTITENANT_LABEL}
"""
# Default Alertmanager config fixture, read relative to the repo root (tox/pytest cwd).
with open("./tests/unit/test_config/alertmanager_default.yml", "r") as default_yaml:
    TEST_ALERTMANAGER_DEFAULT_CONFIG = default_yaml.read()
# Path patched in as ALERTMANAGER_CONFIG_FILE; never actually opened by these tests.
TEST_ALERTMANAGER_CONFIG_FILE = "/test/rules/dir/config_file.yml"
# Dotted path of the charm class, used to build @patch targets.
ALERTMANAGER_CLASS = "charm.AlertmanagerConfigurerOperatorCharm"
29 |
30 |
class TestAlertmanagerConfigurerOperatorCharmLeader(unittest.TestCase):
    """Unit tests for AlertmanagerConfigurerOperatorCharm running on a leader unit."""

    @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
    def setUp(self):
        """Builds a leader-unit harness with the test charm config."""
        testing.SIMULATE_CAN_CONNECT = True
        self.harness = testing.Harness(AlertmanagerConfigurerOperatorCharm, config=TEST_CONFIG)
        self.addCleanup(self.harness.cleanup)
        self.harness.set_leader(True)
        self.harness.begin()
        # Container name equals the service name in this charm.
        self.alertmanager_configurer_container_name = (
            self.harness.charm.ALERTMANAGER_CONFIGURER_SERVICE_NAME
        )

    @patch("charm.AlertmanagerConfigDirWatcher")
    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_DIR", new_callable=PropertyMock)
    @patch("ops.model.Container.push", Mock())
    def test_given_alertmanager_config_directory_and_can_connect_to_workload_when_start_then_watchdog_starts_watching_alertmanager_config_directory(  # noqa: E501
        self, patched_config_dir, patched_alertmanager_config_dir_watcher
    ):
        """On start, a watcher is constructed for the Alertmanager config directory."""
        self.harness.set_can_connect(
            container=self.alertmanager_configurer_container_name, val=True
        )
        test_config_dir = "/test/rules/dir"
        patched_config_dir.return_value = test_config_dir
        self.harness.charm.on.start.emit()

        patched_alertmanager_config_dir_watcher.assert_called_with(
            self.harness.charm, test_config_dir
        )

    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_alertmanager_relation_not_created_when_pebble_ready_then_charm_goes_to_blocked_state(  # noqa: E501
        self,
    ):
        """Without the alertmanager relation, pebble-ready blocks the unit."""
        self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)

        assert self.harness.charm.unit.status == BlockedStatus(
            "Waiting for alertmanager relation to be created"
        )

    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_alertmanager_relation_created_and_alertmanager_configurer_container_ready_but_dummy_http_server_not_yet_ready_when_alertmanager_configurer_pebble_ready_then_charm_goes_to_waiting_state(  # noqa: E501
        self,
    ):
        """The unit waits while the dummy HTTP server is not running yet."""
        self.harness.add_relation("alertmanager", "alertmanager-k8s")
        self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)

        assert self.harness.charm.unit.status == WaitingStatus(
            "Waiting for the dummy HTTP server to be ready"
        )

    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIGURER_PORT", new_callable=PropertyMock)
    @patch(f"{ALERTMANAGER_CLASS}.DUMMY_HTTP_SERVER_HOST", new_callable=PropertyMock)
    @patch(f"{ALERTMANAGER_CLASS}.DUMMY_HTTP_SERVER_PORT", new_callable=PropertyMock)
    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_prometheus_relation_created_and_prometheus_configurer_container_ready_when_pebble_ready_then_pebble_plan_is_updated_with_correct_pebble_layer(  # noqa: E501
        self,
        patched_dummy_http_server_port,
        patched_dummy_http_server_host,
        patched_alertmanager_configurer_port,
        patched_alertmanager_config_file,
    ):
        """Pebble-ready renders the configurer layer from the patched class constants."""
        test_dummy_http_server_port = 4321
        test_dummy_http_server_host = "testhost"
        test_alertmanager_configurer_port = 1234
        patched_dummy_http_server_port.return_value = test_dummy_http_server_port
        patched_dummy_http_server_host.return_value = test_dummy_http_server_host
        patched_alertmanager_configurer_port.return_value = test_alertmanager_configurer_port
        patched_alertmanager_config_file.return_value = TEST_ALERTMANAGER_CONFIG_FILE
        self.harness.add_relation("alertmanager", "alertmanager-k8s")
        self.harness.container_pebble_ready("dummy-http-server")
        # Trailing space in the command is deliberate: the charm's layer emits one.
        expected_plan = {
            "services": {
                f"{self.alertmanager_configurer_container_name}": {
                    "override": "replace",
                    "startup": "enabled",
                    "command": "alertmanager_configurer "
                    f"-port={test_alertmanager_configurer_port} "
                    f"-alertmanager-conf={TEST_ALERTMANAGER_CONFIG_FILE} "
                    "-alertmanagerURL="
                    f"{test_dummy_http_server_host}:{test_dummy_http_server_port} "
                    f"-multitenant-label={TEST_MULTITENANT_LABEL} "
                    "-delete-route-with-receiver=true ",
                }
            }
        }

        self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)

        updated_plan = self.harness.get_container_pebble_plan(
            self.alertmanager_configurer_container_name
        ).to_dict()
        self.assertEqual(expected_plan, updated_plan)

    def test_given_dummy_http_server_container_ready_when_pebble_ready_then_pebble_plan_is_updated_with_correct_pebble_layer(  # noqa: E501
        self,
    ):
        """Pebble-ready on the dummy HTTP server container installs an nginx service."""
        expected_plan = {
            "services": {
                "dummy-http-server": {
                    "override": "replace",
                    "startup": "enabled",
                    "command": "nginx",
                }
            }
        }
        self.harness.container_pebble_ready("dummy-http-server")

        updated_plan = self.harness.get_container_pebble_plan("dummy-http-server").to_dict()
        self.assertEqual(expected_plan, updated_plan)

    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_alertmanager_relation_created_and_alertmanager_configurer_container_ready_when_pebble_ready_then_charm_goes_to_active_state(  # noqa: E501
        self,
    ):
        """With the relation present and both containers up, the unit goes active."""
        self.harness.add_relation("alertmanager", "alertmanager-k8s")
        self.harness.set_can_connect("dummy-http-server", True)
        self.harness.container_pebble_ready("dummy-http-server")

        self.harness.container_pebble_ready(self.alertmanager_configurer_container_name)

        assert self.harness.charm.unit.status == ActiveStatus()

    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIGURER_PORT", new_callable=PropertyMock)
    def test_given_alertmanager_configurer_service_when_alertmanager_configurer_relation_joined_then_alertmanager_configurer_service_name_and_port_are_pushed_to_the_relation_data_bag(  # noqa: E501
        self, patched_alertmanager_configurer_port
    ):
        """A leader unit publishes service name and port on relation join."""
        test_alertmanager_configurer_port = 1234
        patched_alertmanager_configurer_port.return_value = test_alertmanager_configurer_port
        relation_id = self.harness.add_relation(
            self.alertmanager_configurer_container_name, self.harness.charm.app.name
        )
        self.harness.add_relation_unit(relation_id, f"{self.harness.charm.app.name}/0")

        self.assertEqual(
            self.harness.get_relation_data(relation_id, f"{self.harness.charm.app.name}"),
            {
                "service_name": self.harness.charm.app.name,
                "port": str(test_alertmanager_configurer_port),
            },
        )

    @patch("ops.model.Container.push")
    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_DEFAULT_CONFIG", new_callable=PropertyMock)
    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
    @patch("charm.AlertmanagerConfigDirWatcher", Mock())
    def test_given_alertmanager_default_config_and_can_connect_to_workload_container_when_start_then_alertmanager_config_is_created_using_default_data(  # noqa: E501
        self, patched_alertmanager_config_file, patched_alertmanager_default_config, patched_push
    ):
        """On start with a reachable workload, the default config is pushed to it."""
        self.harness.set_can_connect(
            container=self.alertmanager_configurer_container_name, val=True
        )
        patched_alertmanager_config_file.return_value = TEST_ALERTMANAGER_CONFIG_FILE
        patched_alertmanager_default_config.return_value = TEST_ALERTMANAGER_DEFAULT_CONFIG

        self.harness.charm.on.start.emit()

        patched_push.assert_any_call(
            TEST_ALERTMANAGER_CONFIG_FILE, TEST_ALERTMANAGER_DEFAULT_CONFIG
        )

    @patch("ops.model.Container.push")
    def test_given_alertmanager_default_config_and_cant_connect_to_workload_container_when_start_then_alertmanager_config_is_not_created(  # noqa: E501
        self, patched_push
    ):
        """On start with an unreachable workload container, nothing is pushed."""
        self.harness.set_can_connect(
            container=self.alertmanager_configurer_container_name, val=False
        )

        self.harness.charm.on.start.emit()

        patched_push.assert_not_called()

    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
    @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
    def test_given_non_existent_config_file_when_alertmanager_config_file_changed_then_charm_goes_to_blocked_state(  # noqa: E501
        self, patched_alertmanager_config_file
    ):
        """The unit blocks when the configured config file cannot be read."""
        test_config_file = "whatever"
        patched_alertmanager_config_file.return_value = test_config_file
        relation_id = self.harness.add_relation("alertmanager", "alertmanager-k8s")
        self.harness.add_relation_unit(relation_id, "alertmanager-k8s/0")

        self.harness.charm.on.alertmanager_config_file_changed.emit()

        assert self.harness.charm.unit.status == BlockedStatus(
            "Error reading Alertmanager config file"
        )

    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
    @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
    def test_given_alertmanager_config_in_config_dir_when_alertmanager_config_file_changed_then_config_is_pushed_to_the_data_bag(  # noqa: E501
        self, patched_alertmanager_config_file
    ):
        """A leader unit publishes the parsed config as JSON in the app data bag."""
        test_config_file = "./tests/unit/test_config/alertmanager.yml"
        patched_alertmanager_config_file.return_value = test_config_file
        # Fresh harness so ALERTMANAGER_CONFIG_FILE is patched before charm init.
        harness = testing.Harness(AlertmanagerConfigurerOperatorCharm, config=TEST_CONFIG)
        self.addCleanup(harness.cleanup)
        harness.set_leader(True)
        harness.begin()
        with open(test_config_file, "r") as config_yaml:
            expected_config = yaml.safe_load(config_yaml)
        relation_id = harness.add_relation("alertmanager", "alertmanager-k8s")
        harness.add_relation_unit(relation_id, "alertmanager-k8s/0")

        harness.charm.on.alertmanager_config_file_changed.emit()

        self.assertEqual(
            harness.get_relation_data(relation_id, "alertmanager-configurer-k8s")[
                "alertmanager_config"
            ],
            json.dumps(expected_config),
        )

    @patch(f"{ALERTMANAGER_CLASS}.ALERTMANAGER_CONFIG_FILE", new_callable=PropertyMock)
    @patch("charm.KubernetesServicePatch", lambda charm, ports: None)
    def test_given_invalid_config_when_alertmanager_config_file_changed_then_charm_goes_to_blocked_state(  # noqa: E501
        self, patched_alertmanager_config_file
    ):
        """The unit blocks when the config file contains an invalid configuration."""
        test_config_file = "./tests/unit/test_config/alertmanager_invalid.yml"
        patched_alertmanager_config_file.return_value = test_config_file
        relation_id = self.harness.add_relation("alertmanager", "alertmanager-k8s")
        self.harness.add_relation_unit(relation_id, "alertmanager-k8s/0")

        self.harness.charm.on.alertmanager_config_file_changed.emit()

        assert self.harness.charm.unit.status == BlockedStatus(
            "Invalid Alertmanager configuration"
        )
260 |
--------------------------------------------------------------------------------
/src/charm.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright 2022 Canonical Ltd.
3 | # See LICENSE file for licensing details.
4 |
5 | """Alertmanager Configurer Operator Charm."""
6 |
7 | import logging
8 | import os
9 | from typing import Union
10 |
11 | import yaml
12 | from charms.alertmanager_k8s.v0.alertmanager_remote_configuration import (
13 | ConfigReadError,
14 | RemoteConfigurationProvider,
15 | )
16 | from charms.observability_libs.v1.kubernetes_service_patch import (
17 | KubernetesServicePatch,
18 | ServicePort,
19 | )
20 | from ops.charm import CharmBase, PebbleReadyEvent, RelationJoinedEvent
21 | from ops.main import main
22 | from ops.model import (
23 | ActiveStatus,
24 | BlockedStatus,
25 | MaintenanceStatus,
26 | ModelError,
27 | WaitingStatus,
28 | )
29 | from ops.pebble import ConnectionError, Layer
30 |
31 | from config_dir_watcher import (
32 | AlertmanagerConfigDirWatcher,
33 | AlertmanagerConfigFileChangedCharmEvents,
34 | AlertmanagerConfigFileChangedEvent,
35 | )
36 |
logger = logging.getLogger(__name__)  # module-level logger for this charm
38 |
39 |
class AlertmanagerConfigurerOperatorCharm(CharmBase):
    """Alertmanager Configurer Operator Charm.

    Runs the alertmanager-configurer workload next to a dummy HTTP server and
    forwards Alertmanager configuration changes to the related Alertmanager charm
    via the remote-configuration relation.
    """

    ALERTMANAGER_CONFIG_DIR = "/etc/alertmanager/"
    ALERTMANAGER_CONFIG_FILE = os.path.join(ALERTMANAGER_CONFIG_DIR, "alertmanager.yml")
    DUMMY_HTTP_SERVER_SERVICE_NAME = "dummy-http-server"
    DUMMY_HTTP_SERVER_HOST = "localhost"
    DUMMY_HTTP_SERVER_PORT = 80
    ALERTMANAGER_CONFIGURER_SERVICE_NAME = "alertmanager-configurer"
    ALERTMANAGER_CONFIGURER_PORT = 9101
    # Default Alertmanager config shipped next to this file; read once at class
    # definition time so every instance shares the same string.
    with open(
        os.path.join(os.path.dirname(os.path.realpath(__file__)), "alertmanager.yml"),
        "r",
    ) as default_yaml:
        ALERTMANAGER_DEFAULT_CONFIG = default_yaml.read()

    # Custom event source so the config-dir watcher can emit
    # `alertmanager_config_file_changed` alongside the built-in charm events.
    on = AlertmanagerConfigFileChangedCharmEvents()

    def __init__(self, *args):
        super().__init__(*args)
        # Container, layer and service names are identical for each workload.
        self._alertmanager_configurer_container_name = (
            self._alertmanager_configurer_layer_name
        ) = self._alertmanager_configurer_service_name = self.ALERTMANAGER_CONFIGURER_SERVICE_NAME
        self._dummy_http_server_container_name = (
            self._dummy_http_server_layer_name
        ) = self._dummy_http_server_service_name = self.DUMMY_HTTP_SERVER_SERVICE_NAME
        self._alertmanager_configurer_container = self.unit.get_container(
            self._alertmanager_configurer_container_name
        )
        self._dummy_http_server_container = self.unit.get_container(
            self._dummy_http_server_container_name
        )

        self.service_patch = KubernetesServicePatch(
            charm=self,
            ports=[
                ServicePort(name="alertmanager-config", port=self.ALERTMANAGER_CONFIGURER_PORT),
                ServicePort(name="dummy-http-server", port=self.DUMMY_HTTP_SERVER_PORT),
            ],
        )
        self.remote_configuration_provider = RemoteConfigurationProvider(
            charm=self,
            alertmanager_config=yaml.safe_load(self.ALERTMANAGER_DEFAULT_CONFIG),
            relation_name="alertmanager",
        )

        self.framework.observe(self.on.start, self._on_start)
        self.framework.observe(
            self.on.alertmanager_configurer_pebble_ready,
            self._start_alertmanager_configurer,
        )
        self.framework.observe(
            self.on.dummy_http_server_pebble_ready, self._on_dummy_http_server_pebble_ready
        )
        self.framework.observe(
            self.on.alertmanager_configurer_relation_joined,
            self._on_alertmanager_configurer_relation_joined,
        )
        self.framework.observe(
            self.on.alertmanager_config_file_changed, self._on_alertmanager_config_changed
        )
        self.framework.observe(
            self.remote_configuration_provider.on.configuration_broken,
            self._on_configuration_broken,
        )

    def _on_start(self, event) -> None:
        """Event handler for the start event.

        Starts AlertmanagerConfigDirWatcher and pushes default Alertmanager config to the
        workload container upon unit start. Defers the event while the workload container
        is not yet reachable.

        Args:
            event: Juju StartEvent
        """
        if not self._alertmanager_configurer_container.can_connect():
            self.unit.status = WaitingStatus(
                "Waiting to be able to connect to alertmanager-configurer"
            )
            event.defer()
            return
        self._push_default_config_to_workload()
        watchdog = AlertmanagerConfigDirWatcher(self, self.ALERTMANAGER_CONFIG_DIR)
        watchdog.start_watchdog()

    def _start_alertmanager_configurer(
        self, event: Union[AlertmanagerConfigFileChangedEvent, PebbleReadyEvent]
    ) -> None:
        """Event handler for AlertmanagerConfigFileChangedEvent and PebbleReadyEvent Juju events.

        Checks whether all conditions to start Alertmanager Configurer are met and, if yes,
        triggers start of the alertmanager-configurer service.

        Args:
            event (AlertmanagerConfigFileChangedEvent, PebbleReadyEvent): Juju event
        """
        if not self.model.get_relation("alertmanager"):
            self.unit.status = BlockedStatus("Waiting for alertmanager relation to be created")
            event.defer()
            return
        if not self._alertmanager_configurer_container.can_connect():
            self.unit.status = WaitingStatus(
                f"Waiting for {self._alertmanager_configurer_container_name} container to be ready"
            )
            event.defer()
            return
        if not self._dummy_http_server_running:
            self.unit.status = WaitingStatus("Waiting for the dummy HTTP server to be ready")
            event.defer()
            return
        self._start_alertmanager_configurer_service()
        self.unit.status = ActiveStatus()

    def _on_dummy_http_server_pebble_ready(self, event: PebbleReadyEvent) -> None:
        """Event handler for dummy-http-server pebble ready event.

        When dummy HTTP server Pebble is ready and the container is accessible, starts the
        dummy HTTP server.

        Args:
            event: Juju PebbleReadyEvent event
        """
        if self._dummy_http_server_container.can_connect():
            self._start_dummy_http_server()
        else:
            self.unit.status = WaitingStatus(
                f"Waiting for {self._dummy_http_server_container_name} container to be ready"
            )
            event.defer()

    def _on_alertmanager_config_changed(self, event: AlertmanagerConfigFileChangedEvent) -> None:
        """Updates relation data bag with updated Alertmanager config.

        Blocks the unit if the config file cannot be read.

        Args:
            event: AlertmanagerConfigFileChangedEvent
        """
        try:
            alertmanager_config = RemoteConfigurationProvider.load_config_file(
                self.ALERTMANAGER_CONFIG_FILE
            )
            self._start_alertmanager_configurer(event)
            self.remote_configuration_provider.update_relation_data_bag(alertmanager_config)
        except ConfigReadError:
            logger.error("Error reading Alertmanager config file.")
            self.model.unit.status = BlockedStatus("Error reading Alertmanager config file")

    def _on_configuration_broken(self, _) -> None:
        """Event handler for `configuration_broken` event.

        Puts the charm in `Blocked` status to indicate that the provided config is invalid.
        """
        self.model.unit.status = BlockedStatus("Invalid Alertmanager configuration")

    def _start_alertmanager_configurer_service(self) -> None:
        """Starts Alertmanager Configurer service.

        Restarts the service only when the desired layer differs from the current plan.
        """
        plan = self._alertmanager_configurer_container.get_plan()
        layer = self._alertmanager_configurer_layer
        if plan.services != layer.services:
            self.unit.status = MaintenanceStatus(
                f"Configuring pebble layer for {self._alertmanager_configurer_service_name}"
            )
            self._alertmanager_configurer_container.add_layer(
                self._alertmanager_configurer_container_name, layer, combine=True
            )
            self._alertmanager_configurer_container.restart(
                self._alertmanager_configurer_container_name
            )
            # Lazy %-style args: the message is only formatted if the log is emitted.
            logger.info("Restarted container %s", self._alertmanager_configurer_service_name)

    def _start_dummy_http_server(self) -> None:
        """Starts dummy HTTP server service.

        Restarts the service only when the desired layer differs from the current plan.
        """
        plan = self._dummy_http_server_container.get_plan()
        layer = self._dummy_http_server_layer
        if plan.services != layer.services:
            self.unit.status = MaintenanceStatus(
                f"Configuring pebble layer for {self._dummy_http_server_service_name}"
            )
            self._dummy_http_server_container.add_layer(
                self._dummy_http_server_container_name, layer, combine=True
            )
            self._dummy_http_server_container.restart(self._dummy_http_server_service_name)
            logger.info("Restarted container %s", self._dummy_http_server_service_name)

    def _push_default_config_to_workload(self) -> None:
        """Pushes default Alertmanager config file to the workload container."""
        self._alertmanager_configurer_container.push(
            self.ALERTMANAGER_CONFIG_FILE, self._default_config
        )

    def _on_alertmanager_configurer_relation_joined(self, event: RelationJoinedEvent) -> None:
        """Handles actions taken when Alertmanager Configurer relation joins.

        Only the leader writes to the application data bag.
        """
        if not self.unit.is_leader():
            return
        self._add_service_info_to_relation_data_bag(event)

    def _add_service_info_to_relation_data_bag(self, event: RelationJoinedEvent) -> None:
        """Event handler for Alertmanager relation joined event.

        Adds information about Alertmanager Configurer service name and port to relation data
        bag.
        """
        alertmanager_configurer_relation = event.relation
        alertmanager_configurer_relation.data[self.app]["service_name"] = self.app.name
        alertmanager_configurer_relation.data[self.app]["port"] = str(
            self.ALERTMANAGER_CONFIGURER_PORT
        )

    @property
    def _alertmanager_configurer_layer(self) -> Layer:
        """Constructs the pebble layer for Alertmanager configurer.

        Returns:
            Layer: a Pebble layer specification for the Alertmanager configurer workload container.
        """
        return Layer(
            {
                "summary": "Alertmanager Configurer layer",
                "description": "Pebble config layer for Alertmanager Configurer",
                "services": {
                    self._alertmanager_configurer_service_name: {
                        "override": "replace",
                        "startup": "enabled",
                        # Trailing space is intentional: unit tests assert this exact string.
                        "command": "alertmanager_configurer "
                        f"-port={self.ALERTMANAGER_CONFIGURER_PORT} "
                        f"-alertmanager-conf={self.ALERTMANAGER_CONFIG_FILE} "
                        "-alertmanagerURL="
                        f"{self.DUMMY_HTTP_SERVER_HOST}:{self.DUMMY_HTTP_SERVER_PORT} "
                        f'-multitenant-label={self.model.config.get("multitenant_label")} '
                        "-delete-route-with-receiver=true ",
                    }
                },
            }
        )

    @property
    def _dummy_http_server_layer(self) -> Layer:
        """Constructs the pebble layer for the dummy HTTP server.

        Returns:
            Layer: a Pebble layer specification for the dummy HTTP server workload container.
        """
        return Layer(
            {
                "summary": "Dummy HTTP server pebble layer",
                "description": "Pebble layer configuration for the dummy HTTP server",
                "services": {
                    self._dummy_http_server_service_name: {
                        "override": "replace",
                        "startup": "enabled",
                        "command": "nginx",
                    }
                },
            }
        )

    @property
    def _dummy_http_server_running(self) -> bool:
        """Checks whether the dummy HTTP server is running.

        Returns:
            bool: True if the Pebble service exists, False if the container is
                unreachable or the service has not been added yet.
        """
        try:
            self._dummy_http_server_container.get_service(self._dummy_http_server_service_name)
            return True
        except (ConnectionError, ModelError):
            return False

    @property
    def _default_config(self) -> str:
        """Provides default alertmanager.yml content in case it's not passed from the Alertmanager.

        Returns:
            str: default Alertmanager config
        """
        return self.ALERTMANAGER_DEFAULT_CONFIG
309 |
310 |
# Script entry point: hand control to the ops framework's event dispatch loop.
if __name__ == "__main__":
    main(AlertmanagerConfigurerOperatorCharm)
313 |
--------------------------------------------------------------------------------
/lib/charms/observability_libs/v1/kubernetes_service_patch.py:
--------------------------------------------------------------------------------
1 | # Copyright 2021 Canonical Ltd.
2 | # See LICENSE file for licensing details.
3 |
4 | """# KubernetesServicePatch Library.
5 |
6 | This library is designed to enable developers to more simply patch the Kubernetes Service created
7 | by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
8 | service named after the application in the namespace (named after the Juju model). This service by
default contains a "placeholder" port, which is 65535/TCP.
10 |
11 | When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
12 | charm. In this case, any modifications to the default service (created during deployment), will be
13 | overwritten during a charm upgrade.
14 |
15 | When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
16 | events which applies the patch to the cluster. This should ensure that the service ports are
17 | correct throughout the charm's life.
18 |
19 | The constructor simply takes a reference to the parent charm, and a list of
20 | [`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
21 | service. For information regarding the `lightkube` `ServicePort` model, please visit the
22 | `lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
23 |
24 | Optionally, a name of the service (in case service name needs to be patched as well), labels,
25 | selectors, and annotations can be provided as keyword arguments.
26 |
27 | ## Getting Started
28 |
29 | To get started using the library, you just need to fetch the library using `charmcraft`. **Note
30 | that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
31 |
32 | ```shell
33 | cd some-charm
34 | charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
35 | cat << EOF >> requirements.txt
36 | lightkube
37 | lightkube-models
38 | EOF
39 | ```
40 |
41 | Then, to initialise the library:
42 |
43 | For `ClusterIP` services:
44 |
45 | ```python
46 | # ...
47 | from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
48 | from lightkube.models.core_v1 import ServicePort
49 |
50 | class SomeCharm(CharmBase):
51 | def __init__(self, *args):
52 | # ...
53 | port = ServicePort(443, name=f"{self.app.name}")
54 | self.service_patcher = KubernetesServicePatch(self, [port])
55 | # ...
56 | ```
57 |
58 | For `LoadBalancer`/`NodePort` services:
59 |
60 | ```python
61 | # ...
62 | from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
63 | from lightkube.models.core_v1 import ServicePort
64 |
65 | class SomeCharm(CharmBase):
66 | def __init__(self, *args):
67 | # ...
68 | port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
69 | self.service_patcher = KubernetesServicePatch(
70 | self, [port], "LoadBalancer"
71 | )
72 | # ...
73 | ```
74 |
75 | Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`
76 |
77 | ```python
78 | # ...
79 | from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
80 | from lightkube.models.core_v1 import ServicePort
81 |
82 | class SomeCharm(CharmBase):
83 | def __init__(self, *args):
84 | # ...
85 | tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
86 | udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
87 | sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
88 | self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
89 | # ...
90 | ```
91 |
92 | Bound with custom events by providing `refresh_event` argument:
93 | For example, you would like to have a configurable port in your charm and want to apply
94 | service patch every time charm config is changed.
95 |
96 | ```python
97 | from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
98 | from lightkube.models.core_v1 import ServicePort
99 |
100 | class SomeCharm(CharmBase):
101 | def __init__(self, *args):
102 | # ...
103 | port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}")
104 | self.service_patcher = KubernetesServicePatch(
105 | self,
106 | [port],
107 | refresh_event=self.on.config_changed
108 | )
109 | # ...
110 | ```
111 |
112 | Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
113 | does not try to make any API calls, or open any files during testing that are unlikely to be
114 | present, and could break your tests. The easiest way to do this is during your test `setUp`:
115 |
116 | ```python
117 | # ...
118 |
119 | @patch("charm.KubernetesServicePatch", lambda x, y: None)
120 | def setUp(self, *unused):
121 | self.harness = Harness(SomeCharm)
122 | # ...
123 | ```
124 | """
125 |
126 | import logging
127 | from types import MethodType
128 | from typing import List, Literal, Optional, Union
129 |
130 | from lightkube import ApiError, Client
131 | from lightkube.core import exceptions
132 | from lightkube.models.core_v1 import ServicePort, ServiceSpec
133 | from lightkube.models.meta_v1 import ObjectMeta
134 | from lightkube.resources.core_v1 import Service
135 | from lightkube.types import PatchType
136 | from ops.charm import CharmBase
137 | from ops.framework import BoundEvent, Object
138 |
139 | logger = logging.getLogger(__name__)
140 |
141 | # The unique Charmhub library identifier, never change it
142 | LIBID = "0042f86d0a874435adef581806cddbbb"
143 |
144 | # Increment this major API version when introducing breaking changes
145 | LIBAPI = 1
146 |
147 | # Increment this PATCH version before using `charmcraft publish-lib` or reset
148 | # to 0 if you are raising the major API version
149 | LIBPATCH = 5
150 |
151 | ServiceType = Literal["ClusterIP", "LoadBalancer"]
152 |
153 |
class KubernetesServicePatch(Object):
    """A utility for patching the Kubernetes service set up by Juju."""

    def __init__(
        self,
        charm: CharmBase,
        ports: List[ServicePort],
        service_name: Optional[str] = None,
        service_type: ServiceType = "ClusterIP",
        additional_labels: Optional[dict] = None,
        additional_selectors: Optional[dict] = None,
        additional_annotations: Optional[dict] = None,
        *,
        refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
    ):
        """Constructor for KubernetesServicePatch.

        Args:
            charm: the charm that is instantiating the library.
            ports: a list of ServicePorts
            service_name: allows setting custom name to the patched service. If none given,
                application name will be used.
            service_type: desired type of K8s service. Default value is in line with ServiceSpec's
                default value.
            additional_labels: Labels to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_selectors: Selectors to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_annotations: Annotations to be added to the kubernetes service.
            refresh_event: an optional bound event or list of bound events which
                will be observed to re-apply the patch (e.g. on port change).
                The `install` and `upgrade-charm` events would be observed regardless.
        """
        super().__init__(charm, "kubernetes-service-patch")
        self.charm = charm
        self.service_name = service_name if service_name else self._app
        # Desired state of the service; compared against the cluster in _is_patched().
        self.service = self._service_object(
            ports,
            service_name,
            service_type,
            additional_labels,
            additional_selectors,
            additional_annotations,
        )

        # Make mypy type checking happy that self._patch is a method
        assert isinstance(self._patch, MethodType)
        # Ensure this patch is applied during the 'install' and 'upgrade-charm' events,
        # since a charm upgrade resets the default service created by Juju.
        self.framework.observe(charm.on.install, self._patch)
        self.framework.observe(charm.on.upgrade_charm, self._patch)

        # apply user defined events
        if refresh_event:
            if not isinstance(refresh_event, list):
                refresh_event = [refresh_event]

            for evt in refresh_event:
                self.framework.observe(evt, self._patch)

    def _service_object(
        self,
        ports: List[ServicePort],
        service_name: Optional[str] = None,
        service_type: ServiceType = "ClusterIP",
        additional_labels: Optional[dict] = None,
        additional_selectors: Optional[dict] = None,
        additional_annotations: Optional[dict] = None,
    ) -> Service:
        """Creates a valid Service representation.

        Args:
            ports: a list of ServicePorts
            service_name: allows setting custom name to the patched service. If none given,
                application name will be used.
            service_type: desired type of K8s service. Default value is in line with ServiceSpec's
                default value.
            additional_labels: Labels to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_selectors: Selectors to be added to the kubernetes service (by default only
                "app.kubernetes.io/name" is set to the service name)
            additional_annotations: Annotations to be added to the kubernetes service.

        Returns:
            Service: A valid representation of a Kubernetes Service with the correct ports.
        """
        if not service_name:
            service_name = self._app
        labels = {"app.kubernetes.io/name": self._app}
        if additional_labels:
            labels.update(additional_labels)
        selector = {"app.kubernetes.io/name": self._app}
        if additional_selectors:
            selector.update(additional_selectors)
        return Service(
            apiVersion="v1",
            kind="Service",
            metadata=ObjectMeta(
                namespace=self._namespace,
                name=service_name,
                labels=labels,
                annotations=additional_annotations,  # type: ignore[arg-type]
            ),
            spec=ServiceSpec(
                selector=selector,
                ports=ports,
                type=service_type,
            ),
        )

    def _patch(self, _) -> None:
        """Patch the Kubernetes service created by Juju to map the correct port.

        Raises:
            PatchFailed: if patching fails due to lack of permissions, or otherwise.
        """
        try:
            client = Client()
        except exceptions.ConfigError as e:
            logger.warning("Error creating k8s client: %s", e)
            return

        try:
            if self._is_patched(client):
                return
            if self.service_name != self._app:
                # The default service (named after the app) may already have been
                # replaced by a previous run of this patch. A missing default service
                # is therefore not an error; fall through and patch the renamed one.
                try:
                    self._delete_and_create_service(client)
                except ApiError as e:
                    if e.status.code != 404:
                        raise
            client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
        except ApiError as e:
            if e.status.code == 403:
                logger.error("Kubernetes service patch failed: `juju trust` this application.")
            else:
                logger.error("Kubernetes service patch failed: %s", str(e))
        else:
            # Report the service actually patched, which may differ from the app name
            # when a custom service_name was supplied.
            logger.info("Kubernetes service '%s' patched successfully", self.service_name)

    def _delete_and_create_service(self, client: Client):
        # Rename the Juju-created service: fetch it, strip the server-assigned identity
        # fields, delete the original, and re-create it under the custom name.
        service = client.get(Service, self._app, namespace=self._namespace)
        service.metadata.name = self.service_name  # type: ignore[attr-defined]
        service.metadata.resourceVersion = service.metadata.uid = None  # type: ignore[attr-defined]  # noqa: E501
        client.delete(Service, self._app, namespace=self._namespace)
        client.create(service)

    def is_patched(self) -> bool:
        """Reports if the service patch has been applied.

        Returns:
            bool: A boolean indicating if the service patch has been applied.
        """
        client = Client()
        return self._is_patched(client)

    def _is_patched(self, client: Client) -> bool:
        # Get the relevant service from the cluster
        try:
            service = client.get(Service, name=self.service_name, namespace=self._namespace)
        except ApiError as e:
            if e.status.code == 404 and self.service_name != self._app:
                # Custom-named service does not exist yet; it will be created by _patch.
                return False
            else:
                logger.error("Kubernetes service get failed: %s", str(e))
                raise

        # Construct a list of expected ports, should the patch be applied
        expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
        # Construct a list in the same manner, using the fetched service
        fetched_ports = [
            (p.port, p.targetPort) for p in service.spec.ports  # type: ignore[attr-defined]
        ]  # noqa: E501
        return expected_ports == fetched_ports

    @property
    def _app(self) -> str:
        """Name of the current Juju application.

        Returns:
            str: A string containing the name of the current Juju application.
        """
        return self.charm.app.name

    @property
    def _namespace(self) -> str:
        """The Kubernetes namespace we're running in.

        Returns:
            str: A string containing the name of the current Kubernetes namespace.
        """
        # The namespace is read from the pod's service account mount, which is
        # always present in a Kubernetes-deployed (sidecar) charm.
        with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
            return f.read().strip()
342 |
--------------------------------------------------------------------------------
/lib/charms/alertmanager_k8s/v0/alertmanager_remote_configuration.py:
--------------------------------------------------------------------------------
1 | # Copyright 2022 Canonical Ltd.
2 | # See LICENSE file for licensing details.
3 |
4 | """Alertmanager Remote Configuration library.
5 |
6 | This library offers the option of configuring Alertmanager via relation data.
7 | It has been created with the `alertmanager-k8s` and the `alertmanager-k8s-configurer`
8 | (https://charmhub.io/alertmanager-configurer-k8s) charms in mind, but can be used by any charms
9 | which require functionalities implemented by this library.
10 |
11 | To get started using the library, you just need to fetch the library using `charmcraft`.
12 |
13 | ```shell
14 | cd some-charm
15 | charmcraft fetch-lib charms.alertmanager_k8s.v0.alertmanager_remote_configuration
16 | ```
17 |
18 | Charms that need to push Alertmanager configuration to a charm exposing relation using
19 | the `alertmanager_remote_configuration` interface, should use the `RemoteConfigurationProvider`.
Charms that need to utilize the Alertmanager configuration provided from an external source
through a relation using the `alertmanager_remote_configuration` interface, should use
23 | """
24 |
25 | import json
26 | import logging
27 | from typing import Optional, Tuple
28 |
29 | import yaml
30 | from ops.charm import CharmBase
31 | from ops.framework import EventBase, EventSource, Object, ObjectEvents
32 |
33 | # The unique Charmhub library identifier, never change it
34 | LIBID = "0e5a4c0ecde34c9880bb8899ac53444d"
35 |
36 | # Increment this major API version when introducing breaking changes
37 | LIBAPI = 0
38 |
39 | # Increment this PATCH version before using `charmcraft publish-lib` or reset
40 | # to 0 if you are raising the major API version
41 | LIBPATCH = 2
42 |
43 | logger = logging.getLogger(__name__)
44 |
45 | DEFAULT_RELATION_NAME = "remote-configuration"
46 |
47 |
class ConfigReadError(Exception):
    """Raised when an Alertmanager configuration file cannot be read."""

    def __init__(self, config_file: str):
        # Keep the formatted message on the instance so callers can inspect it.
        self.message = f"Failed to read {config_file}"
        super().__init__(self.message)
55 |
56 |
def config_main_keys_are_valid(config: Optional[dict]) -> bool:
    """Checks whether main keys in the Alertmanager's config file are valid.

    Performs a basic sanity check of an Alertmanager configuration: the config must be
    a non-empty mapping whose top-level keys all belong to the allowed set. `templates`
    is deliberately absent from the allowed set because `alertmanager-k8s` doesn't
    accept it as part of the config (see `alertmanager-k8s` description for details).
    Full validation of the config is done on the `alertmanager-k8s` charm side.

    Args:
        config: Alertmanager config dictionary

    Returns:
        bool: True/False
    """
    allowed_main_keys = {
        "global",
        "receivers",
        "route",
        "inhibit_rules",
        "time_intervals",
        "mute_time_intervals",
    }
    if not config:
        # Empty or missing configs are treated as invalid.
        return False
    return set(config.keys()).issubset(allowed_main_keys)
81 |
82 |
class AlertmanagerRemoteConfigurationChangedEvent(EventBase):
    """Event emitted when the Alertmanager remote_configuration relation data bag changes."""
87 |
88 |
class AlertmanagerRemoteConfigurationRequirerEvents(ObjectEvents):
    """Event descriptor for events raised by `AlertmanagerRemoteConfigurationRequirer`."""

    # Emitted by the requirer every time the relation data bag changes.
    remote_configuration_changed = EventSource(AlertmanagerRemoteConfigurationChangedEvent)
93 |
94 |
class RemoteConfigurationRequirer(Object):
    """API that manages a required `alertmanager_remote_configuration` relation.

    The `RemoteConfigurationRequirer` object can be instantiated as follows in your charm:

    ```
    from charms.alertmanager_k8s.v0.alertmanager_remote_configuration import (
        RemoteConfigurationRequirer,
    )

    def __init__(self, *args):
        ...
        self.remote_configuration = RemoteConfigurationRequirer(self)
        ...
    ```

    The `RemoteConfigurationRequirer` assumes that, in the `metadata.yaml` of your charm,
    you declare a required relation as follows:

    ```
    requires:
        remote-configuration:  # Relation name
            interface: alertmanager_remote_configuration  # Relation interface
            limit: 1
    ```

    The `RemoteConfigurationRequirer` provides a public `config` method for exposing the data
    from the relation data bag. Typical usage of these methods in the requirer charm would look
    something like:

    ```
    def get_config(self, *args):
        ...
        configuration, templates = self.remote_configuration.config()
        ...
        self.container.push("/alertmanager/config/file.yml", configuration)
        self.container.push("/alertmanager/templates/file.tmpl", templates)
        ...
    ```

    Separation of the main configuration and the templates is dictated by the assumption that
    the default provider of the `alertmanager_remote_configuration` relation will be
    `alertmanager-k8s` charm, which requires such separation.
    """

    on = AlertmanagerRemoteConfigurationRequirerEvents()

    def __init__(
        self,
        charm: CharmBase,
        relation_name: str = DEFAULT_RELATION_NAME,
    ):
        """API that manages a required `remote-configuration` relation.

        Args:
            charm: The charm object that instantiated this class.
            relation_name: Name of the relation with the `alertmanager_remote_configuration`
                interface as defined in metadata.yaml. Defaults to `remote-configuration`.
        """
        super().__init__(charm, relation_name)
        self._charm = charm
        self._relation_name = relation_name

        on_relation = self._charm.on[self._relation_name]

        self.framework.observe(on_relation.relation_created, self._on_relation_created)
        self.framework.observe(on_relation.relation_changed, self._on_relation_changed)
        self.framework.observe(on_relation.relation_broken, self._on_relation_broken)

    def _on_relation_created(self, _) -> None:
        """Event handler for remote configuration relation created event.

        Informs about the fact that the configuration from remote provider will be used.
        """
        logger.debug("Using remote configuration from the remote_configuration relation.")

    def _on_relation_changed(self, _) -> None:
        """Event handler for remote configuration relation changed event.

        Emits custom `remote_configuration_changed` event every time remote configuration
        changes.
        """
        self.on.remote_configuration_changed.emit()

    def _on_relation_broken(self, _) -> None:
        """Event handler for remote configuration relation broken event.

        Informs about the fact that the configuration from remote provider will no longer be used.
        """
        logger.debug("Remote configuration no longer available.")

    def config(self) -> Tuple[Optional[dict], Optional[list]]:
        """Exposes Alertmanager configuration sent inside the relation data bag.

        Charm which requires Alertmanager configuration, can access it like below:

        ```
        def get_config(self, *args):
            ...
            configuration, templates = self.remote_configuration.config()
            ...
            self.container.push("/alertmanager/config/file.yml", configuration)
            self.container.push("/alertmanager/templates/file.tmpl", templates)
            ...
        ```

        Returns:
            tuple: Alertmanager configuration (dict) and templates (list)
        """
        return self._alertmanager_config, self._alertmanager_templates

    @property
    def _alertmanager_config(self) -> Optional[dict]:
        """Returns Alertmanager configuration sent inside the relation data bag.

        If the `alertmanager-remote-configuration` relation exists, takes the Alertmanager
        configuration provided in the relation data bag and returns it in a form of a dictionary
        if configuration passes the validation against the Alertmanager config schema.
        If configuration fails the validation, error is logged and config is rejected (empty
        config is returned).

        Returns:
            dict: Alertmanager configuration dictionary
        """
        remote_configuration_relation = self._charm.model.get_relation(self._relation_name)
        if remote_configuration_relation and remote_configuration_relation.app:
            try:
                config_raw = remote_configuration_relation.data[remote_configuration_relation.app][
                    "alertmanager_config"
                ]
                config = yaml.safe_load(config_raw)
                if config_main_keys_are_valid(config):
                    return config
                # Bug fix: the docstring promises that a rejected config is logged, but an
                # invalid config used to be dropped silently. Only log when something was
                # actually provided (an empty/cleared data bag is not an error).
                if config:
                    logger.warning("Invalid Alertmanager configuration provided. Rejecting.")
            except KeyError:
                logger.warning(
                    "Remote config provider relation exists, but no config has been provided."
                )
        return None

    @property
    def _alertmanager_templates(self) -> Optional[list]:
        """Returns Alertmanager templates sent inside the relation data bag.

        If the `alertmanager-remote-configuration` relation exists and the relation data bag
        contains Alertmanager templates, returns the templates in the form of a list.

        Returns:
            list: Alertmanager templates
        """
        templates = None
        remote_configuration_relation = self._charm.model.get_relation(self._relation_name)
        if remote_configuration_relation and remote_configuration_relation.app:
            try:
                templates_raw = remote_configuration_relation.data[
                    remote_configuration_relation.app
                ]["alertmanager_templates"]
                templates = json.loads(templates_raw)
            except KeyError:
                logger.warning(
                    "Remote config provider relation exists, but no templates have been provided."
                )
        return templates
257 |
258 |
class AlertmanagerConfigurationBrokenEvent(EventBase):
    """Event emitted when the configuration provided by the Provider charm is invalid."""
263 |
264 |
class AlertmanagerRemoteConfigurationProviderEvents(ObjectEvents):
    """Event descriptor for events raised by `AlertmanagerRemoteConfigurationProvider`."""

    # Emitted by the provider when a pushed configuration fails the sanity check.
    configuration_broken = EventSource(AlertmanagerConfigurationBrokenEvent)
269 |
270 |
class RemoteConfigurationProvider(Object):
    """API that manages a provided `alertmanager_remote_configuration` relation.

    The `RemoteConfigurationProvider` is intended to be used by charms that need to push data
    to other charms over the `alertmanager_remote_configuration` interface.

    The `RemoteConfigurationProvider` object can be instantiated as follows in your charm:

    ```
    from charms.alertmanager_k8s.v0.alertmanager_remote_configuration import (
        RemoteConfigurationProvider,
    )

    def __init__(self, *args):
        ...
        config = RemoteConfigurationProvider.load_config_file(FILE_PATH)
        self.remote_configuration_provider = RemoteConfigurationProvider(
            charm=self,
            alertmanager_config=config,
        )
        ...
    ```

    Alternatively, RemoteConfigurationProvider can be instantiated using a factory, which allows
    using a configuration file path directly instead of a configuration string:

    ```
    from charms.alertmanager_k8s.v0.alertmanager_remote_configuration import (
        RemoteConfigurationProvider,
    )

    def __init__(self, *args):
        ...
        self.remote_configuration_provider = RemoteConfigurationProvider.with_config_file(
            charm=self,
            config_file=FILE_PATH,
        )
        ...
    ```

    The `RemoteConfigurationProvider` assumes that, in the `metadata.yaml` of your charm,
    you declare a provided relation as follows:

    ```
    provides:
        remote-configuration:  # Relation name
            interface: alertmanager_remote_configuration  # Relation interface
    ```

    The `RemoteConfigurationProvider` provides handling of the most relevant charm
    lifecycle events. On each of the defined Juju events, Alertmanager configuration and templates
    from a specified file will be pushed to the relation data bag.
    Inside the relation data bag, Alertmanager configuration will be stored under
    `alertmanager_config` key, while the templates under the `alertmanager_templates` key.
    Separation of the main configuration and the templates is dictated by the assumption that
    the default provider of the `alertmanager_remote_configuration` relation will be
    `alertmanager-k8s` charm, which requires such separation.
    """

    on = AlertmanagerRemoteConfigurationProviderEvents()

    def __init__(
        self,
        charm: CharmBase,
        alertmanager_config: Optional[dict] = None,
        relation_name: str = DEFAULT_RELATION_NAME,
    ):
        """API that manages a provided `remote-configuration` relation.

        Args:
            charm: The charm object that instantiated this class.
            alertmanager_config: Alertmanager configuration dictionary.
            relation_name: Name of the relation with the `alertmanager_remote_configuration`
                interface as defined in metadata.yaml. Defaults to `remote-configuration`.
        """
        super().__init__(charm, relation_name)
        self._charm = charm
        self.alertmanager_config = alertmanager_config
        self._relation_name = relation_name

        on_relation = self._charm.on[self._relation_name]

        self.framework.observe(on_relation.relation_joined, self._on_relation_joined)

    @classmethod
    def with_config_file(
        cls,
        charm: CharmBase,
        config_file: str,
        relation_name: str = DEFAULT_RELATION_NAME,
    ):
        """The RemoteConfigurationProvider object factory.

        This factory provides an alternative way of instantiating the RemoteConfigurationProvider.
        While the default constructor requires passing a config dict, the factory allows using
        a configuration file path.

        Args:
            charm: The charm object that instantiated this class.
            config_file: Path to the Alertmanager configuration file.
            relation_name: Name of the relation with the `alertmanager_remote_configuration`
                interface as defined in metadata.yaml. Defaults to `remote-configuration`.

        Returns:
            RemoteConfigurationProvider object
        """
        return cls(charm, cls.load_config_file(config_file), relation_name)

    def _on_relation_joined(self, _) -> None:
        """Event handler for RelationJoinedEvent.

        Takes care of pushing Alertmanager configuration to the relation data bag.
        """
        # Only the leader may write application relation data.
        if not self._charm.unit.is_leader():
            return
        self.update_relation_data_bag(self.alertmanager_config)

    @staticmethod
    def load_config_file(path: str) -> dict:
        """Reads given Alertmanager configuration file and turns it into a dictionary.

        Args:
            path: Path to the Alertmanager configuration file

        Returns:
            dict: Alertmanager configuration file in a form of a dictionary

        Raises:
            ConfigReadError: if a problem with reading given config file happens
        """
        try:
            with open(path, "r") as config_yaml:
                config = yaml.safe_load(config_yaml)
            return config
        except (OSError, yaml.YAMLError) as e:
            # OSError covers FileNotFoundError and any other read failure.
            raise ConfigReadError(path) from e

    def update_relation_data_bag(self, alertmanager_config: Optional[dict]) -> None:
        """Updates relation data bag with Alertmanager config and templates.

        Before updating relation data bag, basic sanity check of given configuration is done.
        If the config fails the check, relation data is cleared and `configuration_broken`
        event is emitted.

        Args:
            alertmanager_config: Alertmanager configuration dictionary.
        """
        if not self._charm.unit.is_leader():
            return
        config, templates = self._prepare_relation_data(alertmanager_config)
        if config_main_keys_are_valid(config):
            for relation in self._charm.model.relations[self._relation_name]:
                relation.data[self._charm.app]["alertmanager_config"] = json.dumps(config)
                relation.data[self._charm.app]["alertmanager_templates"] = json.dumps(templates)
        else:
            logger.warning("Invalid Alertmanager configuration. Ignoring...")
            self._clear_relation_data()
            self.on.configuration_broken.emit()

    def _prepare_relation_data(
        self, config: Optional[dict]
    ) -> Tuple[Optional[dict], Optional[list]]:
        """Prepares relation data to be put in a relation data bag.

        If the main config file contains templates section, content of the files specified in this
        section will be concatenated. At the same time, templates section will be removed from
        the main config, as alertmanager-k8s-operator charm doesn't tolerate it.

        Args:
            config: Content of the Alertmanager configuration file

        Returns:
            dict: Alertmanager configuration
            list: List of templates
        """
        templates = []
        if config and config.get("templates") is not None:
            for file in config.pop("templates"):
                try:
                    templates.append(self._load_templates_file(file))
                except ConfigReadError:
                    # Bug fix: _load_templates_file wraps FileNotFoundError (and other read
                    # errors) in ConfigReadError, so catching FileNotFoundError here was dead
                    # code and an unreadable template crashed instead of being skipped.
                    logger.warning("Template file {} not found. Skipping.".format(file))
                    continue
        return config, templates

    @staticmethod
    def _load_templates_file(path: str) -> str:
        """Reads given Alertmanager templates file and returns its content in a form of a string.

        Args:
            path: Alertmanager templates file path

        Returns:
            str: Alertmanager templates

        Raises:
            ConfigReadError: if a problem with reading given config file happens
        """
        try:
            with open(path, "r") as template_file:
                templates = template_file.read()
            return templates
        except (OSError, ValueError) as e:
            # OSError covers FileNotFoundError and any other read failure.
            raise ConfigReadError(path) from e

    def _clear_relation_data(self) -> None:
        """Clears relation data bag."""
        for relation in self._charm.model.relations[self._relation_name]:
            relation.data[self._charm.app]["alertmanager_config"] = ""
            relation.data[self._charm.app]["alertmanager_templates"] = ""
479 |
--------------------------------------------------------------------------------