├── CODEOWNERS ├── requirements.txt ├── .gitignore ├── renovate.json ├── charmcraft.yaml ├── README.md ├── pyproject.toml ├── config.yaml ├── tests ├── integration │ └── test_charm.py └── unit │ └── test_charm.py ├── metadata.yaml ├── tox.ini ├── src └── charm.py ├── LICENSE ├── icon.svg └── lib └── charms └── traefik_k8s └── v2 └── ingress.py /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @canonical/is-charms 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ops==2.21.0 2 | pydantic==2.11.5 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.charm 3 | *.py[cod] 4 | *.swp 5 | .coverage 6 | .tox/ 7 | build/ 8 | htmlcov/ 9 | __pycache__/ 10 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /charmcraft.yaml: -------------------------------------------------------------------------------- 1 | # This file configures Charmcraft. 2 | # See https://juju.is/docs/sdk/charmcraft-config for guidance. 3 | 4 | type: charm 5 | bases: 6 | - build-on: 7 | - name: ubuntu 8 | channel: "22.04" 9 | run-on: 10 | - name: ubuntu 11 | channel: "22.04" 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # atlantis-operator 2 | 3 | This charm is still under active development. 
4 | 5 | Currently this operator only supports GitHub. Support for GitLab, Bitbucket and Azure DevOps could be added in the 6 | future based on user requests. 7 | 8 | To deploy from charmhub: 9 | 10 | juju deploy atlantis --channel=edge 11 | 12 | To run this locally: 13 | 14 | charmcraft pack 15 | juju deploy ./atlantis_ubuntu-22.04-amd64.charm --resource atlantis-image='ghcr.io/runatlantis/atlantis' 16 | 17 | You'll need to provide the following config options: 18 | 19 | * gh-user 20 | * gh-token 21 | * webhook-secret 22 | * repo-allowlist 23 | 24 | The charm supports the ingress integration, provided by the nginx-ingress-integrator or traefik-k8s charms: 25 | 26 | juju deploy nginx-ingress-integrator --trust --config service-hostname=atlantis.local --config path-routes=/ 27 | juju integrate nginx-ingress-integrator atlantis 28 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # Testing tools configuration 2 | [tool.coverage.run] 3 | branch = true 4 | 5 | [tool.coverage.report] 6 | show_missing = true 7 | 8 | [tool.pytest.ini_options] 9 | minversion = "6.0" 10 | log_cli_level = "INFO" 11 | 12 | # Formatting tools configuration 13 | [tool.black] 14 | line-length = 99 15 | target-version = ["py38"] 16 | 17 | [tool.isort] 18 | line_length = 99 19 | profile = "black" 20 | 21 | # Linting tools configuration 22 | [tool.flake8] 23 | max-line-length = 99 24 | max-doc-length = 99 25 | max-complexity = 10 26 | exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"] 27 | select = ["E", "W", "F", "C", "N", "R", "D", "H"] 28 | # Ignore W503, E501 because using black creates errors with this 29 | # Ignore D107 Missing docstring in __init__ 30 | ignore = ["W503", "E501", "D107"] 31 | # D100, D101, D102, D103: Ignore missing docstrings in tests 32 | per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"] 33 | 
docstring-convention = "google" 34 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | options: 2 | repo-allowlist: 3 | description: | 4 | An allowlist of repositories Atlantis will accept webhooks from, e.g. 'github.com/myorg/*'. See https://www.runatlantis.io/docs/server-configuration.html#repo-allowlist for more details. 5 | 6 | Required. 7 | default: "" 8 | type: string 9 | gh-token: 10 | description: GitHub (classic) token to use - requires 'repo' access. Required. 11 | default: "" 12 | type: string 13 | gh-user: 14 | description: GitHub user to connect as. Required. 15 | default: "" 16 | type: string 17 | additional-env-variables: 18 | description: | 19 | Any additional environment variables that should be specified, in JSON format. 20 | 21 | This may include things like a Vault address and token, or AWS credentials for storing backend state. 22 | 23 | e.g. '{"VAULT_ADDR": "https://vault.internal", "VAULT_TOKEN": "s.sekrit"}' 24 | default: "" 25 | type: string 26 | webhook-secret: 27 | description: Webhook secret. Required. 28 | default: "" 29 | type: string 30 | -------------------------------------------------------------------------------- /tests/integration/test_charm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright 2023 Canonical Ltd. 3 | # See LICENSE file for licensing details. 
4 | 5 | import asyncio 6 | import logging 7 | from pathlib import Path 8 | 9 | import pytest 10 | import yaml 11 | from pytest_operator.plugin import OpsTest 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) 16 | APP_NAME = METADATA["name"] 17 | 18 | 19 | @pytest.mark.abort_on_fail 20 | async def test_build_and_deploy(ops_test: OpsTest): 21 | """Build the charm-under-test and deploy it together with related charms. 22 | 23 | Assert on the unit status before any relations/configurations take place. 24 | """ 25 | # Build and deploy charm from local source folder 26 | charm = await ops_test.build_charm(".") 27 | resources = {"atlantis-image": METADATA["resources"]["atlantis-image"]["upstream-source"]} 28 | 29 | # Deploy the charm and wait for active/idle status 30 | await asyncio.gather( 31 | ops_test.model.deploy(await charm, resources=resources, application_name=APP_NAME), 32 | ops_test.model.wait_for_idle( 33 | apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000 34 | ), 35 | ) 36 | -------------------------------------------------------------------------------- /metadata.yaml: -------------------------------------------------------------------------------- 1 | # This file populates the Overview on Charmhub. 2 | # See https://juju.is/docs/sdk/metadata-reference for a checklist and guidance. 3 | 4 | # The charm package name, no spaces (required) 5 | # See https://juju.is/docs/sdk/naming#heading--naming-charms for guidance. 6 | name: atlantis 7 | 8 | # The following metadata are human-readable and will be published prominently on Charmhub. 9 | 10 | # (Recommended) 11 | display-name: Atlantis 12 | 13 | # (Required) 14 | summary: Terraform Pull Request Automation. 15 | 16 | description: | 17 | A charm providing Terraform pull request automation using [Atlantis](https://www.runatlantis.io/). 
18 | 19 | When PRs are generated against a Terraform config repository this charm can be configured to listen for a webhook which then triggers it to run `terraform plan` and post that to the PR. Reviewers can then post a message that will trigger this service to apply the changes if appropriate. 20 | 21 | Improve your Terraform workflow, making it easier for devops teams to collaborate on infrastructure changes. 22 | 23 | This charm is useful for teams wanting to automate their Terraform workflows. 24 | 25 | maintainers: 26 | - https://launchpad.net/~canonical-is-devops 27 | issues: https://github.com/canonical/atlantis-operator/issues 28 | source: https://github.com/canonical/atlantis-operator 29 | assumes: 30 | - k8s-api 31 | 32 | containers: 33 | atlantis: 34 | resource: atlantis-image 35 | 36 | resources: 37 | atlantis-image: 38 | type: oci-image 39 | description: OCI image for atlantis 40 | upstream-source: ghcr.io/runatlantis/atlantis 41 | 42 | requires: 43 | ingress: 44 | interface: ingress 45 | limit: 1 46 | -------------------------------------------------------------------------------- /tests/unit/test_charm.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Canonical Ltd. 2 | # See LICENSE file for licensing details. 3 | # 4 | # Learn more about testing at: https://juju.is/docs/sdk/testing 5 | 6 | import unittest 7 | from unittest.mock import patch 8 | 9 | import ops.testing 10 | from ops.model import ActiveStatus 11 | from ops.testing import Harness 12 | 13 | from charm import AtlantisOperatorCharm 14 | 15 | 16 | class TestCharm(unittest.TestCase): 17 | def setUp(self): 18 | # Enable more accurate simulation of container networking. 
19 | # For more information, see https://juju.is/docs/sdk/testing#heading--simulate-can-connect 20 | ops.testing.SIMULATE_CAN_CONNECT = True 21 | self.addCleanup(setattr, ops.testing, "SIMULATE_CAN_CONNECT", False) 22 | 23 | self.harness = Harness(AtlantisOperatorCharm) 24 | self.addCleanup(self.harness.cleanup) 25 | self.harness.begin() 26 | 27 | def test_atlantis_required_data(self): 28 | # Simulate the container coming up and emission of pebble-ready event 29 | self.harness.container_pebble_ready("atlantis") 30 | # Confirm we're missing all config and our ingress-url 31 | self.assertEqual( 32 | self.harness.charm._required_data(), 33 | ["gh-token", "gh-user", "repo-allowlist", "webhook-secret", "ingress-url"] 34 | ) 35 | # Set required config 36 | self.harness.update_config( 37 | { 38 | "gh-token": "test", 39 | "gh-user": "test", 40 | "repo-allowlist": "github.com/myorg/*", 41 | "webhook-secret": "test", 42 | } 43 | ) 44 | # Confirm we're now just missing our ingress-url 45 | self.assertEqual(self.harness.charm._required_data(), ["ingress-url"]) 46 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Canonical Ltd. 2 | # See LICENSE file for licensing details. 
3 | 4 | [tox] 5 | skipsdist=True 6 | skip_missing_interpreters = True 7 | envlist = lint, unit 8 | 9 | [vars] 10 | src_path = {toxinidir}/src/ 11 | tst_path = {toxinidir}/tests/ 12 | ;lib_path = {toxinidir}/lib/charms/operator_name_with_underscores 13 | all_path = {[vars]src_path} {[vars]tst_path} 14 | 15 | [testenv] 16 | setenv = 17 | PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path} 18 | PYTHONBREAKPOINT=pdb.set_trace 19 | PY_COLORS=1 20 | passenv = 21 | PYTHONPATH 22 | CHARM_BUILD_DIR 23 | MODEL_SETTINGS 24 | 25 | [testenv:fmt] 26 | description = Apply coding style standards to code 27 | deps = 28 | black 29 | isort 30 | commands = 31 | isort {[vars]all_path} 32 | black {[vars]all_path} 33 | 34 | [testenv:lint] 35 | description = Check code against coding style standards 36 | deps = 37 | black 38 | flake8-docstrings 39 | flake8-builtins 40 | pyproject-flake8 41 | pep8-naming 42 | isort 43 | codespell 44 | commands = 45 | # uncomment the following line if this charm owns a lib 46 | # codespell {[vars]lib_path} 47 | codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \ 48 | --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \ 49 | --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg 50 | # pflake8 wrapper supports config from pyproject.toml 51 | pflake8 {[vars]all_path} 52 | isort --check-only --diff {[vars]all_path} 53 | black --check --diff {[vars]all_path} 54 | 55 | [testenv:unit] 56 | description = Run unit tests 57 | deps = 58 | pytest 59 | coverage[toml] 60 | -r{toxinidir}/requirements.txt 61 | commands = 62 | coverage run --source={[vars]src_path} \ 63 | -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs} 64 | coverage report 65 | 66 | [testenv:integration] 67 | description = Run integration tests 68 | deps = 69 | pytest 70 | juju 71 | pytest-operator 72 | -r{toxinidir}/requirements.txt 73 | commands = 74 | pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s 
#!/usr/bin/env python3
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.
"""Charm for Atlantis on kubernetes."""

import json
import logging

from charms.traefik_k8s.v2.ingress import IngressPerAppRequirer
from ops.charm import CharmBase, ConfigChangedEvent, PebbleReadyEvent
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus, WaitingStatus

logger = logging.getLogger(__name__)

# Port Atlantis listens on inside the workload container.
ATLANTIS_PORT = 4141


class AtlantisOperatorCharm(CharmBase):
    """Charm for Atlantis on kubernetes."""

    def __init__(self, *args):
        """Init function for the charm.

        Args:
            args: Variable list of positional arguments passed to the parent constructor.
        """
        super().__init__(*args)
        self.ingress = IngressPerAppRequirer(self, port=ATLANTIS_PORT)
        self.framework.observe(self.on.atlantis_pebble_ready, self._on_atlantis_pebble_ready)
        self.framework.observe(self.on.config_changed, self._on_config_changed)
        self.framework.observe(self.ingress.on.ready, self._on_ingress_ready)

    #########################################################################
    # Juju event handlers
    #########################################################################

    def _on_ingress_ready(self, _) -> None:
        """Handle the ingress ready event."""
        # Trigger a config-changed which will check if we have the data we need
        # and then configure Atlantis if appropriate.
        self.on.config_changed.emit()

    def _on_atlantis_pebble_ready(self, event: PebbleReadyEvent) -> None:
        """Handle atlantis_pebble_ready event and configure workload container.

        Args:
            event: Event triggering the pebble ready hook for the atlantis container.
        """
        if self._block_on_invalid_state():
            return
        # Get a reference to the container attribute on the PebbleReadyEvent
        container = event.workload
        # Add initial Pebble config layer using the Pebble API
        container.add_layer("atlantis", self._pebble_layer, combine=True)
        # Make Pebble reevaluate its plan, ensuring any services are started if enabled.
        container.replan()
        # Mark status active.
        self.unit.status = ActiveStatus()

    def _on_config_changed(self, event: ConfigChangedEvent) -> None:
        """Handle configuration changed event.

        Args:
            event: Event indicating configuration has been changed.
        """
        if self._block_on_invalid_state():
            return

        # The config is good, so update the configuration of the workload
        container = self.unit.get_container("atlantis")
        # Verify that we can connect to the Pebble API in the workload container
        if container.can_connect():
            # Push an updated layer with the new config
            container.add_layer("atlantis", self._pebble_layer, combine=True)
            container.replan()

            self.unit.status = ActiveStatus()
        else:
            # We were unable to connect to the Pebble API, so we defer this event
            event.defer()
            self.unit.status = WaitingStatus("waiting for Pebble API")

    #########################################################################
    # Charm-specific functions and properties
    #########################################################################

    def _block_on_invalid_state(self) -> bool:
        """Set BlockedStatus if required data is missing or config is invalid.

        Shared guard for the pebble-ready and config-changed handlers so the
        missing-data message stays consistent between them.

        Returns:
            True if the unit was set to BlockedStatus and the caller should
            stop processing, False if the charm can be configured.
        """
        required_data = self._required_data()
        if required_data:
            missing_data = ", ".join(required_data)
            self.unit.status = BlockedStatus(
                f"Missing required config or integrations: {missing_data}"
            )
            return True
        # Validate additional-env-variables up front so a malformed value
        # blocks the charm with a clear message rather than erroring the hook
        # when the Pebble layer is built.
        extra_env = self.config["additional-env-variables"]
        if extra_env:
            try:
                parsed = json.loads(extra_env)
            except json.JSONDecodeError:
                self.unit.status = BlockedStatus("additional-env-variables is not valid JSON")
                return True
            if not isinstance(parsed, dict):
                self.unit.status = BlockedStatus(
                    "additional-env-variables must be a JSON object"
                )
                return True
        return False

    def _required_data(self) -> list[str]:
        """Return a list of required data that aren't set.

        Returns:
            A list of strings of the juju config options that are required but
            not specified.
        """
        required_config = [
            "gh-token",
            "gh-user",
            "repo-allowlist",
            "webhook-secret",
        ]
        required_data = [x for x in required_config if not self.config[x]]
        if not self.ingress.url:
            required_data += ["ingress-url"]
        return required_data

    def _env_variables(self) -> dict:
        """Assemble the environment variables that should be passed to Atlantis.

        Returns:
            A dictionary of environment variables to be passed to Atlantis.
        """
        environment = {
            "ATLANTIS_ATLANTIS_URL": self.ingress.url,
            "ATLANTIS_GH_USER": self.config["gh-user"],
            "ATLANTIS_GH_TOKEN": self.config["gh-token"],
            "ATLANTIS_GH_WEBHOOK_SECRET": self.config["webhook-secret"],
            "ATLANTIS_REPO_ALLOWLIST": self.config["repo-allowlist"],
        }
        if self.config["additional-env-variables"]:
            # Already validated as a JSON object by _block_on_invalid_state
            # before the layer is (re)built.
            environment.update(json.loads(self.config["additional-env-variables"]))
        return environment

    @property
    def _pebble_layer(self) -> dict:
        """Return a dictionary representing a Pebble layer.

        Returns:
            A dictionary representing the Pebble layer for Atlantis.
        """
        # We need to pass the --port variable to 'atlantis server` because
        # otherwise it's looking for an environment variable of ATLANTIS_PORT
        # which is set by Juju/K8s based on the pod configuration to
        # 'tcp://${POD_IP}:65535'.
        return {
            "summary": "atlantis layer",
            "description": "pebble config layer for atlantis",
            "services": {
                "atlantis": {
                    "override": "replace",
                    "summary": "atlantis",
                    "command": f"atlantis server --port={ATLANTIS_PORT}",
                    "startup": "enabled",
                    "environment": self._env_variables(),
                }
            },
            "checks": {
                "atlantis-ready": {
                    "override": "replace",
                    "level": "ready",
                    "tcp": {"port": ATLANTIS_PORT},
                }
            },
        }


if __name__ == "__main__":  # pragma: nocover
    main(AtlantisOperatorCharm)
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. 
(Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2023 Canonical Ltd. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /icon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 17 | 19 | 37 | 41 | 306 | 307 | 308 | -------------------------------------------------------------------------------- /lib/charms/traefik_k8s/v2/ingress.py: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Canonical Ltd. 2 | # See LICENSE file for licensing details. 3 | 4 | r"""# Interface Library for ingress. 5 | 6 | This library wraps relation endpoints using the `ingress` interface 7 | and provides a Python API for both requesting and providing per-application 8 | ingress, with load-balancing occurring across all units. 9 | 10 | ## Getting Started 11 | 12 | To get started using the library, you just need to fetch the library using `charmcraft`. 
13 | 14 | ```shell 15 | cd some-charm 16 | charmcraft fetch-lib charms.traefik_k8s.v2.ingress 17 | ``` 18 | 19 | In the `metadata.yaml` of the charm, add the following: 20 | 21 | ```yaml 22 | requires: 23 | ingress: 24 | interface: ingress 25 | limit: 1 26 | ``` 27 | 28 | Then, to initialise the library: 29 | 30 | ```python 31 | from charms.traefik_k8s.v2.ingress import (IngressPerAppRequirer, 32 | IngressPerAppReadyEvent, IngressPerAppRevokedEvent) 33 | 34 | class SomeCharm(CharmBase): 35 | def __init__(self, *args): 36 | # ... 37 | self.ingress = IngressPerAppRequirer(self, port=80) 38 | # The following event is triggered when the ingress URL to be used 39 | # by this deployment of the `SomeCharm` is ready (or changes). 40 | self.framework.observe( 41 | self.ingress.on.ready, self._on_ingress_ready 42 | ) 43 | self.framework.observe( 44 | self.ingress.on.revoked, self._on_ingress_revoked 45 | ) 46 | 47 | def _on_ingress_ready(self, event: IngressPerAppReadyEvent): 48 | logger.info("This app's ingress URL: %s", event.url) 49 | 50 | def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent): 51 | logger.info("This app no longer has ingress") 52 | """ 53 | import ipaddress 54 | import json 55 | import logging 56 | import socket 57 | import typing 58 | from dataclasses import dataclass 59 | from typing import Any, Callable, Dict, List, MutableMapping, Optional, Sequence, Tuple, Union 60 | 61 | import pydantic 62 | from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent 63 | from ops.framework import EventSource, Object, ObjectEvents, StoredState 64 | from ops.model import ModelError, Relation, Unit 65 | from pydantic import AnyHttpUrl, BaseModel, Field, validator 66 | 67 | # The unique Charmhub library identifier, never change it 68 | LIBID = "e6de2a5cd5b34422a204668f3b8f90d2" 69 | 70 | # Increment this major API version when introducing breaking changes 71 | LIBAPI = 2 72 | 73 | # Increment this PATCH version before using `charmcraft publish-lib` or 
# reset to 0 if you are raising the major API version
LIBPATCH = 13

PYDEPS = ["pydantic"]

DEFAULT_RELATION_NAME = "ingress"
RELATION_INTERFACE = "ingress"

log = logging.getLogger(__name__)
# Keys juju itself writes into databags; never part of the model schema.
BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"}

# This library supports both pydantic v1 and v2; the two DatabagModel
# definitions below expose the same load()/dump() contract on either.
PYDANTIC_IS_V1 = int(pydantic.version.VERSION.split(".")[0]) < 2
if PYDANTIC_IS_V1:

    class DatabagModel(BaseModel):  # type: ignore
        """Base databag model (pydantic v1 flavour)."""

        class Config:
            """Pydantic config."""

            allow_population_by_field_name = True
            """Allow instantiating this class by field name (instead of forcing alias)."""

        # If set (by a subclass), the whole model is serialized as one json
        # blob under this single databag key instead of one key per field.
        _NEST_UNDER = None

        @classmethod
        def load(cls, databag: MutableMapping):
            """Load this model from a Juju databag.

            :raises DataValidationError: if the databag contents are not valid
                json or do not validate against the model schema.
            """
            if cls._NEST_UNDER:
                return cls.parse_obj(json.loads(databag[cls._NEST_UNDER]))

            try:
                data = {
                    k: json.loads(v)
                    for k, v in databag.items()
                    # Don't attempt to parse model-external values
                    if k in {f.alias for f in cls.__fields__.values()}  # type: ignore
                }
            except json.JSONDecodeError as e:
                msg = f"invalid databag contents: expecting json. {databag}"
                log.error(msg)
                raise DataValidationError(msg) from e

            try:
                return cls.parse_raw(json.dumps(data))  # type: ignore
            except pydantic.ValidationError as e:
                msg = f"failed to validate databag: {databag}"
                log.debug(msg, exc_info=True)
                raise DataValidationError(msg) from e

        def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True):
            """Write the contents of this model to Juju databag.

            :param databag: the databag to write the data to.
            :param clear: ensure the databag is cleared before writing it.
            """
            if clear and databag:
                databag.clear()

            if databag is None:
                databag = {}

            if self._NEST_UNDER:
                databag[self._NEST_UNDER] = self.json(by_alias=True, exclude_defaults=True)
                return databag

            # one databag key per field; each value json-encoded individually
            for key, value in self.dict(by_alias=True, exclude_defaults=True).items():  # type: ignore
                databag[key] = json.dumps(value)

            return databag

else:
    from pydantic import ConfigDict

    class DatabagModel(BaseModel):
        """Base databag model (pydantic v2 flavour)."""

        model_config = ConfigDict(
            # tolerate additional keys in databag
            extra="ignore",
            # Allow instantiating this class by field name (instead of forcing alias).
            populate_by_name=True,
            # Custom config key: whether to nest the whole datastructure (as json)
            # under a field or spread it out at the toplevel.
            _NEST_UNDER=None,
        )  # type: ignore
        """Pydantic config."""

        @classmethod
        def load(cls, databag: MutableMapping):
            """Load this model from a Juju databag.

            :raises DataValidationError: if the databag contents are not valid
                json or do not validate against the model schema.
            """
            nest_under = cls.model_config.get("_NEST_UNDER")
            if nest_under:
                return cls.model_validate(json.loads(databag[nest_under]))  # type: ignore

            try:
                data = {
                    k: json.loads(v)
                    for k, v in databag.items()
                    # Don't attempt to parse model-external values
                    if k in {(f.alias or n) for n, f in cls.__fields__.items()}  # type: ignore
                }
            except json.JSONDecodeError as e:
                msg = f"invalid databag contents: expecting json. {databag}"
                log.error(msg)
                raise DataValidationError(msg) from e

            try:
                return cls.model_validate_json(json.dumps(data))  # type: ignore
            except pydantic.ValidationError as e:
                msg = f"failed to validate databag: {databag}"
                log.debug(msg, exc_info=True)
                raise DataValidationError(msg) from e

        def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True):
            """Write the contents of this model to Juju databag.

            :param databag: the databag to write the data to.
            :param clear: ensure the databag is cleared before writing it.
            """
            if clear and databag:
                databag.clear()

            if databag is None:
                databag = {}
            nest_under = self.model_config.get("_NEST_UNDER")
            if nest_under:
                databag[nest_under] = self.model_dump_json(  # type: ignore
                    by_alias=True,
                    # skip keys whose values are default
                    exclude_defaults=True,
                )
                return databag

            # one databag key per field; each value json-encoded individually
            dct = self.model_dump(mode="json", by_alias=True, exclude_defaults=True)  # type: ignore
            databag.update({k: json.dumps(v) for k, v in dct.items()})
            return databag


# todo: import these models from charm-relation-interfaces/ingress/v2 instead of redeclaring them
class IngressUrl(BaseModel):
    """Ingress url schema."""

    url: AnyHttpUrl


class IngressProviderAppData(DatabagModel):
    """Ingress application databag schema (what the provider publishes)."""

    ingress: IngressUrl


class ProviderSchema(BaseModel):
    """Provider schema for Ingress."""

    app: IngressProviderAppData
class IngressRequirerAppData(DatabagModel):
    """Ingress requirer application databag model.

    App-level data the requirer publishes to the provider: which app, in
    which model, on what port, plus traefik-specific routing options.
    """

    model: str = Field(description="The model the application is in.")
    name: str = Field(description="the name of the app requesting ingress.")
    port: int = Field(description="The port the app wishes to be exposed.")

    # fields on top of vanilla 'ingress' interface:
    strip_prefix: Optional[bool] = Field(
        default=False,
        description="Whether to strip the prefix from the ingress url.",
        alias="strip-prefix",
    )
    redirect_https: Optional[bool] = Field(
        default=False,
        description="Whether to redirect http traffic to https.",
        alias="redirect-https",
    )

    scheme: Optional[str] = Field(
        default="http", description="What scheme to use in the generated ingress url"
    )

    @validator("scheme", pre=True)
    def validate_scheme(cls, scheme):  # noqa: N805 # pydantic wants 'cls' as first arg
        """Validate scheme arg."""
        if scheme not in {"http", "https", "h2c"}:
            raise ValueError("invalid scheme: should be one of `http|https|h2c`")
        return scheme

    @validator("port", pre=True)
    def validate_port(cls, port):  # noqa: N805 # pydantic wants 'cls' as first arg
        """Validate port.

        Raise explicitly instead of using ``assert`` (which is stripped under
        ``python -O``, silently disabling validation); pydantic turns
        TypeError/ValueError raised here into a ValidationError, just as it
        did for AssertionError.
        """
        if not isinstance(port, int):
            raise TypeError(f"expected int port, got {type(port)}")
        # Fix: 65535 is the highest valid TCP port; the previous
        # `0 < port < 65535` bound rejected it by mistake.
        if not 0 < port <= 65535:
            raise ValueError("port out of TCP range")
        return port


class IngressRequirerUnitData(DatabagModel):
    """Ingress requirer unit databag model.

    Per-unit addressing data: how the provider can reach this unit.
    """

    host: str = Field(description="Hostname at which the unit is reachable.")
    ip: Optional[str] = Field(
        None,
        description="IP at which the unit is reachable, "
        "IP can only be None if the IP information can't be retrieved from juju.",
    )

    @validator("host", pre=True)
    def validate_host(cls, host):  # noqa: N805 # pydantic wants 'cls' as first arg
        """Validate host (must be a string); raises instead of asserting (see validate_port)."""
        if not isinstance(host, str):
            raise TypeError(f"expected str host, got {type(host)}")
        return host

    @validator("ip", pre=True)
    def validate_ip(cls, ip):  # noqa: N805 # pydantic wants 'cls' as first arg
        """Validate ip: None is allowed, otherwise must parse as IPv4 or IPv6."""
        if ip is None:
            return None
        if not isinstance(ip, str):
            raise TypeError(f"got ip of type {type(ip)} instead of expected str")
        try:
            ipaddress.IPv4Address(ip)
            return ip
        except ipaddress.AddressValueError:
            pass
        try:
            ipaddress.IPv6Address(ip)
            return ip
        except ipaddress.AddressValueError:
            raise ValueError(f"{ip!r} is not a valid ip address")
class RequirerSchema(BaseModel):
    """Requirer schema for Ingress."""

    app: IngressRequirerAppData
    unit: IngressRequirerUnitData


class IngressError(RuntimeError):
    """Base class for custom errors raised by this library."""


class NotReadyError(IngressError):
    """Raised when a relation is not ready."""


class DataValidationError(IngressError):
    """Raised when data validation fails on IPU relation data."""


class _IngressPerAppBase(Object):
    """Base class for IngressPerUnit interface classes.

    Wires all lifecycle events of the named relation endpoint (plus
    leader-elected and upgrade-charm) to overridable handler methods,
    so subclasses only implement the hooks they care about.
    """

    def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):
        super().__init__(charm, relation_name)

        self.charm: CharmBase = charm
        self.relation_name = relation_name
        self.app = self.charm.app
        self.unit = self.charm.unit

        observe = self.framework.observe
        rel_events = charm.on[relation_name]
        # created/joined/changed/departed all funnel into one handler:
        # subclasses re-derive state from relation data rather than the event kind.
        observe(rel_events.relation_created, self._handle_relation)
        observe(rel_events.relation_joined, self._handle_relation)
        observe(rel_events.relation_changed, self._handle_relation)
        observe(rel_events.relation_departed, self._handle_relation)
        observe(rel_events.relation_broken, self._handle_relation_broken)
        observe(charm.on.leader_elected, self._handle_upgrade_or_leader)  # type: ignore
        observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader)  # type: ignore

    @property
    def relations(self):
        """The list of Relation instances associated with this endpoint."""
        return list(self.charm.model.relations[self.relation_name])

    def _handle_relation(self, event):
        """Subclasses should implement this method to handle a relation update."""
        pass

    def _handle_relation_broken(self, event):
        """Subclasses should implement this method to handle a relation breaking."""
        pass

    def _handle_upgrade_or_leader(self, event):
        """Subclasses should implement this method to handle upgrades or leadership change."""
        pass


class _IPAEvent(RelationEvent):
    """RelationEvent carrying extra attributes declared by subclasses.

    Subclasses list required positional attributes in ``__args__`` and
    optional ones (with defaults) in ``__optional_kwargs__``; snapshot()/
    restore() then persist those attributes across the ops event queue.
    """

    __args__: Tuple[str, ...] = ()
    __optional_kwargs__: Dict[str, Any] = {}

    @classmethod
    def __attrs__(cls):
        # all attribute names this event (de)serializes
        return cls.__args__ + tuple(cls.__optional_kwargs__.keys())

    def __init__(self, handle, relation, *args, **kwargs):
        super().__init__(handle, relation)

        if not len(self.__args__) == len(args):
            raise TypeError("expected {} args, got {}".format(len(self.__args__), len(args)))

        for attr, obj in zip(self.__args__, args):
            setattr(self, attr, obj)
        for attr, default in self.__optional_kwargs__.items():
            obj = kwargs.get(attr, default)
            setattr(self, attr, obj)

    def snapshot(self):
        """Serialize the declared attributes alongside the base relation snapshot."""
        dct = super().snapshot()
        for attr in self.__attrs__():
            obj = getattr(self, attr)
            try:
                dct[attr] = obj
            except ValueError as e:
                raise ValueError(
                    "cannot automagically serialize {}: "
                    "override this method and do it "
                    "manually.".format(obj)
                ) from e

        return dct

    def restore(self, snapshot) -> None:
        """Restore the declared attributes from a snapshot."""
        super().restore(snapshot)
        for attr, obj in snapshot.items():
            setattr(self, attr, obj)
class IngressPerAppDataProvidedEvent(_IPAEvent):
    """Event representing that ingress data has been provided for an app."""

    __args__ = ("name", "model", "hosts", "strip_prefix", "redirect_https")

    if typing.TYPE_CHECKING:
        name: Optional[str] = None
        model: Optional[str] = None
        # sequence of hostname, port dicts
        hosts: Sequence["IngressRequirerUnitData"] = ()
        strip_prefix: bool = False
        redirect_https: bool = False


class IngressPerAppDataRemovedEvent(RelationEvent):
    """Event representing that ingress data has been removed for an app."""


class IngressPerAppProviderEvents(ObjectEvents):
    """Container for IPA Provider events."""

    data_provided = EventSource(IngressPerAppDataProvidedEvent)
    data_removed = EventSource(IngressPerAppDataRemovedEvent)


@dataclass
class IngressRequirerData:
    """Data exposed by the ingress requirer to the provider."""

    # validated app databag
    app: "IngressRequirerAppData"
    # validated per-unit databags
    units: List["IngressRequirerUnitData"]


class IngressPerAppProvider(_IngressPerAppBase):
    """Implementation of the provider of ingress."""

    on = IngressPerAppProviderEvents()  # type: ignore

    def __init__(
        self,
        charm: CharmBase,
        relation_name: str = DEFAULT_RELATION_NAME,
    ):
        """Constructor for IngressPerAppProvider.

        Args:
            charm: The charm that is instantiating the instance.
            relation_name: The name of the relation endpoint to bind to
                (defaults to "ingress").
        """
        super().__init__(charm, relation_name)

    def _handle_relation(self, event):
        # created, joined or changed: if remote side has sent the required data:
        # notify listeners.
        if self.is_ready(event.relation):
            data = self.get_data(event.relation)
            self.on.data_provided.emit(  # type: ignore
                event.relation,
                data.app.name,
                data.app.model,
                [unit.dict() for unit in data.units],
                data.app.strip_prefix or False,
                data.app.redirect_https or False,
            )

    def _handle_relation_broken(self, event):
        self.on.data_removed.emit(event.relation)  # type: ignore

    def wipe_ingress_data(self, relation: Relation):
        """Clear ingress data from relation.

        Leader-only: only the leader may write the app databag.
        """
        assert self.unit.is_leader(), "only leaders can do this"
        try:
            relation.data
        except ModelError as e:
            log.warning(
                "error {} accessing relation data for {!r}. "
                "Probably a ghost of a dead relation is still "
                "lingering around.".format(e, relation.name)
            )
            return
        del relation.data[self.app]["ingress"]

    def _get_requirer_units_data(self, relation: Relation) -> List["IngressRequirerUnitData"]:
        """Fetch and validate the requirer's app databag.

        :raises pydantic.ValidationError: if any remote unit databag is invalid
            (logged at INFO, then re-raised).
        """
        out: List["IngressRequirerUnitData"] = []

        unit: Unit
        for unit in relation.units:
            databag = relation.data[unit]
            try:
                data = IngressRequirerUnitData.load(databag)
                out.append(data)
            except pydantic.ValidationError:
                log.info(f"failed to validate remote unit data for {unit}")
                raise
        return out

    @staticmethod
    def _get_requirer_app_data(relation: Relation) -> "IngressRequirerAppData":
        """Fetch and validate the requirer's app databag.

        :raises NotReadyError: if the remote app is not available yet.
        """
        app = relation.app
        if app is None:
            raise NotReadyError(relation)

        databag = relation.data[app]
        return IngressRequirerAppData.load(databag)

    def get_data(self, relation: Relation) -> IngressRequirerData:
        """Fetch the remote (requirer) app and units' databags.

        :raises DataValidationError: if either databag fails validation.
        """
        try:
            return IngressRequirerData(
                self._get_requirer_app_data(relation), self._get_requirer_units_data(relation)
            )
        except (pydantic.ValidationError, DataValidationError) as e:
            raise DataValidationError("failed to validate ingress requirer data") from e

    def is_ready(self, relation: Optional[Relation] = None):
        """The Provider is ready if the requirer has sent valid data.

        With no relation given, checks whether ANY relation is ready.
        """
        if not relation:
            return any(map(self.is_ready, self.relations))

        try:
            self.get_data(relation)
        except (DataValidationError, NotReadyError) as e:
            log.debug("Provider not ready; validation error encountered: %s" % str(e))
            return False
        return True

    def _published_url(self, relation: Relation) -> Optional["IngressProviderAppData"]:
        """Fetch and validate this app databag; return the ingress url.

        :raises NotReadyError: if this app has not called publish_url yet.
        """
        if not self.is_ready(relation) or not self.unit.is_leader():
            # Handle edge case where remote app name can be missing, e.g.,
            # relation_broken events.
            # Also, only leader units can read own app databags.
            # FIXME https://github.com/canonical/traefik-k8s-operator/issues/34
            return None

        # fetch the provider's app databag
        databag = relation.data[self.app]
        if not databag.get("ingress"):
            raise NotReadyError("This application did not `publish_url` yet.")

        return IngressProviderAppData.load(databag)

    def publish_url(self, relation: Relation, url: str):
        """Publish to the app databag the ingress url."""
        ingress_url = {"url": url}
        IngressProviderAppData(ingress=ingress_url).dump(relation.data[self.app])  # type: ignore

    @property
    def proxied_endpoints(self) -> Dict[str, Dict[str, str]]:
        """Returns the ingress settings provided to applications by this IngressPerAppProvider.

        For example, when this IngressPerAppProvider has provided the
        `http://foo.bar/my-model.my-app` URL to the my-app application, the returned dictionary
        will be:

        ```
        {
            "my-app": {
                "url": "http://foo.bar/my-model.my-app"
            }
        }
        ```
        """
        results: Dict[str, Dict[str, str]] = {}

        for ingress_relation in self.relations:
            if not ingress_relation.app:
                log.warning(
                    f"no app in relation {ingress_relation} when fetching proxied endpoints: skipping"
                )
                continue
            try:
                ingress_data = self._published_url(ingress_relation)
            except NotReadyError:
                log.warning(
                    f"no published url found in {ingress_relation}: "
                    f"traefik didn't publish_url yet to this relation."
                )
                continue

            if not ingress_data:
                log.warning(f"relation {ingress_relation} not ready yet: try again in some time.")
                continue
            # serialize via whichever pydantic API is available
            if PYDANTIC_IS_V1:
                results[ingress_relation.app.name] = ingress_data.ingress.dict()
            else:
                results[ingress_relation.app.name] = ingress_data.ingress.model_dump(mode="json")
        return results


class IngressPerAppReadyEvent(_IPAEvent):
    """Event representing that ingress for an app is ready."""

    __args__ = ("url",)
    if typing.TYPE_CHECKING:
        url: Optional[str] = None


class IngressPerAppRevokedEvent(RelationEvent):
    """Event representing that ingress for an app has been revoked."""


class IngressPerAppRequirerEvents(ObjectEvents):
    """Container for IPA Requirer events."""

    ready = EventSource(IngressPerAppReadyEvent)
    revoked = EventSource(IngressPerAppRevokedEvent)
class IngressPerAppRequirer(_IngressPerAppBase):
    """Implementation of the requirer of the ingress relation."""

    on = IngressPerAppRequirerEvents()  # type: ignore

    # used to prevent spurious urls to be sent out if the event we're currently
    # handling is a relation-broken one.
    _stored = StoredState()

    def __init__(
        self,
        charm: CharmBase,
        relation_name: str = DEFAULT_RELATION_NAME,
        *,
        host: Optional[str] = None,
        ip: Optional[str] = None,
        port: Optional[int] = None,
        strip_prefix: bool = False,
        redirect_https: bool = False,
        # fixme: this is horrible UX.
        # shall we switch to manually calling provide_ingress_requirements with all args when ready?
        scheme: Union[Callable[[], str], str] = lambda: "http",
    ):
        """Constructor for IngressRequirer.

        The request args can be used to specify the ingress properties when the
        instance is created. If any are set, at least `port` is required, and
        they will be sent to the ingress provider as soon as it is available.
        All request args must be given as keyword args.

        Args:
            charm: the charm that is instantiating the library.
            relation_name: the name of the relation endpoint to bind to (defaults to `ingress`);
                relation must be of interface type `ingress` and have "limit: 1")
            host: Hostname to be used by the ingress provider to address the requiring
                application; if unspecified, the default Kubernetes service name will be used.
            ip: Alternative addressing method other than host to be used by the ingress provider;
                if unspecified, binding address from juju network API will be used.
            strip_prefix: configure Traefik to strip the path prefix.
            redirect_https: redirect incoming requests to HTTPS.
            scheme: callable returning the scheme to use when constructing the ingress url.
                Or a string, if the scheme is known and stable at charm-init-time.

        Request Args:
            port: the port of the service
        """
        super().__init__(charm, relation_name)
        self.charm: CharmBase = charm
        self.relation_name = relation_name
        self._strip_prefix = strip_prefix
        self._redirect_https = redirect_https
        # normalize scheme to a callable so a plain string works too
        self._get_scheme = scheme if callable(scheme) else lambda: scheme

        self._stored.set_default(current_url=None)  # type: ignore

        # if instantiated with a port, and we are related, then
        # we immediately publish our ingress data to speed up the process.
        if port:
            self._auto_data = host, ip, port
        else:
            self._auto_data = None

    def _handle_relation(self, event):
        # created, joined or changed: if we have auto data: publish it
        self._publish_auto_data()
        if self.is_ready():
            # Avoid spurious events, emit only when there is a NEW URL available
            new_url = (
                None
                if isinstance(event, RelationBrokenEvent)
                else self._get_url_from_relation_data()
            )
            if self._stored.current_url != new_url:  # type: ignore
                self._stored.current_url = new_url  # type: ignore
                self.on.ready.emit(event.relation, new_url)  # type: ignore

    def _handle_relation_broken(self, event):
        self._stored.current_url = None  # type: ignore
        self.on.revoked.emit(event.relation)  # type: ignore

    def _handle_upgrade_or_leader(self, event):
        """On upgrade/leadership change: ensure we publish the data we have."""
        self._publish_auto_data()

    def is_ready(self):
        """The Requirer is ready if the Provider has sent valid data."""
        try:
            return bool(self._get_url_from_relation_data())
        except DataValidationError as e:
            log.debug("Requirer not ready; validation error encountered: %s" % str(e))
            return False

    def _publish_auto_data(self):
        # publish the host/ip/port captured at construction time, if any
        if self._auto_data:
            host, ip, port = self._auto_data
            self.provide_ingress_requirements(host=host, ip=ip, port=port)

    def provide_ingress_requirements(
        self,
        *,
        scheme: Optional[str] = None,
        host: Optional[str] = None,
        ip: Optional[str] = None,
        port: int,
    ):
        """Publishes the data that Traefik needs to provide ingress.

        Args:
            scheme: Scheme to be used; if unspecified, use the one used by __init__.
            host: Hostname to be used by the ingress provider to address the
                requirer unit; if unspecified, FQDN will be used instead
            ip: Alternative addressing method other than host to be used by the ingress provider.
                if unspecified, binding address from juju network API will be used.
            port: the port of the service (required)
        """
        for relation in self.relations:
            self._provide_ingress_requirements(scheme, host, ip, port, relation)

    def _provide_ingress_requirements(
        self,
        scheme: Optional[str],
        host: Optional[str],
        ip: Optional[str],
        port: int,
        relation: Relation,
    ):
        # app databag is leader-only; unit databag is always writable
        if self.unit.is_leader():
            self._publish_app_data(scheme, port, relation)

        self._publish_unit_data(host, ip, relation)

    def _publish_unit_data(
        self,
        host: Optional[str],
        ip: Optional[str],
        relation: Relation,
    ):
        if not host:
            host = socket.getfqdn()

        if ip is None:
            # fall back to the juju binding address for this relation
            network_binding = self.charm.model.get_binding(relation)
            if (
                network_binding is not None
                and (bind_address := network_binding.network.bind_address) is not None
            ):
                ip = str(bind_address)
            else:
                log.error("failed to retrieve ip information from juju")

        unit_databag = relation.data[self.unit]
        try:
            IngressRequirerUnitData(host=host, ip=ip).dump(unit_databag)
        except pydantic.ValidationError as e:
            msg = "failed to validate unit data"
            log.info(msg, exc_info=True)  # log to INFO because this might be expected
            raise DataValidationError(msg) from e

    def _publish_app_data(
        self,
        scheme: Optional[str],
        port: int,
        relation: Relation,
    ):
        # assumes leadership!
        app_databag = relation.data[self.app]

        if not scheme:
            # If scheme was not provided, use the one given to the constructor.
            scheme = self._get_scheme()

        try:
            IngressRequirerAppData(  # type: ignore # pyright does not like aliases
                model=self.model.name,
                name=self.app.name,
                scheme=scheme,
                port=port,
                strip_prefix=self._strip_prefix,  # type: ignore # pyright does not like aliases
                redirect_https=self._redirect_https,  # type: ignore # pyright does not like aliases
            ).dump(app_databag)
        except pydantic.ValidationError as e:
            msg = "failed to validate app data"
            log.info(msg, exc_info=True)  # log to INFO because this might be expected
            raise DataValidationError(msg) from e

    @property
    def relation(self):
        """The established Relation instance, or None."""
        return self.relations[0] if self.relations else None

    def _get_url_from_relation_data(self) -> Optional[str]:
        """The full ingress URL to reach the current unit.

        Returns None if the URL isn't available yet.
        """
        relation = self.relation
        if not relation or not relation.app:
            return None

        # fetch the provider's app databag
        try:
            databag = relation.data[relation.app]
        except ModelError as e:
            log.debug(
                f"Error {e} attempting to read remote app data; "
                f"probably we are in a relation_departed hook"
            )
            return None

        if not databag:  # not ready yet
            return None

        return str(IngressProviderAppData.load(databag).ingress.url)

    @property
    def url(self) -> Optional[str]:
        """The full ingress URL to reach the current unit.

        Returns None if the URL isn't available yet.
        """
        # prefer the cached value; fall back to reading relation data
        data = (
            typing.cast(Optional[str], self._stored.current_url)  # type: ignore
            or self._get_url_from_relation_data()
        )
        return data