├── version ├── src ├── interfaces │ ├── __init__.py │ └── etcd.py ├── charm.py └── ams.py ├── tests ├── integration │ ├── application-charm │ │ ├── requirements.txt │ │ ├── metadata.yaml │ │ ├── charmcraft.yaml │ │ └── src │ │ │ └── charm.py │ ├── conftest.py │ ├── test_charm.py │ ├── test_rest_charm.py │ ├── test_etcd.py │ └── test_lxd_integrator.py └── unit │ ├── conftest.py │ └── test_charm.py ├── templates ├── 10-ams-unix-socket-chown.conf.j2 └── settings.yaml.j2 ├── requirements.txt ├── .github ├── renovate-config.js ├── workflows │ ├── cla_check.yaml │ ├── pull-request.yaml │ ├── renovate.yaml │ ├── get-version.yaml │ ├── release.yaml │ └── build-and-test.yaml └── renovate.json ├── charmcraft.yaml ├── pyproject.toml ├── metadata.yaml ├── scripts └── ci │ └── get-runners.py ├── README.md ├── icon.svg ├── CONTRIBUTING.md ├── tox.ini ├── .gitignore ├── config.yaml ├── lib └── charms │ ├── operator_libs_linux │ ├── v0 │ │ └── passwd.py │ ├── v1 │ │ └── systemd.py │ └── v2 │ │ └── snap.py │ └── grafana_agent │ └── v0 │ └── cos_agent.py └── LICENSE /version: -------------------------------------------------------------------------------- 1 | 1.22 2 | -------------------------------------------------------------------------------- /src/interfaces/__init__.py: -------------------------------------------------------------------------------- 1 | """Interfaces for AMS Charm.""" 2 | -------------------------------------------------------------------------------- /tests/integration/application-charm/requirements.txt: -------------------------------------------------------------------------------- 1 | jsonschema 2 | cryptography==38.0.4 3 | pylxd 4 | ops==2.8.0 5 | -------------------------------------------------------------------------------- /templates/10-ams-unix-socket-chown.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Type=notify 3 | ExecStartPost=/bin/sh -c "chown :{{ group }} 
/var/snap/ams/common/server/unix.socket" 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ops>=2.7.0 2 | jinja2==3.1.2 3 | markupsafe==2.1.3 4 | netifaces==0.11.0 5 | jsonschema==4.19.1 6 | cryptography<41.0.0; python_version <= "3.8" 7 | cryptography==42.0.5; python_version >= "3.10" 8 | cosl==0.0.10 9 | pydantic==1.10.13 10 | -------------------------------------------------------------------------------- /.github/renovate-config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | branchPrefix: "renovate/", 3 | dryRun: null, 4 | username: "renovate-release", 5 | gitAuthor: "Renovate Bot ", 6 | onboarding: true, 7 | platform: "github", 8 | includeForks: true, 9 | repositories: ["anbox-cloud/ams-operator"], 10 | } 11 | -------------------------------------------------------------------------------- /tests/integration/application-charm/metadata.yaml: -------------------------------------------------------------------------------- 1 | name: ams-api-tester 2 | display-name: ams-client-app 3 | description: a testing application for ams charm 4 | summary: A simple application to relate to the rest interface to test the ams charm 5 | subordinate: false 6 | series: 7 | - jammy 8 | requires: 9 | client: 10 | interface: rest 11 | 12 | -------------------------------------------------------------------------------- /.github/workflows/cla_check.yaml: -------------------------------------------------------------------------------- 1 | name: CLA check 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | 7 | jobs: 8 | cla-check: 9 | runs-on: ubuntu-22.04 10 | steps: 11 | - name: Check if Canonical's Contributor License Agreement has been signed 12 | uses: canonical/has-signed-canonical-cla@9a7e0da38a13dbc25b14c389851bcf1624f4784d # v1 13 | 
-------------------------------------------------------------------------------- /tests/integration/application-charm/charmcraft.yaml: -------------------------------------------------------------------------------- 1 | type: charm 2 | bases: 3 | - build-on: 4 | - name: "ubuntu" 5 | channel: "22.04" 6 | run-on: 7 | - name: "ubuntu" 8 | channel: "22.04" 9 | parts: 10 | charm: 11 | charm-requirements: ["requirements.txt"] 12 | build-packages: 13 | - git 14 | - libssl-dev 15 | - libffi-dev 16 | - rustc 17 | - cargo 18 | - pkg-config 19 | -------------------------------------------------------------------------------- /.github/workflows/pull-request.yaml: -------------------------------------------------------------------------------- 1 | name: Pull Request 2 | on: 3 | pull_request: 4 | paths-ignore: 5 | - ".github/renovate*" 6 | - ".github/workflows/release.yaml" 7 | - ".github/workflows/renovate.yaml" 8 | - ".github/workflows/update-libs.yaml" 9 | - ".gitignore" 10 | - ".jujuignore" 11 | push: 12 | branches: 13 | - "renovate/*" 14 | 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | test: 21 | uses: ./.github/workflows/build-and-test.yaml 22 | with: 23 | snap_risk_level: edge 24 | -------------------------------------------------------------------------------- /charmcraft.yaml: -------------------------------------------------------------------------------- 1 | # This file configures Charmcraft. 2 | # See https://juju.is/docs/sdk/charmcraft-config for guidance. 
3 | 4 | type: charm 5 | bases: 6 | - name: ubuntu 7 | channel: "20.04" 8 | architectures: [amd64] 9 | - name: ubuntu 10 | channel: "20.04" 11 | architectures: [arm64] 12 | - name: ubuntu 13 | channel: "22.04" 14 | architectures: [amd64] 15 | - name: ubuntu 16 | channel: "22.04" 17 | architectures: [arm64] 18 | parts: 19 | charm: 20 | charm-requirements: ["requirements.txt"] 21 | build-packages: 22 | - git 23 | - libffi-dev 24 | - libssl-dev 25 | - rustc 26 | - cargo 27 | - pkg-config 28 | -------------------------------------------------------------------------------- /.github/workflows/renovate.yaml: -------------------------------------------------------------------------------- 1 | # workflow for checking package versions and opening PRs to bump 2 | name: Renovate 3 | on: 4 | schedule: 5 | - cron: "0 12 * * *" 6 | workflow_dispatch: 7 | workflow_call: 8 | 9 | jobs: 10 | renovate: 11 | runs-on: ubuntu-22.04 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0 15 | 16 | - name: Self-hosted Renovate 17 | uses: renovatebot/github-action@89bd050bafa5a15de5d9383e3129edf210422004 # v40.1.5 18 | with: 19 | configurationFile: .github/renovate-config.js 20 | token: ${{ github.token }} 21 | -------------------------------------------------------------------------------- /.github/workflows/get-version.yaml: -------------------------------------------------------------------------------- 1 | name: Get current charm version 2 | 3 | on: 4 | workflow_call: 5 | outputs: 6 | charm_version: 7 | description: "Get the current version of the charm" 8 | value: ${{ jobs.get-charm-version.outputs.version }} 9 | 10 | jobs: 11 | get-charm-version: 12 | runs-on: [self-hosted, linux, X64, jammy, large] 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 16 | - name: Get charm version 17 | id: charm_version 18 | run: | 19 | echo "version=$(cat version)" >> $GITHUB_OUTPUT 20 | 
outputs: 21 | version: ${{ steps.charm_version.outputs.version }} 22 | 23 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # Testing tools configuration 2 | [tool.coverage.run] 3 | branch = true 4 | 5 | [tool.coverage.report] 6 | show_missing = true 7 | 8 | [tool.pytest.ini_options] 9 | minversion = "6.0" 10 | log_cli_level = "INFO" 11 | 12 | # Formatting tools configuration 13 | [tool.black] 14 | line-length = 99 15 | target-version = ["py38"] 16 | 17 | # Linting tools configuration 18 | [tool.ruff] 19 | line-length = 99 20 | select = ["E", "W", "F", "C", "N", "D", "I001"] 21 | extend-ignore = [ 22 | "D203", 23 | "D204", 24 | "D213", 25 | "D215", 26 | "D400", 27 | "D404", 28 | "D406", 29 | "D407", 30 | "D408", 31 | "D409", 32 | "D413", 33 | ] 34 | ignore = ["E501", "D107"] 35 | extend-exclude = ["__pycache__", "*.egg_info"] 36 | per-file-ignores = {"tests/*" = ["D100","D101","D102","D103","D104"]} 37 | 38 | [tool.ruff.mccabe] 39 | max-complexity = 10 40 | 41 | [tool.codespell] 42 | skip = "build,lib,venv,icon.svg,.tox,.git,.mypy_cache,.ruff_cache,.vscode,.coverage" 43 | -------------------------------------------------------------------------------- /metadata.yaml: -------------------------------------------------------------------------------- 1 | # This file populates the Overview on Charmhub. 2 | # See https://juju.is/docs/sdk/metadata-reference for a checklist and guidance. 3 | 4 | # The charm package name, no spaces (required) 5 | # See https://juju.is/docs/sdk/naming#heading--naming-charms for guidance. 
6 | name: ams 7 | display-name: Anbox Management Service 8 | summary: Anbox Management Service 9 | website: https://anbox-cloud.io 10 | issues: https://bugs.launchpad.net/anbox-cloud 11 | maintainers: 12 | - Indore team 13 | description: | 14 | The Anbox Management Service (AMS) is responsible to manage a set of 15 | Android containers on a cluster of machines running LXD. It provides 16 | all kinds of different features to make the management of Android 17 | applications running inside these containers as easy as possible. 18 | tags: 19 | - service 20 | - anbox 21 | provides: 22 | rest-api: 23 | interface: rest 24 | cos-agent: 25 | interface: cos_agent 26 | requires: 27 | lxd-cluster: 28 | interface: lxd 29 | etcd: 30 | interface: etcd 31 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release To Edge 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths-ignore: 8 | - 'tests/**' 9 | - 'docs/**' 10 | - .github/renovate.json5 11 | - pyproject.toml 12 | 13 | concurrency: 14 | group: ${{ github.workflow }}-${{ github.ref }} 15 | cancel-in-progress: true 16 | 17 | jobs: 18 | get-version: 19 | name: Get Charm Version 20 | uses: ./.github/workflows/get-version.yaml 21 | 22 | ci: 23 | name: Build & Test 24 | uses: ./.github/workflows/build-and-test.yaml 25 | secrets: inherit 26 | with: 27 | snap_risk_level: edge 28 | 29 | release: 30 | name: Release to edge 31 | needs: 32 | - ci 33 | - get-version 34 | uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v13.0.0 35 | with: 36 | # TODO: change this when we actually want to cutover the charm to main 37 | # channels 38 | channel: "${{ needs.get-version.outputs.charm_version }}-ops/edge" 39 | artifact-prefix: ${{ needs.ci.outputs.artifact-prefix }} 40 | secrets: 41 | charmhub-token: ${{ secrets.CHARMHUB_TOKEN }} 42 | 
#!/usr/bin/env python3
"""Map charmcraft bases to GitHub Actions runner labels.

Reads ``charmcraft.yaml`` from the current working directory and appends a
``bases=<json>`` entry to the file named by the ``GITHUB_OUTPUT`` environment
variable, for consumption by a GitHub Actions build matrix.
"""

import json
import logging
import os

import yaml

SUPPORTED_ARCHITECTURES = ('arm64', 'amd64')
RUNNER_LABELS = {
    'arm64': ["Ubuntu_ARM64_4C_16G_01"],
    'amd64': ["self-hosted", "linux", "X64", "jammy", "large"]
}

logging.basicConfig(level=logging.INFO)


def main() -> int:
    """Build the runner matrix from charmcraft bases and write it to GITHUB_OUTPUT.

    Returns:
        0 on success.

    Raises:
        ValueError: if a base declares an architecture outside
            SUPPORTED_ARCHITECTURES.
        KeyError: if GITHUB_OUTPUT is not set in the environment.
    """
    with open('charmcraft.yaml', 'r') as f:
        charmcraft_cfg = yaml.safe_load(f)
    data = []
    for base_idx, base in enumerate(charmcraft_cfg['bases']):
        for arch in base['architectures']:
            if arch not in SUPPORTED_ARCHITECTURES:
                raise ValueError(f'Base {base_idx} architecture: {arch} is not supported')
            data.append({
                "base_index": base_idx,
                "runner_labels": RUNNER_LABELS[arch],
                "arch": arch
            })
    logging.info(f'bases: {data}')
    with open(os.environ['GITHUB_OUTPUT'], 'a') as f:
        f.write(f"bases={json.dumps(data)}")
    return 0


if __name__ == "__main__":
    # Bug fix: the original `SystemExit(main())` merely constructed the
    # exception object and discarded it, so the script's exit status was
    # always 0 regardless of what main() returned. `raise` propagates it.
    raise SystemExit(main())
@pytest.fixture
def current_version():
    """Return the charm version recorded in the top-level ``version`` file."""
    with open("version", "r") as version_file:
        contents = version_file.read()
    return contents.strip("\n")


@pytest.fixture
def mocked_ams():
    """Patch ``src.charm.AMS`` and yield a mock whose ``version`` is ``"x1"``."""
    with patch("src.charm.AMS") as ams_cls:
        instance = MagicMock()
        # Expose `version` as a property on the mock's type so attribute
        # access on the instance returns the fixed workload version.
        type(instance).version = PropertyMock(return_value="x1")
        ams_cls.return_value = instance
        yield instance
The Anbox Management System, or *ams*, is the main piece of software responsible for managing containers,
applications, addons, and more.
23 | 24 | ```sh 25 | $ juju deploy etcd 26 | $ juju deploy easyrsa 27 | $ juju relate etcd easyrsa 28 | ``` 29 | 30 | Then deploy `ams` 31 | 32 | ```sh 33 | $ juju deploy ams 34 | $ juju relate ams etcd 35 | ``` 36 | 37 | For more information about AMS, visit the official documentation on https://anbox-cloud.io 38 | -------------------------------------------------------------------------------- /templates/settings.yaml.j2: -------------------------------------------------------------------------------- 1 | listen-address: {{ ip }}:{{ port }} 2 | logger: 3 | level: {{ log_level }} 4 | 5 | store: 6 | {% if store.use_embedded %} 7 | driver: embedded-etcd 8 | data: "/var/snap/ams/common/etcd-data" 9 | {% else %} 10 | driver: etcd 11 | data: 12 | servers: {{ store.servers }} 13 | key: {{ store.key }} 14 | cert: {{ store.cert }} 15 | ca: {{ store.ca }} 16 | {% endif %} 17 | 18 | artifacts: 19 | provider: filesystem 20 | path: /var/snap/ams/common/data/artifacts 21 | 22 | backend: 23 | port-range: {{ backend.port_range }} 24 | default-session-specification: 25 | log-level: {{ log_level }} 26 | metrics-server: {{ backend.metrics_server }} 27 | force-tls12: {{ backend.force_tls12 }} 28 | use-network-acl: {{ backend.use_network_acl }} 29 | {%- if backend.lxd_project|length > 0 %} 30 | lxd: 31 | project: {{ backend.lxd_project }} 32 | {%- endif %} 33 | 34 | {% if metrics.enabled %} 35 | metrics: 36 | prometheus: 37 | {%- if metrics.extra_labels|length > 0 %} 38 | extra-labels: 39 | {%- for key, value in metrics.extra_labels.items() %} 40 | "{{ key }}": "{{ value }}" 41 | {%- endfor%} 42 | {%- endif %} 43 | listen-address: {{ metrics.target_ip }}:{{ metrics.target_port }} 44 | {% if metrics.basic_auth_username|length and metrics.basic_auth_password|length -%} 45 | username: {{metrics.basic_auth_username}} 46 | password: {{metrics.basic_auth_password}} 47 | {%- endif %} 48 | {% if metrics.tls_key|length and metrics.tls_cert|length -%} 49 | tls: 50 | key: {{metrics.tls_key}} 51 
def pytest_addoption(parser):
    """Register the command line options used by the integration tests."""
    parser.addoption("--constraints", default="", action="store", help="Model constraints")
    parser.addoption("--charm", default="", action="store", help="Path to a built charm")
    parser.addoption(
        "--snap-risk-level",
        default="",
        action="store",
        help="Risk level to use for the snap deployed by the charm",
    )


@pytest.fixture
def charm_name():
    """Return the charm name declared in ``metadata.yaml``."""
    return yaml.safe_load(Path("./metadata.yaml").read_text())["name"]


@pytest.fixture
def constraints(request) -> dict:
    """Parse the space-separated ``--constraints`` option into a mapping."""
    raw = request.config.getoption("--constraints")
    parsed = {}
    for item in raw.split(" "):
        if not item:
            # Skip empty fragments produced by splitting an empty/padded string.
            continue
        key, value = item.split("=")
        parsed[key] = value
    return parsed


@pytest.fixture(scope="module")
def snap_risk_level(request):
    """Return the value of the ``--snap-risk-level`` option."""
    return request.config.getoption("--snap-risk-level")


@pytest.fixture
def charm_path(request):
    """Return the value of the ``--charm`` option (path to a pre-built charm)."""
    return request.config.getoption("--charm")
@pytest.fixture(scope="module")
def charm_config(snap_risk_level) -> dict:
    """Charm config enabling embedded etcd, plus an optional snap risk level."""
    config = {"use_embedded_etcd": True}
    if snap_risk_level:
        config["snap_risk_level"] = snap_risk_level
    return config


@pytest.mark.abort_on_fail
async def test_can_deploy_with_embedded_etcd(
    ops_test: OpsTest, constraints, charm_name, charm_path, charm_config
):
    """Build the charm-under-test and deploy it together with related charms.

    Assert on the unit status before any relations/configurations take place.
    """
    # Only build from the local source folder when no pre-built charm
    # path was supplied on the command line.
    if not charm_path:
        charm_path = await ops_test.build_charm(".")
    if constraints:
        await ops_test.model.set_constraints(constraints)
    await ops_test.model.deploy(
        charm_path,
        application_name=charm_name,
        config=charm_config,
    )
    async with ops_test.fast_forward():
        await ops_test.model.wait_for_idle(apps=[charm_name], status="active", timeout=1000)
41 | """ 42 | # Build and deploy charm from local source folder 43 | if not charm_path: 44 | charm_path = await ops_test.build_charm(".") 45 | if constraints: 46 | await ops_test.model.set_constraints(constraints) 47 | await ops_test.model.deploy( 48 | charm_path, 49 | application_name=charm_name, 50 | config=charm_config, 51 | ) 52 | async with ops_test.fast_forward(): 53 | await ops_test.model.wait_for_idle(apps=[charm_name], status="active", timeout=1000) 54 | -------------------------------------------------------------------------------- /icon.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | ## Overview 4 | 5 | This documents explains the processes and practices recommended for contributing enhancements to 6 | this operator. 7 | 8 | - Generally, before developing enhancements to this charm, consider [opening an issue 9 | ](https://github.com/canonical/ams-operator/issues) explaining your use case. 10 | - Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library 11 | will help you a lot when working on new features or bug fixes. 12 | - All enhancements require review before being merged. Code review typically examines 13 | - code quality 14 | - test coverage 15 | - user experience for Juju administrators of this charm. 16 | - It is a good practice to rebase your pull request branch onto the `main` branch for a linear 17 | commit history, avoiding merge commits and easy reviews. 18 | 19 | ## Developing 20 | 21 | ### Prerequisites 22 | 23 | To run integration tests you require a juju deployment with a juju controller ready. You can refer to 24 | [how to setup a juju deployment](https://juju.is/docs/juju/get-started-with-juju). 
25 | 26 | 27 | ### Develop 28 | You can create an environment for development with `tox`: 29 | 30 | ```shell 31 | tox devenv -e integration-juju3 32 | source venv/bin/activate 33 | ``` 34 | 35 | ### Test 36 | 37 | ```shell 38 | tox run -e format # update your code according to linting rules 39 | tox run -e lint # code style 40 | tox run -e unit # unit tests 41 | tox run -e integration-juju2 # integration tests for juju 2.9 42 | tox run -e integration-juju3 # integration tests for juju 3.2 43 | tox # runs 'lint' and 'unit' environments 44 | ``` 45 | 46 | ### Build 47 | 48 | Build the charm in this git repository using: 49 | 50 | ```shell 51 | charmcraft pack 52 | ``` 53 | 54 | ### Deploy 55 | 56 | ```bash 57 | # Create a model 58 | juju add-model dev 59 | 60 | # Enable DEBUG logging 61 | juju model-config logging-config="=INFO;unit=DEBUG" 62 | 63 | # Deploy the charm 64 | juju deploy ./ams_ubuntu-22.04-amd64.charm 65 | ``` 66 | 67 | ## Canonical Contributor Agreement 68 | 69 | Canonical welcomes contributions to the AMS Operator. Please check out our 70 | [contributor agreement](https://ubuntu.com/legal/contributors) if you're 71 | interested in contributing to the solution. 
72 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | skipsdist=True 3 | skip_missing_interpreters = True 4 | envlist = fmt, lint, unit 5 | 6 | [vars] 7 | src_path = {toxinidir}/src/ 8 | tst_path = {toxinidir}/tests/ 9 | all_path = {[vars]src_path} {[vars]tst_path} 10 | 11 | [testenv] 12 | base_python = py310 13 | setenv = 14 | PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path} 15 | PYTHONBREAKPOINT=pdb.set_trace 16 | PY_COLORS=1 17 | juju2: LIBJUJU="2.9.44" # libjuju2 18 | juju3: LIBJUJU="3.2.2" # libjuju3 19 | passenv = 20 | PYTHONPATH 21 | CHARM_BUILD_DIR 22 | MODEL_SETTINGS 23 | 24 | [testenv:fmt] 25 | description = Apply coding style standards to code 26 | deps = 27 | # renovate: datasource=pypi 28 | black==23.7.0 29 | # renovate: datasource=pypi 30 | ruff==0.0.287 31 | commands = 32 | ruff --fix {[vars]src_path} 33 | black {[vars]all_path} 34 | 35 | [testenv:lint] 36 | description = Check code against coding style standards 37 | deps = 38 | # renovate: datasource=pypi 39 | black==23.7.0 40 | # renovate: datasource=pypi 41 | ruff==0.0.287 42 | # renovate: datasource=pypi 43 | codespell==2.2.5 44 | commands = 45 | codespell {toxinidir} 46 | ruff {[vars]src_path} 47 | black --check --diff {[vars]all_path} 48 | 49 | [testenv:unit] 50 | description = Run unit tests 51 | deps = 52 | -r{toxinidir}/requirements.txt 53 | # renovate: datasource=pypi 54 | pytest==7.4.1 55 | # renovate: datasource=pypi 56 | coverage[toml]==6.5.0 57 | commands = 58 | coverage run --source={[vars]src_path} \ 59 | -m pytest \ 60 | --ignore={[vars]tst_path}integration \ 61 | --tb native \ 62 | -v \ 63 | -s \ 64 | {posargs} 65 | coverage report 66 | 67 | [testenv:integration-{juju2,juju3}] 68 | description = Run integration tests 69 | deps = 70 | # renovate: datasource=pypi 71 | pytest==7.4.1 72 | # renovate: datasource=pypi 73 | 
pytest-operator==0.32.0 74 | tenacity==8.2.3 75 | git+https://github.com/juju/juju-crashdump.git 76 | -r{toxinidir}/requirements.txt 77 | commands = 78 | pip install -q juju=={env:LIBJUJU} 79 | pytest -v \ 80 | -s \ 81 | --tb native \ 82 | --ignore={[vars]tst_path}unit \ 83 | --log-cli-level=INFO \ 84 | --asyncio-mode=auto \ 85 | {[vars]tst_path}integration \ 86 | {posargs} 87 | -------------------------------------------------------------------------------- /tests/integration/test_rest_charm.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright 2024 Canonical Ltd. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
@pytest.fixture(scope="module")
def charm_config(snap_risk_level) -> dict:
    """Charm config enabling embedded etcd, plus an optional snap risk level."""
    cfg = {"use_embedded_etcd": True}
    if snap_risk_level:
        cfg.update(snap_risk_level=snap_risk_level)
    return cfg


@pytest.mark.abort_on_fail
async def test_can_relate_to_client_charms(
    ops_test: OpsTest, charm_name, charm_config, constraints, charm_path
):
    """Build the charm-under-test and deploy it together with related charms.

    Assert on the unit status before any relations/configurations take place.
    """
    # Build and deploy charm from local source folder
    if not charm_path:
        charm_path = await ops_test.build_charm(".")
    if constraints:
        await ops_test.model.set_constraints(constraints)
    client_charm_path = await ops_test.build_charm(TEST_APP_CHARM_PATH)
    await asyncio.gather(
        ops_test.model.deploy(
            charm_path,
            application_name=charm_name,
            num_units=1,
            config=charm_config,
        ),
        ops_test.model.deploy(
            client_charm_path,
            application_name=TEST_APP_CHARM_NAME,
            # NOTE(review): `channel` has no meaning when deploying a locally
            # built charm path — presumably a copy/paste leftover; verify and
            # consider dropping it.
            channel="latest/stable",
            num_units=1,
        ),
    )
    async with ops_test.fast_forward():
        # Bug fix: the original statement ended with a stray trailing comma,
        # which wrapped the awaited result in a discarded one-element tuple.
        await ops_test.model.relate(f"{TEST_APP_CHARM_NAME}:client", f"{charm_name}:rest-api")
        await ops_test.model.wait_for_idle(
            apps=[TEST_APP_CHARM_NAME, charm_name], status="active", timeout=1000
        )
class Available(ops.EventBase):
    """Event emitted when a new client is registered."""


class ETCDEvents(ops.ObjectEvents):
    """Event wrapper for ETCD events."""

    available = ops.EventSource(Available)


class ETCDEndpointConsumer(ops.framework.Object):
    """ETCD consumer interface.

    Watches the named relation, caches the client credentials and connection
    string published by the remote etcd unit in stored state, and emits the
    ``available`` event once all four pieces of data are present.
    """

    # Bug fix: this attribute was annotated `ops.Object` although it holds an
    # `ObjectEvents` instance; the wrong annotation defeated static checking
    # of `self.on.available`.
    on: ETCDEvents = ETCDEvents()
    _state = ops.StoredState()

    def __init__(self, charm: ops.CharmBase, relation_name: str):
        super().__init__(charm, relation_name)
        self._charm = charm
        # Default to empty strings so is_available is falsy before any data.
        self._state.set_default(cert="", key="", ca="", connection_string="")
        events = self._charm.on[relation_name]
        self.framework.observe(events.relation_changed, self._on_etcd_changed)

    @property
    def is_available(self) -> bool:
        """Whether all etcd connection details have been received."""
        # Wrapped in bool(): the bare `and` chain returned the last string
        # (or None) instead of a boolean; truthiness is unchanged.
        return bool(
            self._state.ca
            and self._state.cert
            and self._state.key
            and self._state.connection_string
        )

    def _on_etcd_changed(self, event: ops.RelationChangedEvent):
        """Cache the remote unit's credentials from the relation data."""
        data = event.relation.data[event.unit]
        # NOTE(review): missing keys overwrite previously cached values with
        # None — presumably etcd publishes all four keys together; verify.
        self._state.cert = data.get("client_cert")
        self._state.key = data.get("client_key")
        self._state.ca = data.get("client_ca")
        self._state.connection_string = data.get("connection_string")
        if self.is_available:
            self.on.available.emit()

    def get_config(self) -> dict:
        """Get configuration from etcd relation."""
        return {
            "cert": self._state.cert,
            "ca": self._state.ca,
            "key": self._state.key,
            "connection_string": self._state.connection_string,
        }
45 | """ 46 | # Build and deploy charm from local source folder 47 | if not charm_path: 48 | charm_path = await ops_test.build_charm(".") 49 | if constraints: 50 | await ops_test.model.set_constraints(constraints) 51 | await asyncio.gather( 52 | ops_test.model.deploy( 53 | charm_path, application_name=charm_name, num_units=1, config=charm_config 54 | ), 55 | ops_test.model.deploy( 56 | ETCD_CHARM_NAME, 57 | application_name=ETCD_CHARM_NAME, 58 | channel="latest/stable", 59 | num_units=1, 60 | ), 61 | ops_test.model.deploy( 62 | TLS_CHARM_NAME, 63 | application_name=TLS_CHARM_NAME, 64 | channel="latest/stable", 65 | num_units=1, 66 | ), 67 | ) 68 | 69 | await asyncio.gather( 70 | ops_test.model.relate(f"{ETCD_CHARM_NAME}:db", f"{charm_name}:etcd"), 71 | ops_test.model.relate(f"{TLS_CHARM_NAME}:client", f"{ETCD_CHARM_NAME}:certificates"), 72 | ) 73 | async with ops_test.fast_forward(): 74 | await ops_test.model.wait_for_idle( 75 | apps=[*APP_NAMES, charm_name], status="active", timeout=1000 76 | ) 77 | -------------------------------------------------------------------------------- /tests/integration/test_lxd_integrator.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright 2024 Canonical Ltd. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
import asyncio
import logging
import os

import pytest
from pytest_operator.plugin import OpsTest

logger = logging.getLogger(__name__)

INTEGRATOR_CHARM_NAME = "lxd-integrator"


@pytest.fixture(scope="module")
def charm_config(snap_risk_level) -> dict:
    """Charm config for the AMS application under test."""
    cfg = {"use_embedded_etcd": True}
    if snap_risk_level:
        cfg.update(snap_risk_level=snap_risk_level)
    return cfg


@pytest.mark.abort_on_fail
async def test_can_relate_to_lxd(
    ops_test: OpsTest, constraints, charm_name, charm_config, charm_path
):
    """Build the charm-under-test and deploy it together with related charms.

    Assert on the unit status before any relations/configurations take place.
    """
    # Build and deploy charm from local source folder
    if not charm_path:
        charm_path = await ops_test.build_charm(".")
    if constraints:
        await ops_test.model.set_constraints(constraints)

    # FIXME: lxd-integrator charm currently has an issue with the platform and
    # series compatibility on different platforms so we modify the deployment to
    # get around it
    deploy_opts = {"base": "ubuntu@20.04"}
    # Use .get() so a local run outside tox (LIBJUJU unset) does not crash
    # with a KeyError.
    if "2.9" in os.environ.get("LIBJUJU", ""):
        deploy_opts.update(series="jammy")
        deploy_opts.pop("base")

    await asyncio.gather(
        ops_test.model.deploy(
            charm_path,
            application_name=charm_name,
            num_units=1,
            config=charm_config,
        ),
        ops_test.model.deploy(
            INTEGRATOR_CHARM_NAME,
            application_name=INTEGRATOR_CHARM_NAME,
            channel="stable",
            trust=True,
            **deploy_opts,
        ),
    )
    async with ops_test.fast_forward():
        # No trailing comma here: `await expr,` silently builds a 1-tuple.
        await ops_test.model.relate(f"{INTEGRATOR_CHARM_NAME}:api", f"{charm_name}:lxd-cluster")
        await ops_test.model.wait_for_idle(
            apps=[charm_name, INTEGRATOR_CHARM_NAME], status="active", timeout=1000
        )
-------------------------------------------------------------------------------- /tests/unit/test_charm.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Copyright 2024 Canonical Ltd. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | from unittest.mock import PropertyMock 17 | import pytest 18 | 19 | from ops import BlockedStatus 20 | from ops.testing import Harness 21 | from ams import SNAP_DEFAULT_RISK 22 | 23 | from src.charm import AmsOperatorCharm 24 | 25 | 26 | @pytest.fixture 27 | def charm(): 28 | charm_cls = AmsOperatorCharm 29 | charm_cls.private_ip = "10.0.0.1" 30 | return charm_cls 31 | 32 | 33 | def test_charm_installs_specific_revision(request, mocked_ams, charm, current_version): 34 | harness = Harness(charm) 35 | harness.update_config({"snap_revision": "567"}) 36 | request.addfinalizer(harness.cleanup) 37 | harness.begin() 38 | harness.charm.on.install.emit() 39 | harness.charm.ams.install.assert_called_with( 40 | channel=f"{current_version}/{SNAP_DEFAULT_RISK}", revision="567" 41 | ) 42 | 43 | 44 | def test_charm_sets_workload_version_on_install(request, mocked_ams, charm): 45 | workload_version = "1.21" 46 | type(mocked_ams).version = PropertyMock(return_value=workload_version) 47 | harness = Harness(charm) 48 | request.addfinalizer(harness.cleanup) 49 | harness.begin() 50 | harness.charm.on.install.emit() 51 | assert 
harness.charm.app._backend._workload_version == workload_version 52 | 53 | 54 | def test_blocks_on_external_etcd_if_not_embedded(request, mocked_ams, charm): 55 | harness = Harness(charm) 56 | request.addfinalizer(harness.cleanup) 57 | harness.begin_with_initial_hooks() 58 | assert harness.charm.unit.status == BlockedStatus("Waiting for etcd") 59 | 60 | 61 | def test_can_apply_config_items_to_ams(request, mocked_ams, charm): 62 | harness = Harness(charm) 63 | request.addfinalizer(harness.cleanup) 64 | harness.set_leader(True) 65 | harness.update_config( 66 | { 67 | "use_embedded_etcd": True, 68 | "config": "images.url=https://dummy.image.io\nimages.auth=custom:auth", 69 | } 70 | ) 71 | harness.begin() 72 | harness.charm.on.config_changed.emit() 73 | harness.charm.ams.apply_service_configuration.assert_called_once() 74 | assert len(harness.charm.ams.apply_service_configuration.call_args.args[0]) == 2 75 | 76 | 77 | def test_can_set_location_in_ams(request, mocked_ams, charm): 78 | harness = Harness(charm) 79 | request.addfinalizer(harness.cleanup) 80 | harness.set_leader(True) 81 | harness.update_config({"use_embedded_etcd": True, "location": "https://custom-endpoint.com"}) 82 | harness.begin() 83 | harness.charm.on.config_changed.emit() 84 | harness.charm.ams.set_location.assert_called_once() 85 | -------------------------------------------------------------------------------- /tests/integration/application-charm/src/charm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import json 4 | import logging 5 | import tempfile 6 | import requests 7 | 8 | import ops 9 | from charms.tls_certificates_interface.v3.tls_certificates import ( 10 | generate_ca, 11 | generate_certificate, 12 | generate_csr, 13 | generate_private_key, 14 | ) 15 | from ops.framework import StoredState 16 | from ops.model import WaitingStatus 17 | 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | class 
ApplicationCharm(ops.CharmBase): 22 | """Application charm that connects to database charms.""" 23 | 24 | _state = StoredState() 25 | 26 | def __init__(self, *args): 27 | super().__init__(*args) 28 | 29 | self._state.set_default(cert=None, key=None, lxd_nodes=[]) 30 | self.framework.observe(self.on.start, self._on_start) 31 | self.framework.observe(self.on.client_relation_joined, self._on_client_relation_joined) 32 | self.framework.observe(self.on.client_relation_changed, self._on_client_relation_changed) 33 | 34 | @property 35 | def public_ip(self) -> str: 36 | """Public address of the unit.""" 37 | return self.model.get_binding("juju-info").network.ingress_address.exploded 38 | 39 | @property 40 | def private_ip(self) -> str: 41 | """Private address of the unit.""" 42 | return self.model.get_binding("juju-info").network.bind_address.exploded 43 | 44 | def _on_start(self, _): 45 | self.unit.status = ops.ActiveStatus() 46 | 47 | def _on_client_relation_joined(self, event): 48 | self._state.cert, self._state.key = self._generate_selfsigned_cert( 49 | self.public_ip, self.public_ip, self.private_ip 50 | ) 51 | relation_data = event.relation.data[self.unit] 52 | relation_data["client_certificate"] = json.dumps(self._state.cert.decode("utf-8")) 53 | with open("client.key", "w") as key, open("client.cert", "w") as cert: 54 | cert.write(self._state.cert.decode()) 55 | key.write(self._state.key.decode()) 56 | 57 | def _on_client_relation_changed(self, event): 58 | data = event.relation.data[event.unit] 59 | if not ("public_address" in data or "port" in data): 60 | event.defer() 61 | return 62 | test_url = f"https://{data['public_address']}:{data['port']}/1.0/instances" 63 | resp = requests.get(test_url, verify=False, cert=("client.cert", "client.key")) 64 | resp.raise_for_status() 65 | logger.info("Connected to ams successfully with authentication") 66 | 67 | self.unit.status = ops.ActiveStatus() 68 | 69 | def _generate_selfsigned_cert(self, hostname, public_ip, 
private_ip) -> tuple[bytes, bytes]: 70 | if not hostname: 71 | raise Exception("A hostname is required") 72 | 73 | if not public_ip: 74 | raise Exception("A public IP is required") 75 | 76 | if not private_ip: 77 | raise Exception("A private IP is required") 78 | 79 | ca_key = generate_private_key(key_size=4096) 80 | ca_cert = generate_ca(ca_key, hostname) 81 | 82 | key = generate_private_key(key_size=4096) 83 | csr = generate_csr( 84 | private_key=key, 85 | subject=hostname, 86 | sans_dns=[public_ip, private_ip, hostname], 87 | sans_ip=[public_ip, private_ip], 88 | ) 89 | cert = generate_certificate(csr=csr, ca=ca_cert, ca_key=ca_key) 90 | return cert, key 91 | 92 | 93 | if __name__ == "__main__": 94 | ops.main(ApplicationCharm) 95 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | parts/ 18 | sdist/ 19 | var/ 20 | wheels/ 21 | share/python-wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .nox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | *.py,cover 48 | .hypothesis/ 49 | .pytest_cache/ 50 | cover/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | local_settings.py 59 | db.sqlite3 60 | db.sqlite3-journal 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | .pybuilder/ 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # IPython 80 | profile_default/ 81 | ipython_config.py 82 | 83 | # pyenv 84 | # For a library or package, you might want to ignore these files since the code is 85 | # intended to run in multiple environments; otherwise, check them in: 86 | # .python-version 87 | 88 | # pipenv 89 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 90 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 91 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 92 | # install all needed dependencies. 93 | #Pipfile.lock 94 | 95 | # poetry 96 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 97 | # This is especially recommended for binary packages to ensure reproducibility, and is more 98 | # commonly ignored for libraries. 99 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 100 | #poetry.lock 101 | 102 | # pdm 103 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
104 | #pdm.lock 105 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 106 | # in version control. 107 | # https://pdm.fming.dev/#use-with-ide 108 | .pdm.toml 109 | 110 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 111 | __pypackages__/ 112 | 113 | # Celery stuff 114 | celerybeat-schedule 115 | celerybeat.pid 116 | 117 | # SageMath parsed files 118 | *.sage.py 119 | 120 | # Environments 121 | .env 122 | .venv 123 | env/ 124 | venv/ 125 | ENV/ 126 | env.bak/ 127 | venv.bak/ 128 | 129 | # Spyder project settings 130 | .spyderproject 131 | .spyproject 132 | 133 | # Rope project settings 134 | .ropeproject 135 | 136 | # mkdocs documentation 137 | /site 138 | 139 | # mypy 140 | .mypy_cache/ 141 | .dmypy.json 142 | dmypy.json 143 | 144 | # Pyre type checker 145 | .pyre/ 146 | 147 | # pytype static type analyzer 148 | .pytype/ 149 | 150 | # Cython debug symbols 151 | cython_debug/ 152 | 153 | # PyCharm 154 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 155 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 156 | # and can be added to the global gitignore or merged into this file. For a more nuclear 157 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 158 | #.idea/ 159 | *.snap 160 | -------------------------------------------------------------------------------- /.github/workflows/build-and-test.yaml: -------------------------------------------------------------------------------- 1 | name: Build & Test 2 | 3 | on: 4 | workflow_call: 5 | inputs: 6 | snap_risk_level: 7 | type: string 8 | description: | 9 | The track of the snap to test against. Defaults to the default risk 10 | level defined in the charm. 
11 | default: "" 12 | required: false 13 | outputs: 14 | artifact-prefix: 15 | description: "The charms built by this workflow" 16 | value: ${{ jobs.build.outputs.artifact-prefix }} 17 | jobs: 18 | lint: 19 | name: Lint 20 | runs-on: [self-hosted, linux, X64, jammy, large] 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 24 | - name: Install dependencies 25 | run: python3 -m pip install tox 26 | - name: Run linters 27 | run: tox -e lint 28 | 29 | unit-test: 30 | name: Unit tests 31 | runs-on: [self-hosted, linux, X64, jammy, large] 32 | steps: 33 | - name: Checkout 34 | uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 35 | - name: Install dependencies 36 | run: python -m pip install tox 37 | - name: Run tests 38 | run: tox -e unit 39 | 40 | build: 41 | name: Build charm 42 | uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v13.0.0 43 | 44 | collect-tests: 45 | name: Collect tests for charms 46 | needs: build 47 | runs-on: ubuntu-latest 48 | steps: 49 | - name: Checkout 50 | uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 51 | - name: Assign charm artifacts to runners 52 | id: charm-to-runner 53 | run: | 54 | # Reverse lookup for artifact base index to its architecture 55 | ./scripts/ci/get-runners.py 56 | outputs: 57 | bases: ${{ steps.charm-to-runner.outputs.bases }} 58 | 59 | integration-test: 60 | name: Integration tests 61 | needs: 62 | - lint 63 | - unit-test 64 | - collect-tests 65 | - build 66 | strategy: 67 | fail-fast: false 68 | max-parallel: 6 69 | matrix: 70 | agent-versions: 71 | - "3.2.2" # renovate: latest juju 3 72 | - "2.9.44" # renovate: latest juju 2 73 | base: ${{ fromJSON(needs.collect-tests.outputs.bases) }} 74 | runs-on: "${{matrix.base.runner_labels}}" 75 | steps: 76 | - name: Checkout 77 | uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3 78 | - name: Download packed charm(s) 79 | uses: 
actions/download-artifact@v4 80 | id: downloaded-charm 81 | with: 82 | name: ${{ needs.build.outputs.artifact-prefix }}-base-${{ matrix.base.base_index }} 83 | - name: Set channel 84 | run: | 85 | juju_channel=$(echo "${{ matrix.agent-versions }}" | cut -c 1-3) 86 | echo "channel=${juju_channel}/stable" >> "$GITHUB_ENV" 87 | juju_major=$(echo "${{ matrix.agent-versions }}" | cut -c 1) 88 | echo "libjuju=juju${juju_major}" >> "$GITHUB_ENV" 89 | - name: Setup operator environment 90 | uses: charmed-kubernetes/actions-operator@main 91 | with: 92 | provider: lxd 93 | juju-channel: "${{ env.channel }}" 94 | bootstrap-options: "--agent-version ${{ matrix.agent-versions }}" 95 | - name: Run integration tests 96 | run: | 97 | mv ${{ steps.downloaded-charm.outputs.download-path }}/*.charm ams.charm 98 | args="--charm=./ams.charm" 99 | if [ "${{ matrix.base.arch }}" == "arm64" ]; then 100 | args="${args} --constraints arch=arm64" 101 | fi 102 | if [ -n "${{ inputs.snap_risk_level }}" ]; then 103 | args="${args} --snap-risk-level ${{ inputs.snap_risk_level }}" 104 | fi 105 | tox -e integration-${{ env.libjuju }} -- ${args} 106 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | # This file defines charm config options, and populates the Configure tab on Charmhub. 2 | # If your charm does not require configuration options, delete this file entirely. 3 | # 4 | # See https://juju.is/docs/config for guidance. 5 | 6 | options: 7 | snap_risk_level: 8 | type: string 9 | default: "stable" 10 | description: Risk level to use for the snap version 11 | snap_revision: 12 | type: string 13 | default: "" 14 | description: | 15 | Specific revision of the snap to install or pin to. This takes precedence 16 | over the channel if both are set. 
17 | port: 18 | type: int 19 | default: 8444 20 | description: Port where AMS binds to 21 | storage_device: 22 | type: string 23 | default: "" 24 | description: Path to storage device to be used on this node (i. e. "/dev/sdb") 25 | storage_pool: 26 | type: string 27 | default: "" 28 | description: | 29 | Name of a LXD storage pool to use instead of creating a custom one. This will 30 | only work when using LXD clusters not managed by AMS via the lxd-integrator charm. 31 | log_level: 32 | type: string 33 | default: "info" 34 | description: Logging level. Allowed values are debug, info, warning, error and critical 35 | prometheus_target_port: 36 | type: int 37 | default: 9104 38 | description: Port where Prometheus target binds to 39 | prometheus_metrics_path: 40 | type: string 41 | default: /internal/1.0/metrics 42 | description: The path where AMS exposes the metrics for Prometheus 43 | prometheus_tls_cert_path: 44 | type: string 45 | default: "" 46 | description: Path to certificate used by prometheus for TLS 47 | prometheus_tls_key_path: 48 | type: string 49 | default: "" 50 | description: Path to key used by prometheus for TLS 51 | prometheus_basic_auth_username: 52 | type: string 53 | default: "" 54 | description: Username used for HTTP basic auth of the prometheus endpoint 55 | prometheus_basic_auth_password: 56 | type: string 57 | default: "" 58 | description: Password used for HTTP basic auth of the prometheus endpoint 59 | prometheus_extra_labels: 60 | type: string 61 | default: "" 62 | description: Comma separated list of extra labels (key=value) to add to every reported metric 63 | port_range: 64 | type: string 65 | default: "10000-11000" 66 | description: Port range to be assigned for container creation 67 | metrics_server: 68 | type: string 69 | default: "" 70 | description: | 71 | The endpoint where all containers will send their metrics to. It might be 72 | overridden by a REST API request to AMS when a container is launched. 
If no
      value is set, AMS will take a reasonable default.
      The format of the value is 'influxdb:[username:password@]<host>[:<port>]'
  config:
    type: string
    default: ""
    description: |
      Configuration options for the AMS service. Multiple options are separated by a new
      line and the format of each option is `<key>=<value>`.
  registry_mode:
    default: ""
    type: string
    description: |
      Override the mode the registry is configured in
  lxd_project:
    default: ""
    type: string
    description: |
      LXD project AMS should use. MUST be set before any LXD node is added to AMS.
      Changing it afterwards will cause undefined behavior.
  force_tls12:
    default: false
    type: boolean
    description: |
      AMS enforces the use of TLS version 1.3 since the 1.15.0 release. To allow older
      clients not supporting TLS 1.3 to still connect, this configuration option allows
      users to allow TLS 1.2 again. Please note that this affects the security of your
      installation and should only be applied with great care.
  use_network_acl:
    default: false
    type: boolean
    description: |
      This configuration option enables AMS to use LXD networking ACLs rather than the node
      controller which is used by default to restrict the network access of AMS containers.
      NOTE: this is not a runtime configuration item and it should only be set at deployment time.
  location:
    type: string
    default: ""
    description: |
      Location AMS is available on. If not set the private address will
113 | use_embedded_etcd: 114 | type: boolean 115 | default: false 116 | description: | 117 | Use an embedded etcd database rather than connecting to an external host one 118 | 119 | -------------------------------------------------------------------------------- /lib/charms/operator_libs_linux/v0/passwd.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Canonical Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Simple library for managing Linux users and groups. 16 | 17 | The `passwd` module provides convenience methods and abstractions around users and groups on a 18 | Linux system, in order to make adding and managing users and groups easy. 
19 | 20 | Example of adding a user named 'test': 21 | 22 | ```python 23 | import passwd 24 | passwd.add_group(name='special_group') 25 | passwd.add_user(username='test', secondary_groups=['sudo']) 26 | 27 | if passwd.user_exists('some_user'): 28 | do_stuff() 29 | ``` 30 | """ 31 | 32 | import grp 33 | import logging 34 | import pwd 35 | from subprocess import STDOUT, check_output 36 | from typing import List, Optional, Union 37 | 38 | logger = logging.getLogger(__name__) 39 | 40 | # The unique Charmhub library identifier, never change it 41 | LIBID = "cf7655b2bf914d67ac963f72b930f6bb" 42 | 43 | # Increment this major API version when introducing breaking changes 44 | LIBAPI = 0 45 | 46 | # Increment this PATCH version before using `charmcraft publish-lib` or reset 47 | # to 0 if you are raising the major API version 48 | LIBPATCH = 4 49 | 50 | 51 | def user_exists(user: Union[str, int]) -> Optional[pwd.struct_passwd]: 52 | """Check if a user exists. 53 | 54 | Args: 55 | user: username or gid of user whose existence to check 56 | 57 | Raises: 58 | TypeError: where neither a string or int is passed as the first argument 59 | """ 60 | try: 61 | if type(user) is int: 62 | return pwd.getpwuid(user) 63 | elif type(user) is str: 64 | return pwd.getpwnam(user) 65 | else: 66 | raise TypeError("specified argument '%r' should be a string or int", user) 67 | except KeyError: 68 | logger.info("specified user '%s' doesn't exist", str(user)) 69 | return None 70 | 71 | 72 | def group_exists(group: Union[str, int]) -> Optional[grp.struct_group]: 73 | """Check if a group exists. 
74 | 75 | Args: 76 | group: username or gid of user whose existence to check 77 | 78 | Raises: 79 | TypeError: where neither a string or int is passed as the first argument 80 | """ 81 | try: 82 | if type(group) is int: 83 | return grp.getgrgid(group) 84 | elif type(group) is str: 85 | return grp.getgrnam(group) 86 | else: 87 | raise TypeError("specified argument '%r' should be a string or int", group) 88 | except KeyError: 89 | logger.info("specified group '%s' doesn't exist", str(group)) 90 | return None 91 | 92 | 93 | def add_user( 94 | username: str, 95 | password: Optional[str] = None, 96 | shell: str = "/bin/bash", 97 | system_user: bool = False, 98 | primary_group: str = None, 99 | secondary_groups: List[str] = None, 100 | uid: int = None, 101 | home_dir: str = None, 102 | create_home: bool = True, 103 | ) -> str: 104 | """Add a user to the system. 105 | 106 | Will log but otherwise succeed if the user already exists. 107 | 108 | Arguments: 109 | username: Username to create 110 | password: Password for user; if ``None``, create a system user 111 | shell: The default shell for the user 112 | system_user: Whether to create a login or system user 113 | primary_group: Primary group for user; defaults to username 114 | secondary_groups: Optional list of additional groups 115 | uid: UID for user being created 116 | home_dir: Home directory for user 117 | create_home: Force home directory creation 118 | 119 | Returns: 120 | The password database entry struct, as returned by `pwd.getpwnam` 121 | """ 122 | try: 123 | if uid: 124 | user_info = pwd.getpwuid(int(uid)) 125 | logger.info("user '%d' already exists", uid) 126 | return user_info 127 | user_info = pwd.getpwnam(username) 128 | logger.info("user with uid '%s' already exists", username) 129 | return user_info 130 | except KeyError: 131 | logger.info("creating user '%s'", username) 132 | 133 | cmd = ["useradd", "--shell", shell] 134 | 135 | if uid: 136 | cmd.extend(["--uid", str(uid)]) 137 | if home_dir: 138 | 
cmd.extend(["--home", str(home_dir)]) 139 | if password: 140 | cmd.extend(["--password", password]) 141 | if create_home: 142 | cmd.append("--create-home") 143 | if system_user or password is None: 144 | cmd.append("--system") 145 | 146 | if not primary_group: 147 | try: 148 | grp.getgrnam(username) 149 | primary_group = username # avoid "group exists" error 150 | except KeyError: 151 | pass 152 | 153 | if primary_group: 154 | cmd.extend(["-g", primary_group]) 155 | if secondary_groups: 156 | cmd.extend(["-G", ",".join(secondary_groups)]) 157 | 158 | cmd.append(username) 159 | check_output(cmd, stderr=STDOUT) 160 | user_info = pwd.getpwnam(username) 161 | return user_info 162 | 163 | 164 | def add_group(group_name: str, system_group: bool = False, gid: int = None): 165 | """Add a group to the system. 166 | 167 | Will log but otherwise succeed if the group already exists. 168 | 169 | Args: 170 | group_name: group to create 171 | system_group: Create system group 172 | gid: GID for user being created 173 | 174 | Returns: 175 | The group's password database entry struct, as returned by `grp.getgrnam` 176 | """ 177 | try: 178 | group_info = grp.getgrnam(group_name) 179 | logger.info("group '%s' already exists", group_name) 180 | if gid: 181 | group_info = grp.getgrgid(gid) 182 | logger.info("group with gid '%d' already exists", gid) 183 | except KeyError: 184 | logger.info("creating group '%s'", group_name) 185 | cmd = ["addgroup"] 186 | if gid: 187 | cmd.extend(["--gid", str(gid)]) 188 | if system_group: 189 | cmd.append("--system") 190 | else: 191 | cmd.extend(["--group"]) 192 | cmd.append(group_name) 193 | check_output(cmd, stderr=STDOUT) 194 | group_info = grp.getgrnam(group_name) 195 | return group_info 196 | 197 | 198 | def add_user_to_group(username: str, group: str): 199 | """Add a user to a group. 
200 | 201 | Args: 202 | username: user to add to specified group 203 | group: name of group to add user to 204 | 205 | Returns: 206 | The group's password database entry struct, as returned by `grp.getgrnam` 207 | """ 208 | if not user_exists(username): 209 | raise ValueError("user '{}' does not exist".format(username)) 210 | if not group_exists(group): 211 | raise ValueError("group '{}' does not exist".format(group)) 212 | 213 | logger.info("adding user '%s' to group '%s'", username, group) 214 | check_output(["gpasswd", "-a", username, group], stderr=STDOUT) 215 | return grp.getgrnam(group) 216 | 217 | 218 | def remove_user(user: Union[str, int], remove_home: bool = False) -> bool: 219 | """Remove a user from the system. 220 | 221 | Args: 222 | user: the username or uid of the user to remove 223 | remove_home: indicates whether the user's home directory should be removed 224 | """ 225 | u = user_exists(user) 226 | if not u: 227 | logger.info("user '%s' does not exist", str(u)) 228 | return True 229 | 230 | cmd = ["userdel"] 231 | if remove_home: 232 | cmd.append("-f") 233 | cmd.append(u.pw_name) 234 | 235 | logger.info("removing user '%s'", u.pw_name) 236 | check_output(cmd, stderr=STDOUT) 237 | return True 238 | 239 | 240 | def remove_group(group: Union[str, int], force: bool = False) -> bool: 241 | """Remove a user from the system. 
242 | 243 | Args: 244 | group: the name or gid of the group to remove 245 | force: force group removal even if it's the primary group for a user 246 | """ 247 | g = group_exists(group) 248 | if not g: 249 | logger.info("group '%s' does not exist", str(g)) 250 | return True 251 | 252 | cmd = ["groupdel"] 253 | if force: 254 | cmd.append("-f") 255 | cmd.append(g.gr_name) 256 | 257 | logger.info("removing group '%s'", g.gr_name) 258 | check_output(cmd, stderr=STDOUT) 259 | return True 260 | -------------------------------------------------------------------------------- /lib/charms/operator_libs_linux/v1/systemd.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Canonical Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | """Abstractions for stopping, starting and managing system services via systemd. 17 | 18 | This library assumes that your charm is running on a platform that uses systemd. E.g., 19 | Centos 7 or later, Ubuntu Xenial (16.04) or later. 20 | 21 | For the most part, we transparently provide an interface to a commonly used selection of 22 | systemd commands, with a few shortcuts baked in. For example, service_pause and 23 | service_resume with run the mask/unmask and enable/disable invocations. 
24 | 25 | Example usage: 26 | 27 | ```python 28 | from charms.operator_libs_linux.v0.systemd import service_running, service_reload 29 | 30 | # Start a service 31 | if not service_running("mysql"): 32 | success = service_start("mysql") 33 | 34 | # Attempt to reload a service, restarting if necessary 35 | success = service_reload("nginx", restart_on_failure=True) 36 | ``` 37 | """ 38 | 39 | __all__ = [ # Don't export `_systemctl`. (It's not the intended way of using this lib.) 40 | "SystemdError", 41 | "daemon_reload", 42 | "service_disable", 43 | "service_enable", 44 | "service_failed", 45 | "service_pause", 46 | "service_reload", 47 | "service_restart", 48 | "service_resume", 49 | "service_running", 50 | "service_start", 51 | "service_stop", 52 | ] 53 | 54 | import logging 55 | import subprocess 56 | 57 | logger = logging.getLogger(__name__) 58 | 59 | # The unique Charmhub library identifier, never change it 60 | LIBID = "045b0d179f6b4514a8bb9b48aee9ebaf" 61 | 62 | # Increment this major API version when introducing breaking changes 63 | LIBAPI = 1 64 | 65 | # Increment this PATCH version before using `charmcraft publish-lib` or reset 66 | # to 0 if you are raising the major API version 67 | LIBPATCH = 4 68 | 69 | 70 | class SystemdError(Exception): 71 | """Custom exception for SystemD related errors.""" 72 | 73 | 74 | def _systemctl(*args: str, check: bool = False) -> int: 75 | """Control a system service using systemctl. 76 | 77 | Args: 78 | *args: Arguments to pass to systemctl. 79 | check: Check the output of the systemctl command. Default: False. 80 | 81 | Returns: 82 | Returncode of systemctl command execution. 83 | 84 | Raises: 85 | SystemdError: Raised if calling systemctl returns a non-zero returncode and check is True. 
86 | """ 87 | cmd = ["systemctl", *args] 88 | logger.debug(f"Executing command: {cmd}") 89 | try: 90 | proc = subprocess.run( 91 | cmd, 92 | stdout=subprocess.PIPE, 93 | stderr=subprocess.STDOUT, 94 | text=True, 95 | bufsize=1, 96 | encoding="utf-8", 97 | check=check, 98 | ) 99 | logger.debug( 100 | f"Command {cmd} exit code: {proc.returncode}. systemctl output:\n{proc.stdout}" 101 | ) 102 | return proc.returncode 103 | except subprocess.CalledProcessError as e: 104 | raise SystemdError( 105 | f"Command {cmd} failed with returncode {e.returncode}. systemctl output:\n{e.stdout}" 106 | ) 107 | 108 | 109 | def service_running(service_name: str) -> bool: 110 | """Report whether a system service is running. 111 | 112 | Args: 113 | service_name: The name of the service to check. 114 | 115 | Return: 116 | True if service is running/active; False if not. 117 | """ 118 | # If returncode is 0, this means that is service is active. 119 | return _systemctl("--quiet", "is-active", service_name) == 0 120 | 121 | 122 | def service_failed(service_name: str) -> bool: 123 | """Report whether a system service has failed. 124 | 125 | Args: 126 | service_name: The name of the service to check. 127 | 128 | Returns: 129 | True if service is marked as failed; False if not. 130 | """ 131 | # If returncode is 0, this means that the service has failed. 132 | return _systemctl("--quiet", "is-failed", service_name) == 0 133 | 134 | 135 | def service_start(*args: str) -> bool: 136 | """Start a system service. 137 | 138 | Args: 139 | *args: Arguments to pass to `systemctl start` (normally the service name). 140 | 141 | Returns: 142 | On success, this function returns True for historical reasons. 143 | 144 | Raises: 145 | SystemdError: Raised if `systemctl start ...` returns a non-zero returncode. 146 | """ 147 | return _systemctl("start", *args, check=True) == 0 148 | 149 | 150 | def service_stop(*args: str) -> bool: 151 | """Stop a system service. 
152 | 153 | Args: 154 | *args: Arguments to pass to `systemctl stop` (normally the service name). 155 | 156 | Returns: 157 | On success, this function returns True for historical reasons. 158 | 159 | Raises: 160 | SystemdError: Raised if `systemctl stop ...` returns a non-zero returncode. 161 | """ 162 | return _systemctl("stop", *args, check=True) == 0 163 | 164 | 165 | def service_restart(*args: str) -> bool: 166 | """Restart a system service. 167 | 168 | Args: 169 | *args: Arguments to pass to `systemctl restart` (normally the service name). 170 | 171 | Returns: 172 | On success, this function returns True for historical reasons. 173 | 174 | Raises: 175 | SystemdError: Raised if `systemctl restart ...` returns a non-zero returncode. 176 | """ 177 | return _systemctl("restart", *args, check=True) == 0 178 | 179 | 180 | def service_enable(*args: str) -> bool: 181 | """Enable a system service. 182 | 183 | Args: 184 | *args: Arguments to pass to `systemctl enable` (normally the service name). 185 | 186 | Returns: 187 | On success, this function returns True for historical reasons. 188 | 189 | Raises: 190 | SystemdError: Raised if `systemctl enable ...` returns a non-zero returncode. 191 | """ 192 | return _systemctl("enable", *args, check=True) == 0 193 | 194 | 195 | def service_disable(*args: str) -> bool: 196 | """Disable a system service. 197 | 198 | Args: 199 | *args: Arguments to pass to `systemctl disable` (normally the service name). 200 | 201 | Returns: 202 | On success, this function returns True for historical reasons. 203 | 204 | Raises: 205 | SystemdError: Raised if `systemctl disable ...` returns a non-zero returncode. 206 | """ 207 | return _systemctl("disable", *args, check=True) == 0 208 | 209 | 210 | def service_reload(service_name: str, restart_on_failure: bool = False) -> bool: 211 | """Reload a system service, optionally falling back to restart if reload fails. 212 | 213 | Args: 214 | service_name: The name of the service to reload. 
215 | restart_on_failure: 216 | Boolean indicating whether to fall back to a restart if the reload fails. 217 | 218 | Returns: 219 | On success, this function returns True for historical reasons. 220 | 221 | Raises: 222 | SystemdError: Raised if `systemctl reload|restart ...` returns a non-zero returncode. 223 | """ 224 | try: 225 | return _systemctl("reload", service_name, check=True) == 0 226 | except SystemdError: 227 | if restart_on_failure: 228 | return service_restart(service_name) 229 | else: 230 | raise 231 | 232 | 233 | def service_pause(service_name: str) -> bool: 234 | """Pause a system service. 235 | 236 | Stops the service and prevents the service from starting again at boot. 237 | 238 | Args: 239 | service_name: The name of the service to pause. 240 | 241 | Returns: 242 | On success, this function returns True for historical reasons. 243 | 244 | Raises: 245 | SystemdError: Raised if service is still running after being paused by systemctl. 246 | """ 247 | _systemctl("disable", "--now", service_name) 248 | _systemctl("mask", service_name) 249 | 250 | if service_running(service_name): 251 | raise SystemdError(f"Attempted to pause {service_name!r}, but it is still running.") 252 | 253 | return True 254 | 255 | 256 | def service_resume(service_name: str) -> bool: 257 | """Resume a system service. 258 | 259 | Re-enable starting the service again at boot. Start the service. 260 | 261 | Args: 262 | service_name: The name of the service to resume. 263 | 264 | Returns: 265 | On success, this function returns True for historical reasons. 266 | 267 | Raises: 268 | SystemdError: Raised if service is not running after being resumed by systemctl. 
269 | """ 270 | _systemctl("unmask", service_name) 271 | _systemctl("enable", "--now", service_name) 272 | 273 | if not service_running(service_name): 274 | raise SystemdError(f"Attempted to resume {service_name!r}, but it is not running.") 275 | 276 | return True 277 | 278 | 279 | def daemon_reload() -> bool: 280 | """Reload systemd manager configuration. 281 | 282 | Returns: 283 | On success, this function returns True for historical reasons. 284 | 285 | Raises: 286 | SystemdError: Raised if `systemctl daemon-reload` returns a non-zero returncode. 287 | """ 288 | return _systemctl("daemon-reload", check=True) == 0 289 | -------------------------------------------------------------------------------- /src/charm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Operator Charm for AMS.""" 3 | # -*- coding: utf-8 -*- 4 | # 5 | # Copyright 2024 Canonical Ltd. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 
18 | 19 | from __future__ import annotations 20 | 21 | import ast 22 | import json 23 | import logging 24 | from typing import Dict, List 25 | 26 | from ams import AMS, SNAP_DEFAULT_RISK, BackendConfig, ETCDConfig, PrometheusConfig, ServiceConfig 27 | from charms.grafana_agent.v0.cos_agent import COSAgentProvider 28 | from charms.tls_certificates_interface.v3.tls_certificates import ( 29 | generate_ca, 30 | generate_certificate, 31 | generate_csr, 32 | generate_private_key, 33 | ) 34 | from interfaces.etcd import ETCDEndpointConsumer 35 | from ops.charm import ( 36 | CharmBase, 37 | ConfigChangedEvent, 38 | InstallEvent, 39 | RelationDepartedEvent, 40 | RelationJoinedEvent, 41 | StopEvent, 42 | UpgradeCharmEvent, 43 | ) 44 | from ops.framework import StoredState 45 | from ops.main import main 46 | from ops.model import ActiveStatus, BlockedStatus, WaitingStatus 47 | 48 | # Log messages can be retrieved using juju debug-log 49 | logger = logging.getLogger(__name__) 50 | 51 | 52 | def _is_pro_attached(): 53 | return True 54 | 55 | 56 | with open("version", "r") as f: 57 | CHARM_VERSION = f.read().strip("\n") 58 | 59 | 60 | class AmsOperatorCharm(CharmBase): 61 | """Charm the service.""" 62 | 63 | _state = StoredState() 64 | 65 | def __init__(self, *args): 66 | super().__init__(*args) 67 | self.ams = AMS(self) 68 | self._state.set_default(registered_clients=set()) 69 | self.etcd = ETCDEndpointConsumer(self, "etcd") 70 | self.framework.observe(self.on.install, self._on_install) 71 | self.framework.observe(self.on.upgrade_charm, self._on_upgrade) 72 | self.framework.observe(self.on.config_changed, self._on_config_changed) 73 | self.framework.observe(self.on.stop, self._on_stop) 74 | self.framework.observe(self.etcd.on.available, self._on_etcd_available) 75 | self.metrics_cfg = PrometheusConfig( 76 | target_ip=self.private_ip, 77 | target_port=int(self.config["prometheus_target_port"]), 78 | tls_cert_path=self.config["prometheus_tls_cert_path"], 79 | 
tls_key_path=self.config["prometheus_tls_key_path"], 80 | basic_auth_username=self.config["prometheus_basic_auth_username"], 81 | basic_auth_password=self.config["prometheus_basic_auth_password"], 82 | extra_labels=self.config["prometheus_extra_labels"], 83 | metrics_path=self.config["prometheus_metrics_path"], 84 | ) 85 | self._cos = COSAgentProvider( 86 | self, 87 | relation_name="cos-agent", 88 | refresh_events=[self.on.update_status, self.on.upgrade_charm, self.on.config_changed], 89 | scrape_configs=self.generate_scrape_config, 90 | ) 91 | self.framework.observe( 92 | self.on["lxd-cluster"].relation_joined, self._on_lxd_integrator_joined 93 | ) 94 | self.framework.observe(self.on["rest-api"].relation_joined, self._on_rest_api_joined) 95 | self.framework.observe(self.on["rest-api"].relation_departed, self._on_rest_api_departed) 96 | 97 | @property 98 | def public_ip(self) -> str: 99 | """Public address of the unit.""" 100 | return self.model.get_binding("juju-info").network.ingress_address.exploded 101 | 102 | @property 103 | def private_ip(self) -> str: 104 | """Private address of the unit.""" 105 | return self.model.get_binding("juju-info").network.bind_address.exploded 106 | 107 | def generate_scrape_config(self) -> List[Dict]: 108 | """Generate dynamic configs for sending metrics to prometheus.""" 109 | if not (self.metrics_cfg and self.metrics_cfg.enabled): 110 | return [] 111 | logger.debug("Generated prometheus config: %s", self.metrics_cfg.scrape_jobs) 112 | return self.metrics_cfg.scrape_jobs 113 | 114 | def _on_install(self, event: InstallEvent): 115 | if not _is_pro_attached(): 116 | self.unit.status = BlockedStatus("Waiting for Ubuntu Pro attachment") 117 | return 118 | snap_risk_level = self.config.get("snap_risk_level", SNAP_DEFAULT_RISK) 119 | revision = self.config.get("snap_revision", "") 120 | self.ams.install(channel=f"{CHARM_VERSION}/{snap_risk_level}", revision=revision) 121 | self.unit.set_workload_version(self.ams.version) 122 | 123 | def 
_on_upgrade(self, _: UpgradeCharmEvent): 124 | snap_risk_level = self.config.get("snap_risk_level", SNAP_DEFAULT_RISK) 125 | revision = self.config.get("snap_revision", "") 126 | self.ams.install(f"{CHARM_VERSION}/{snap_risk_level}", revision=revision) 127 | self.unit.set_workload_version(self.ams.version) 128 | 129 | def _on_stop(self, _: StopEvent): 130 | self.ams.remove() 131 | 132 | def _on_config_changed(self, event: ConfigChangedEvent): 133 | self.unit.status = WaitingStatus("Configuring AMS") 134 | etcd_cfg = ETCDConfig( 135 | use_embedded=self.config["use_embedded_etcd"], 136 | ) 137 | if not etcd_cfg.is_ready: 138 | if not self.etcd.is_available: 139 | self.unit.status = BlockedStatus("Waiting for etcd") 140 | return 141 | servers = self.etcd.get_config().get("connection_string", "").split(",") 142 | logger.info(f"Received servers {servers}") 143 | if not servers: 144 | self.unit.status = BlockedStatus("Waiting for etcd") 145 | return 146 | etcd_cfg.servers = servers 147 | backend_cfg = BackendConfig( 148 | port_range=self.config["port_range"], 149 | lxd_project=self.config["lxd_project"], 150 | force_tls12=self.config["force_tls12"], 151 | use_network_acl=self.config["use_network_acl"], 152 | ) 153 | if self.config["metrics_server"]: 154 | backend_cfg.metrics_server = f"influxdb:{self.config['metrics_server']}" 155 | 156 | cfg = ServiceConfig( 157 | ip=self.private_ip, 158 | port=int(self.config["port"]), 159 | log_level=self.config["log_level"], 160 | metrics=self.metrics_cfg, 161 | backend=backend_cfg, 162 | store=etcd_cfg, 163 | ) 164 | self.ams.configure(cfg) 165 | if self.config["location"]: 166 | self.ams.set_location(self.config["location"], self.config["port"]) 167 | if self.config["config"]: 168 | self.ams.apply_service_configuration(self.config["config"].split("\n")) 169 | self.unit.set_ports(int(self.config["port"])) 170 | self.unit.status = ActiveStatus() 171 | 172 | def _on_etcd_available(self, _): 173 | cfg = self.etcd.get_config() 174 | 
self.ams.setup_etcd(ca=cfg["ca"], cert=cfg["cert"], key=cfg["key"]) 175 | self.on.config_changed.emit() 176 | 177 | def _on_lxd_integrator_joined(self, event: RelationJoinedEvent): 178 | cert, key = AmsOperatorCharm._generate_selfsigned_cert( 179 | self.public_ip, self.public_ip, self.private_ip 180 | ) 181 | self.ams.setup_lxd(cert=cert, key=key) 182 | relation_data = event.relation.data[self.unit] 183 | relation_data["client_certificates"] = json.dumps([cert.decode("utf-8")]) 184 | 185 | def _on_rest_api_joined(self, event: RelationJoinedEvent): 186 | remote_data = event.relation.data.get(event.unit) 187 | if not remote_data: 188 | event.defer() 189 | return 190 | client_cert = remote_data.get("client_certificate") 191 | if not client_cert: 192 | event.defer() 193 | logger.error("No client certificate found") 194 | return 195 | if not self.ams.is_running: 196 | event.defer() 197 | return 198 | fingerprint = self.ams.register_client(ast.literal_eval(client_cert)) 199 | if fingerprint: 200 | self._state.registered_clients.add(f"{event.unit.name}:{fingerprint}") 201 | logger.info("Client registration with AMS complete") 202 | data = { 203 | "port": str(self.config["port"]), 204 | "private_address": self.private_ip, 205 | "public_address": self.public_ip, 206 | "node": self.unit.name.replace("/", ""), 207 | } 208 | location = self.ams.get_config_item("load_balancer.url") 209 | if location: 210 | data["private_address"] = location 211 | event.relation.data[self.unit].update(data) 212 | 213 | def _on_rest_api_departed(self, event: RelationDepartedEvent): 214 | fp = None 215 | for client in self._state.registered_clients: 216 | client = client.split(":") 217 | if event.unit.name == client[0]: 218 | fp = client[1] 219 | break 220 | if not fp: 221 | logger.warning(f"No client found for {event.unit} to unregister") 222 | return 223 | self.ams.unregister_client(fp) 224 | 225 | @staticmethod 226 | def _generate_selfsigned_cert( 227 | hostname, public_ip, private_ip 228 | ) 
-> Tuple[bytes, bytes]: # noqa: F821 229 | if not hostname: 230 | raise Exception("A hostname is required") 231 | 232 | if not public_ip: 233 | raise Exception("A public IP is required") 234 | 235 | if not private_ip: 236 | raise Exception("A private IP is required") 237 | 238 | ca_key = generate_private_key(key_size=4096) 239 | ca_cert = generate_ca(ca_key, hostname) 240 | 241 | key = generate_private_key(key_size=4096) 242 | csr = generate_csr( 243 | private_key=key, 244 | subject=hostname, 245 | sans_dns=[public_ip, private_ip, hostname], 246 | sans_ip=[public_ip, private_ip], 247 | ) 248 | cert = generate_certificate(csr=csr, ca=ca_cert, ca_key=ca_key) 249 | return cert, key 250 | 251 | 252 | if __name__ == "__main__": # pragma: nocover 253 | main(AmsOperatorCharm) 254 | -------------------------------------------------------------------------------- /src/ams.py: -------------------------------------------------------------------------------- 1 | """Module to configure AMS for charms.""" 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Copyright 2024 Canonical Ltd. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | 18 | import json 19 | import logging 20 | import shutil 21 | import subprocess 22 | import tempfile 23 | from dataclasses import asdict, dataclass, field 24 | from pathlib import Path 25 | from typing import Dict, List, Optional 26 | 27 | import ops 28 | import yaml 29 | from charms.operator_libs_linux.v0 import passwd 30 | from charms.operator_libs_linux.v1 import systemd 31 | from charms.operator_libs_linux.v2 import snap 32 | from jinja2 import Environment, FileSystemLoader 33 | 34 | SNAP_NAME = "ams" 35 | SNAP_COMMON_PATH = Path(f"/var/snap/{SNAP_NAME}/common") 36 | SNAP_DEFAULT_RISK = "stable" 37 | 38 | ETCD_BASE_PATH = SNAP_COMMON_PATH / "etcd" 39 | ETCD_CA_PATH = ETCD_BASE_PATH / "client-ca.pem" 40 | ETCD_CERT_PATH = ETCD_BASE_PATH / "client-cert.pem" 41 | ETCD_KEY_PATH = ETCD_BASE_PATH / "client-key.pem" 42 | 43 | AMS_CONFIG_PATH = SNAP_COMMON_PATH / "server/settings.yaml" 44 | 45 | LXD_CLIENT_CONFIG_FOLDER = SNAP_COMMON_PATH / "lxd" 46 | LXD_CLIENT_CERT_PATH = LXD_CLIENT_CONFIG_FOLDER / "client.crt" 47 | LXD_CLIENT_KEY_PATH = LXD_CLIENT_CONFIG_FOLDER / "client.key" 48 | 49 | SERVICE = "snap.ams.ams.service" 50 | SERVICE_DROP_IN_PATH = Path(f"/etc/systemd/system/{SERVICE}.d/10-ams-unix-socket-chown.conf") 51 | GROUP_NAME = "ams" 52 | 53 | logger = logging.getLogger(__name__) 54 | 55 | 56 | @dataclass 57 | class ETCDConfig: 58 | """Etcd configuration for AMS.""" 59 | 60 | use_embedded: bool 61 | ca: Path = ETCD_CA_PATH 62 | cert: Path = ETCD_CERT_PATH 63 | key: Path = ETCD_KEY_PATH 64 | servers: List[str] = field(default_factory=list) 65 | 66 | @property 67 | def is_ready(self) -> bool: 68 | """Check if etcd is ready or not.""" 69 | return self.use_embedded or ( 70 | self.ca.exists() and self.cert.exists() and self.key.exists() and bool(self.servers) 71 | ) 72 | 73 | 74 | @dataclass 75 | class PrometheusConfig: 76 | """Metrics configuration for AMS.""" 77 | 78 | target_ip: str 79 | target_port: int 80 | tls_cert_path: str 81 | tls_key_path: str 82 | 
basic_auth_username: str 83 | basic_auth_password: str 84 | metrics_path: str 85 | extra_labels: Optional[Dict[str, str]] = field(default_factory=dict) 86 | enabled: bool = False 87 | 88 | def __post_init__(self): 89 | """Post initialization validations.""" 90 | if self.target_port > 0: 91 | self.enabled = True 92 | 93 | @property 94 | def scrape_jobs(self) -> List[Dict]: 95 | """Generate scrape jobs for prometheus.""" 96 | job = { 97 | "job_name": "metrics", 98 | "metrics_path": self.metrics_path, 99 | "static_configs": [{"targets": [f"{self.target_ip}:{self.target_port}"]}], 100 | } 101 | if self.basic_auth_username and self.basic_auth_password: 102 | auth = {"username": self.basic_auth_username, "password": self.basic_auth_password} 103 | job.update(basic_auth=auth) 104 | return [job] 105 | 106 | 107 | @dataclass 108 | class BackendConfig: 109 | """Backend configuration for AMS.""" 110 | 111 | port_range: str 112 | force_tls12: str 113 | use_network_acl: str 114 | lxd_project: str 115 | metrics_server: Optional[str] = "" 116 | 117 | 118 | @dataclass 119 | class ServiceConfig: 120 | """Service level configuration for AMS.""" 121 | 122 | log_level: str 123 | ip: str 124 | port: int 125 | store: ETCDConfig 126 | backend: BackendConfig 127 | metrics: PrometheusConfig 128 | 129 | 130 | class AMS: 131 | """Class for handling AMS configurations.""" 132 | 133 | def __init__(self, charm: ops.CharmBase): 134 | self._sc = snap.SnapCache() 135 | self._charm = charm 136 | 137 | @property 138 | def snap(self): 139 | """Return AMS snap.""" 140 | return self._sc[SNAP_NAME] 141 | 142 | def restart(self): 143 | """Restart AMS Snap.""" 144 | self.snap.restart() 145 | 146 | def remove(self): 147 | """Remove AMS users, drop-in service and the snap.""" 148 | snap.remove(SNAP_NAME) 149 | shutil.rmtree(SERVICE_DROP_IN_PATH.parent) 150 | passwd.remove_group(GROUP_NAME) 151 | 152 | def install(self, channel: str, revision: Optional[str] = None): 153 | """Install AMS including its 
Snap.""" 154 | try: 155 | kwargs = {} 156 | if revision: 157 | kwargs = {"revision": int(revision)} 158 | else: 159 | kwargs = {"channel": channel} 160 | self.snap.ensure(state=snap.SnapState.Latest, **kwargs) 161 | self.snap.hold() 162 | except snap.SnapError as e: 163 | logger.error("could not install ams. Reason: %s", e.message) 164 | logger.debug(e, exc_info=True) 165 | raise e 166 | 167 | # refresh snap cache after installation 168 | self._sc._load_installed_snaps() 169 | self.snap.connect(plug="daemon-notify", slot="core:daemon-notify") 170 | self.snap.alias("amc", "amc") 171 | 172 | passwd.add_group(GROUP_NAME) 173 | passwd.add_user_to_group("ubuntu", GROUP_NAME) 174 | self._create_systemd_drop_in() 175 | 176 | def setup_lxd(self, key: bytes, cert: bytes): 177 | """Create certificates for Etcd.""" 178 | LXD_CLIENT_CONFIG_FOLDER.mkdir(exist_ok=True, parents=True) 179 | LXD_CLIENT_CERT_PATH.write_bytes(cert) 180 | LXD_CLIENT_KEY_PATH.write_bytes(key) 181 | 182 | def setup_etcd(self, ca: str, key: str, cert: str): 183 | """Create certificates for Etcd.""" 184 | ETCD_BASE_PATH.mkdir(exist_ok=True, parents=True) 185 | ETCD_CA_PATH.write_text(ca) 186 | ETCD_CERT_PATH.write_text(cert) 187 | ETCD_KEY_PATH.write_text(key) 188 | 189 | def _create_systemd_drop_in(self): 190 | tenv = Environment(loader=FileSystemLoader("templates")) 191 | template = tenv.get_template("10-ams-unix-socket-chown.conf.j2") 192 | rendered_content = template.render( 193 | { 194 | "group": GROUP_NAME, 195 | } 196 | ) 197 | SERVICE_DROP_IN_PATH.parent.mkdir(parents=True, exist_ok=True) 198 | SERVICE_DROP_IN_PATH.write_text(rendered_content) 199 | systemd.daemon_reload() 200 | 201 | @property 202 | def version(self) -> str: 203 | """Return AMS version.""" 204 | snap_info = self.snap._snap_client.get_snap_information(SNAP_NAME) 205 | return snap_info["channels"][self.snap.channel]["version"] 206 | 207 | def configure( 208 | self, 209 | config: ServiceConfig, 210 | ): 211 | """Configure AMS 
snap.""" 212 | tenv = Environment(loader=FileSystemLoader("templates")) 213 | template = tenv.get_template("settings.yaml.j2") 214 | content = asdict(config) 215 | rendered_content = template.render(content) 216 | AMS_CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True) 217 | AMS_CONFIG_PATH.write_text(rendered_content) 218 | logger.debug("Configuration written for ams: %s", rendered_content) 219 | 220 | self.snap.start(enable=True) 221 | 222 | @property 223 | def is_running(self): 224 | """Check if the service is running.""" 225 | return systemd.service_running(SERVICE) 226 | 227 | def set_location(self, location, port): 228 | """Set location configuration item for AMS.""" 229 | curr_config = self.get_config_item("load_balancer.url") 230 | url = f"https://{location}:{port}" 231 | if curr_config == url: 232 | return 233 | self._set_config_item("load_balancer.url", url) 234 | 235 | def get_config_item(self, item: str) -> str: 236 | """Get service configuration item from AMS.""" 237 | return self._get_config().get(item, "") 238 | 239 | def _get_config(self) -> dict: 240 | output = subprocess.run( 241 | ["/snap/bin/amc", "config", "show"], capture_output=True, check=True 242 | ) 243 | return yaml.safe_load(output.stdout).get("config", {}) 244 | 245 | def _set_config_item(self, name, value): 246 | subprocess.run(["/snap/bin/amc", "config", "set", name, value], check=True) 247 | logger.debug("Set ams configuration item: %s", name) 248 | 249 | def get_registered_certificates(self) -> List[Dict[str, str]]: 250 | """Get registered client with AMS.""" 251 | result = subprocess.run( 252 | ["/snap/bin/amc", "config", "trust", "ls", "--format", "json"], capture_output=True 253 | ) 254 | return json.loads(result.stdout.decode()) 255 | 256 | def register_client(self, cert: str) -> str: 257 | """Register a new client with AMS and return its fingerprint.""" 258 | current_certs = self.get_registered_certificates() 259 | current_fp = set() 260 | for crt in current_certs: 261 | 
current_fp.add(crt["fingerprint"]) 262 | with tempfile.NamedTemporaryFile(delete=False, dir=SNAP_COMMON_PATH, suffix=".crt") as f: 263 | f.write(cert.encode()) 264 | f.close() 265 | result = subprocess.run( 266 | ["/snap/bin/amc", "config", "trust", "add", f.name], 267 | stdout=subprocess.PIPE, 268 | stderr=subprocess.STDOUT, 269 | ) 270 | if "already exists" in result.stdout.decode(): 271 | logger.info("Skipped registration for client. Certificate already registered") 272 | return "" 273 | else: 274 | result.check_returncode() 275 | logger.debug("Registered new ams client via amc") 276 | updated_certs = self.get_registered_certificates() 277 | updated_fp = set() 278 | for crt in updated_certs: 279 | updated_fp.add(crt["fingerprint"]) 280 | new_fp = updated_fp - current_fp 281 | if not new_fp: 282 | raise Exception("Failed to register certificate") 283 | return new_fp.pop() 284 | 285 | def unregister_client(self, fingerprint: str): 286 | """Remove client from AMS.""" 287 | subprocess.run(["/snap/bin/amc", "config", "trust", "remove", fingerprint], check=True) 288 | logger.info("Client unregistered successfully. Certificate removed") 289 | 290 | def apply_service_configuration(self, config_items: List[str]): 291 | """Set configuration items in ams using `amc config set`.""" 292 | for item in config_items: 293 | name, value = item.split("=") 294 | self._set_config_item(name, value) 295 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /lib/charms/grafana_agent/v0/cos_agent.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Canonical Ltd. 2 | # See LICENSE file for licensing details. 3 | 4 | r"""## Overview. 5 | 6 | This library can be used to manage the cos_agent relation interface: 7 | 8 | - `COSAgentProvider`: Use in machine charms that need to have a workload's metrics 9 | or logs scraped, or forward rule files or dashboards to Prometheus, Loki or Grafana through 10 | the Grafana Agent machine charm. 11 | 12 | - `COSAgentConsumer`: Used in the Grafana Agent machine charm to manage the requirer side of 13 | the `cos_agent` interface. 14 | 15 | 16 | ## COSAgentProvider Library Usage 17 | 18 | Grafana Agent machine Charmed Operator interacts with its clients using the cos_agent library. 19 | Charms seeking to send telemetry, must do so using the `COSAgentProvider` object from 20 | this charm library. 
21 | 22 | Using the `COSAgentProvider` object only requires instantiating it, 23 | typically in the `__init__` method of your charm (the one which sends telemetry). 24 | 25 | The constructor of `COSAgentProvider` has only one required and nine optional parameters: 26 | 27 | ```python 28 | def __init__( 29 | self, 30 | charm: CharmType, 31 | relation_name: str = DEFAULT_RELATION_NAME, 32 | metrics_endpoints: Optional[List[_MetricsEndpointDict]] = None, 33 | metrics_rules_dir: str = "./src/prometheus_alert_rules", 34 | logs_rules_dir: str = "./src/loki_alert_rules", 35 | recurse_rules_dirs: bool = False, 36 | log_slots: Optional[List[str]] = None, 37 | dashboard_dirs: Optional[List[str]] = None, 38 | refresh_events: Optional[List] = None, 39 | scrape_configs: Optional[Union[List[Dict], Callable]] = None, 40 | ): 41 | ``` 42 | 43 | ### Parameters 44 | 45 | - `charm`: The instance of the charm that instantiates `COSAgentProvider`, typically `self`. 46 | 47 | - `relation_name`: If your charmed operator uses a relation name other than `cos-agent` to use 48 | the `cos_agent` interface, this is where you have to specify that. 49 | 50 | - `metrics_endpoints`: In this parameter you can specify the metrics endpoints that Grafana Agent 51 | machine Charmed Operator will scrape. The configs of this list will be merged with the configs 52 | from `scrape_configs`. 53 | 54 | - `metrics_rules_dir`: The directory in which the Charmed Operator stores its metrics alert rules 55 | files. 56 | 57 | - `logs_rules_dir`: The directory in which the Charmed Operator stores its logs alert rules files. 58 | 59 | - `recurse_rules_dirs`: This parameters set whether Grafana Agent machine Charmed Operator has to 60 | search alert rules files recursively in the previous two directories or not. 61 | 62 | - `log_slots`: Snap slots to connect to for scraping logs in the form ["snap-name:slot", ...]. 
63 | 64 | - `dashboard_dirs`: List of directories where the dashboards are stored in the Charmed Operator. 65 | 66 | - `refresh_events`: List of events on which to refresh relation data. 67 | 68 | - `scrape_configs`: List of standard scrape_configs dicts or a callable that returns the list in 69 | case the configs need to be generated dynamically. The contents of this list will be merged 70 | with the configs from `metrics_endpoints`. 71 | 72 | 73 | ### Example 1 - Minimal instrumentation: 74 | 75 | In order to use this object the following should be in the `charm.py` file. 76 | 77 | ```python 78 | from charms.grafana_agent.v0.cos_agent import COSAgentProvider 79 | ... 80 | class TelemetryProviderCharm(CharmBase): 81 | def __init__(self, *args): 82 | ... 83 | self._grafana_agent = COSAgentProvider(self) 84 | ``` 85 | 86 | ### Example 2 - Full instrumentation: 87 | 88 | In order to use this object the following should be in the `charm.py` file. 89 | 90 | ```python 91 | from charms.grafana_agent.v0.cos_agent import COSAgentProvider 92 | ... 93 | class TelemetryProviderCharm(CharmBase): 94 | def __init__(self, *args): 95 | ... 
96 | self._grafana_agent = COSAgentProvider( 97 | self, 98 | relation_name="custom-cos-agent", 99 | metrics_endpoints=[ 100 | # specify "path" and "port" to scrape from localhost 101 | {"path": "/metrics", "port": 9000}, 102 | {"path": "/metrics", "port": 9001}, 103 | {"path": "/metrics", "port": 9002}, 104 | ], 105 | metrics_rules_dir="./src/alert_rules/prometheus", 106 | logs_rules_dir="./src/alert_rules/loki", 107 | recursive_rules_dir=True, 108 | log_slots=["my-app:slot"], 109 | dashboard_dirs=["./src/dashboards_1", "./src/dashboards_2"], 110 | refresh_events=["update-status", "upgrade-charm"], 111 | scrape_configs=[ 112 | { 113 | "job_name": "custom_job", 114 | "metrics_path": "/metrics", 115 | "authorization": {"credentials": "bearer-token"}, 116 | "static_configs": [ 117 | { 118 | "targets": ["localhost:9003"]}, 119 | "labels": {"key": "value"}, 120 | }, 121 | ], 122 | }, 123 | ] 124 | ) 125 | ``` 126 | 127 | ### Example 3 - Dynamic scrape configs generation: 128 | 129 | Pass a function to the `scrape_configs` to decouple the generation of the configs 130 | from the instantiation of the COSAgentProvider object. 131 | 132 | ```python 133 | from charms.grafana_agent.v0.cos_agent import COSAgentProvider 134 | ... 135 | 136 | class TelemetryProviderCharm(CharmBase): 137 | def generate_scrape_configs(self): 138 | return [ 139 | { 140 | "job_name": "custom", 141 | "metrics_path": "/metrics", 142 | "static_configs": [{"targets": ["localhost:9000"]}], 143 | }, 144 | ] 145 | 146 | def __init__(self, *args): 147 | ... 148 | self._grafana_agent = COSAgentProvider( 149 | self, 150 | scrape_configs=self.generate_scrape_configs, 151 | ) 152 | ``` 153 | 154 | ## COSAgentConsumer Library Usage 155 | 156 | This object may be used by any Charmed Operator which gathers telemetry data by 157 | implementing the consumer side of the `cos_agent` interface. 158 | For instance Grafana Agent machine Charmed Operator. 
159 | 160 | For this purpose the charm needs to instantiate the `COSAgentConsumer` object with one mandatory 161 | and two optional arguments. 162 | 163 | ### Parameters 164 | 165 | - `charm`: A reference to the parent (Grafana Agent machine) charm. 166 | 167 | - `relation_name`: The name of the relation that the charm uses to interact 168 | with its clients that provides telemetry data using the `COSAgentProvider` object. 169 | 170 | If provided, this relation name must match a provided relation in metadata.yaml with the 171 | `cos_agent` interface. 172 | The default value of this argument is "cos-agent". 173 | 174 | - `refresh_events`: List of events on which to refresh relation data. 175 | 176 | 177 | ### Example 1 - Minimal instrumentation: 178 | 179 | In order to use this object the following should be in the `charm.py` file. 180 | 181 | ```python 182 | from charms.grafana_agent.v0.cos_agent import COSAgentConsumer 183 | ... 184 | class GrafanaAgentMachineCharm(GrafanaAgentCharm) 185 | def __init__(self, *args): 186 | ... 187 | self._cos = COSAgentRequirer(self) 188 | ``` 189 | 190 | 191 | ### Example 2 - Full instrumentation: 192 | 193 | In order to use this object the following should be in the `charm.py` file. 194 | 195 | ```python 196 | from charms.grafana_agent.v0.cos_agent import COSAgentConsumer 197 | ... 198 | class GrafanaAgentMachineCharm(GrafanaAgentCharm) 199 | def __init__(self, *args): 200 | ... 
201 | self._cos = COSAgentRequirer( 202 | self, 203 | relation_name="cos-agent-consumer", 204 | refresh_events=["update-status", "upgrade-charm"], 205 | ) 206 | ``` 207 | """ 208 | 209 | import json 210 | import logging 211 | from collections import namedtuple 212 | from itertools import chain 213 | from pathlib import Path 214 | from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional, Set, Tuple, Union 215 | 216 | import pydantic 217 | from cosl import GrafanaDashboard, JujuTopology 218 | from cosl.rules import AlertRules 219 | from ops.charm import RelationChangedEvent 220 | from ops.framework import EventBase, EventSource, Object, ObjectEvents 221 | from ops.model import Relation 222 | from ops.testing import CharmType 223 | 224 | if TYPE_CHECKING: 225 | try: 226 | from typing import TypedDict 227 | 228 | class _MetricsEndpointDict(TypedDict): 229 | path: str 230 | port: int 231 | 232 | except ModuleNotFoundError: 233 | _MetricsEndpointDict = Dict # pyright: ignore 234 | 235 | LIBID = "dc15fa84cef84ce58155fb84f6c6213a" 236 | LIBAPI = 0 237 | LIBPATCH = 8 238 | 239 | PYDEPS = ["cosl", "pydantic < 2"] 240 | 241 | DEFAULT_RELATION_NAME = "cos-agent" 242 | DEFAULT_PEER_RELATION_NAME = "peers" 243 | DEFAULT_SCRAPE_CONFIG = { 244 | "static_configs": [{"targets": ["localhost:80"]}], 245 | "metrics_path": "/metrics", 246 | } 247 | 248 | logger = logging.getLogger(__name__) 249 | SnapEndpoint = namedtuple("SnapEndpoint", "owner, name") 250 | 251 | 252 | class CosAgentProviderUnitData(pydantic.BaseModel): 253 | """Unit databag model for `cos-agent` relation.""" 254 | 255 | # The following entries are the same for all units of the same principal. 256 | # Note that the same grafana agent subordinate may be related to several apps. 
257 | # this needs to make its way to the gagent leader 258 | metrics_alert_rules: dict 259 | log_alert_rules: dict 260 | dashboards: List[GrafanaDashboard] 261 | # subordinate is no longer used but we should keep it until we bump the library to ensure 262 | # we don't break compatibility. 263 | subordinate: Optional[bool] = None 264 | 265 | # The following entries may vary across units of the same principal app. 266 | # this data does not need to be forwarded to the gagent leader 267 | metrics_scrape_jobs: List[Dict] 268 | log_slots: List[str] 269 | 270 | # when this whole datastructure is dumped into a databag, it will be nested under this key. 271 | # while not strictly necessary (we could have it 'flattened out' into the databag), 272 | # this simplifies working with the model. 273 | KEY: ClassVar[str] = "config" 274 | 275 | 276 | class CosAgentPeersUnitData(pydantic.BaseModel): 277 | """Unit databag model for `peers` cos-agent machine charm peer relation.""" 278 | 279 | # We need the principal unit name and relation metadata to be able to render identifiers 280 | # (e.g. topology) on the leader side, after all the data moves into peer data (the grafana 281 | # agent leader can only see its own principal, because it is a subordinate charm). 282 | unit_name: str 283 | relation_id: str 284 | relation_name: str 285 | 286 | # The only data that is forwarded to the leader is data that needs to go into the app databags 287 | # of the outgoing o11y relations. 288 | metrics_alert_rules: Optional[dict] 289 | log_alert_rules: Optional[dict] 290 | dashboards: Optional[List[GrafanaDashboard]] 291 | 292 | # when this whole datastructure is dumped into a databag, it will be nested under this key. 293 | # while not strictly necessary (we could have it 'flattened out' into the databag), 294 | # this simplifies working with the model. 295 | KEY: ClassVar[str] = "config" 296 | 297 | @property 298 | def app_name(self) -> str: 299 | """Parse out the app name from the unit name. 
300 | 301 | TODO: Switch to using `model_post_init` when pydantic v2 is released? 302 | https://github.com/pydantic/pydantic/issues/1729#issuecomment-1300576214 303 | """ 304 | return self.unit_name.split("/")[0] 305 | 306 | 307 | class COSAgentProvider(Object): 308 | """Integration endpoint wrapper for the provider side of the cos_agent interface.""" 309 | 310 | def __init__( 311 | self, 312 | charm: CharmType, 313 | relation_name: str = DEFAULT_RELATION_NAME, 314 | metrics_endpoints: Optional[List["_MetricsEndpointDict"]] = None, 315 | metrics_rules_dir: str = "./src/prometheus_alert_rules", 316 | logs_rules_dir: str = "./src/loki_alert_rules", 317 | recurse_rules_dirs: bool = False, 318 | log_slots: Optional[List[str]] = None, 319 | dashboard_dirs: Optional[List[str]] = None, 320 | refresh_events: Optional[List] = None, 321 | *, 322 | scrape_configs: Optional[Union[List[dict], Callable]] = None, 323 | ): 324 | """Create a COSAgentProvider instance. 325 | 326 | Args: 327 | charm: The `CharmBase` instance that is instantiating this object. 328 | relation_name: The name of the relation to communicate over. 329 | metrics_endpoints: List of endpoints in the form [{"path": path, "port": port}, ...]. 330 | This argument is a simplified form of the `scrape_configs`. 331 | The contents of this list will be merged with the contents of `scrape_configs`. 332 | metrics_rules_dir: Directory where the metrics rules are stored. 333 | logs_rules_dir: Directory where the logs rules are stored. 334 | recurse_rules_dirs: Whether to recurse into rule paths. 335 | log_slots: Snap slots to connect to for scraping logs 336 | in the form ["snap-name:slot", ...]. 337 | dashboard_dirs: Directory where the dashboards are stored. 338 | refresh_events: List of events on which to refresh relation data. 339 | scrape_configs: List of standard scrape_configs dicts or a callable 340 | that returns the list in case the configs need to be generated dynamically. 
341 | The contents of this list will be merged with the contents of `metrics_endpoints`. 342 | """ 343 | super().__init__(charm, relation_name) 344 | dashboard_dirs = dashboard_dirs or ["./src/grafana_dashboards"] 345 | 346 | self._charm = charm 347 | self._relation_name = relation_name 348 | self._metrics_endpoints = metrics_endpoints or [] 349 | self._scrape_configs = scrape_configs or [] 350 | self._metrics_rules = metrics_rules_dir 351 | self._logs_rules = logs_rules_dir 352 | self._recursive = recurse_rules_dirs 353 | self._log_slots = log_slots or [] 354 | self._dashboard_dirs = dashboard_dirs 355 | self._refresh_events = refresh_events or [self._charm.on.config_changed] 356 | 357 | events = self._charm.on[relation_name] 358 | self.framework.observe(events.relation_joined, self._on_refresh) 359 | self.framework.observe(events.relation_changed, self._on_refresh) 360 | for event in self._refresh_events: 361 | self.framework.observe(event, self._on_refresh) 362 | 363 | def _on_refresh(self, event): 364 | """Trigger the class to update relation data.""" 365 | relations = self._charm.model.relations[self._relation_name] 366 | 367 | for relation in relations: 368 | # Before a principal is related to the grafana-agent subordinate, we'd get 369 | # ModelError: ERROR cannot read relation settings: unit "zk/2": settings not found 370 | # Add a guard to make sure it doesn't happen. 371 | if relation.data and self._charm.unit in relation.data: 372 | # Subordinate relations can communicate only over unit data. 
373 | try: 374 | data = CosAgentProviderUnitData( 375 | metrics_alert_rules=self._metrics_alert_rules, 376 | log_alert_rules=self._log_alert_rules, 377 | dashboards=self._dashboards, 378 | metrics_scrape_jobs=self._scrape_jobs, 379 | log_slots=self._log_slots, 380 | ) 381 | relation.data[self._charm.unit][data.KEY] = data.json() 382 | except ( 383 | pydantic.ValidationError, 384 | json.decoder.JSONDecodeError, 385 | ) as e: 386 | logger.error("Invalid relation data provided: %s", e) 387 | 388 | @property 389 | def _scrape_jobs(self) -> List[Dict]: 390 | """Return a prometheus_scrape-like data structure for jobs. 391 | 392 | https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config 393 | """ 394 | if callable(self._scrape_configs): 395 | scrape_configs = self._scrape_configs() 396 | else: 397 | # Create a copy of the user scrape_configs, since we will mutate this object 398 | scrape_configs = self._scrape_configs.copy() 399 | 400 | # Convert "metrics_endpoints" to standard scrape_configs, and add them in 401 | for endpoint in self._metrics_endpoints: 402 | scrape_configs.append( 403 | { 404 | "metrics_path": endpoint["path"], 405 | "static_configs": [{"targets": [f"localhost:{endpoint['port']}"]}], 406 | } 407 | ) 408 | 409 | scrape_configs = scrape_configs or [DEFAULT_SCRAPE_CONFIG] 410 | 411 | # Augment job name to include the app name and a unique id (index) 412 | for idx, scrape_config in enumerate(scrape_configs): 413 | scrape_config["job_name"] = "_".join( 414 | [self._charm.app.name, str(idx), scrape_config.get("job_name", "default")] 415 | ) 416 | 417 | return scrape_configs 418 | 419 | @property 420 | def _metrics_alert_rules(self) -> Dict: 421 | """Use (for now) the prometheus_scrape AlertRules to initialize this.""" 422 | alert_rules = AlertRules( 423 | query_type="promql", topology=JujuTopology.from_charm(self._charm) 424 | ) 425 | alert_rules.add_path(self._metrics_rules, recursive=self._recursive) 426 | return 
alert_rules.as_dict() 427 | 428 | @property 429 | def _log_alert_rules(self) -> Dict: 430 | """Use (for now) the loki_push_api AlertRules to initialize this.""" 431 | alert_rules = AlertRules(query_type="logql", topology=JujuTopology.from_charm(self._charm)) 432 | alert_rules.add_path(self._logs_rules, recursive=self._recursive) 433 | return alert_rules.as_dict() 434 | 435 | @property 436 | def _dashboards(self) -> List[GrafanaDashboard]: 437 | dashboards: List[GrafanaDashboard] = [] 438 | for d in self._dashboard_dirs: 439 | for path in Path(d).glob("*"): 440 | dashboard = GrafanaDashboard._serialize(path.read_bytes()) 441 | dashboards.append(dashboard) 442 | return dashboards 443 | 444 | 445 | class COSAgentDataChanged(EventBase): 446 | """Event emitted by `COSAgentRequirer` when relation data changes.""" 447 | 448 | 449 | class COSAgentValidationError(EventBase): 450 | """Event emitted by `COSAgentRequirer` when there is an error in the relation data.""" 451 | 452 | def __init__(self, handle, message: str = ""): 453 | super().__init__(handle) 454 | self.message = message 455 | 456 | def snapshot(self) -> Dict: 457 | """Save COSAgentValidationError source information.""" 458 | return {"message": self.message} 459 | 460 | def restore(self, snapshot): 461 | """Restore COSAgentValidationError source information.""" 462 | self.message = snapshot["message"] 463 | 464 | 465 | class COSAgentRequirerEvents(ObjectEvents): 466 | """`COSAgentRequirer` events.""" 467 | 468 | data_changed = EventSource(COSAgentDataChanged) 469 | validation_error = EventSource(COSAgentValidationError) 470 | 471 | 472 | class COSAgentRequirer(Object): 473 | """Integration endpoint wrapper for the Requirer side of the cos_agent interface.""" 474 | 475 | on = COSAgentRequirerEvents() # pyright: ignore 476 | 477 | def __init__( 478 | self, 479 | charm: CharmType, 480 | *, 481 | relation_name: str = DEFAULT_RELATION_NAME, 482 | peer_relation_name: str = DEFAULT_PEER_RELATION_NAME, 483 | 
refresh_events: Optional[List[str]] = None, 484 | ): 485 | """Create a COSAgentRequirer instance. 486 | 487 | Args: 488 | charm: The `CharmBase` instance that is instantiating this object. 489 | relation_name: The name of the relation to communicate over. 490 | peer_relation_name: The name of the peer relation to communicate over. 491 | refresh_events: List of events on which to refresh relation data. 492 | """ 493 | super().__init__(charm, relation_name) 494 | self._charm = charm 495 | self._relation_name = relation_name 496 | self._peer_relation_name = peer_relation_name 497 | self._refresh_events = refresh_events or [self._charm.on.config_changed] 498 | 499 | events = self._charm.on[relation_name] 500 | self.framework.observe( 501 | events.relation_joined, self._on_relation_data_changed 502 | ) # TODO: do we need this? 503 | self.framework.observe(events.relation_changed, self._on_relation_data_changed) 504 | for event in self._refresh_events: 505 | self.framework.observe(event, self.trigger_refresh) # pyright: ignore 506 | 507 | # Peer relation events 508 | # A peer relation is needed as it is the only mechanism for exchanging data across 509 | # subordinate units. 510 | # self.framework.observe( 511 | # self.on[self._peer_relation_name].relation_joined, self._on_peer_relation_joined 512 | # ) 513 | peer_events = self._charm.on[peer_relation_name] 514 | self.framework.observe(peer_events.relation_changed, self._on_peer_relation_changed) 515 | 516 | @property 517 | def peer_relation(self) -> Optional["Relation"]: 518 | """Helper function for obtaining the peer relation object. 519 | 520 | Returns: peer relation object 521 | (NOTE: would return None if called too early, e.g. during install). 
522 | """ 523 | return self.model.get_relation(self._peer_relation_name) 524 | 525 | def _on_peer_relation_changed(self, _): 526 | # Peer data is used for forwarding data from principal units to the grafana agent 527 | # subordinate leader, for updating the app data of the outgoing o11y relations. 528 | if self._charm.unit.is_leader(): 529 | self.on.data_changed.emit() # pyright: ignore 530 | 531 | def _on_relation_data_changed(self, event: RelationChangedEvent): 532 | # Peer data is the only means of communication between subordinate units. 533 | if not self.peer_relation: 534 | event.defer() 535 | return 536 | 537 | cos_agent_relation = event.relation 538 | if not event.unit or not cos_agent_relation.data.get(event.unit): 539 | return 540 | principal_unit = event.unit 541 | 542 | # Coherence check 543 | units = cos_agent_relation.units 544 | if len(units) > 1: 545 | # should never happen 546 | raise ValueError( 547 | f"unexpected error: subordinate relation {cos_agent_relation} " 548 | f"should have exactly one unit" 549 | ) 550 | 551 | if not (raw := cos_agent_relation.data[principal_unit].get(CosAgentProviderUnitData.KEY)): 552 | return 553 | 554 | if not (provider_data := self._validated_provider_data(raw)): 555 | return 556 | 557 | # Copy data from the cos_agent relation to the peer relation, so the leader could 558 | # follow up. 559 | # Save the originating unit name, so it could be used for topology later on by the leader. 
560 | data = CosAgentPeersUnitData( # peer relation databag model 561 | unit_name=event.unit.name, 562 | relation_id=str(event.relation.id), 563 | relation_name=event.relation.name, 564 | metrics_alert_rules=provider_data.metrics_alert_rules, 565 | log_alert_rules=provider_data.log_alert_rules, 566 | dashboards=provider_data.dashboards, 567 | ) 568 | self.peer_relation.data[self._charm.unit][ 569 | f"{CosAgentPeersUnitData.KEY}-{event.unit.name}" 570 | ] = data.json() 571 | 572 | # We can't easily tell if the data that was changed is limited to only the data 573 | # that goes into peer relation (in which case, if this is not a leader unit, we wouldn't 574 | # need to emit `on.data_changed`), so we're emitting `on.data_changed` either way. 575 | self.on.data_changed.emit() # pyright: ignore 576 | 577 | def _validated_provider_data(self, raw) -> Optional[CosAgentProviderUnitData]: 578 | try: 579 | return CosAgentProviderUnitData(**json.loads(raw)) 580 | except (pydantic.ValidationError, json.decoder.JSONDecodeError) as e: 581 | self.on.validation_error.emit(message=str(e)) # pyright: ignore 582 | return None 583 | 584 | def trigger_refresh(self, _): 585 | """Trigger a refresh of relation data.""" 586 | # FIXME: Figure out what we should do here 587 | self.on.data_changed.emit() # pyright: ignore 588 | 589 | @property 590 | def _remote_data(self) -> List[Tuple[CosAgentProviderUnitData, JujuTopology]]: 591 | """Return a list of remote data from each of the related units. 592 | 593 | Assumes that the relation is of type subordinate. 594 | Relies on the fact that, for subordinate relations, the only remote unit visible to 595 | *this unit* is the principal unit that this unit is attached to. 
596 | """ 597 | all_data = [] 598 | 599 | for relation in self._charm.model.relations[self._relation_name]: 600 | if not relation.units: 601 | continue 602 | unit = next(iter(relation.units)) 603 | if not (raw := relation.data[unit].get(CosAgentProviderUnitData.KEY)): 604 | continue 605 | if not (provider_data := self._validated_provider_data(raw)): 606 | continue 607 | 608 | topology = JujuTopology( 609 | model=self._charm.model.name, 610 | model_uuid=self._charm.model.uuid, 611 | application=unit.app.name, 612 | unit=unit.name, 613 | ) 614 | 615 | all_data.append((provider_data, topology)) 616 | 617 | return all_data 618 | 619 | def _gather_peer_data(self) -> List[CosAgentPeersUnitData]: 620 | """Collect data from the peers. 621 | 622 | Returns a trimmed-down list of CosAgentPeersUnitData. 623 | """ 624 | relation = self.peer_relation 625 | 626 | # Ensure that whatever context we're running this in, we take the necessary precautions: 627 | if not relation or not relation.data or not relation.app: 628 | return [] 629 | 630 | # Iterate over all peer unit data and only collect every principal once. 631 | peer_data: List[CosAgentPeersUnitData] = [] 632 | app_names: Set[str] = set() 633 | 634 | for unit in chain((self._charm.unit,), relation.units): 635 | if not relation.data.get(unit): 636 | continue 637 | 638 | for unit_name in relation.data.get(unit): # pyright: ignore 639 | if not unit_name.startswith(CosAgentPeersUnitData.KEY): 640 | continue 641 | raw = relation.data[unit].get(unit_name) 642 | if raw is None: 643 | continue 644 | data = CosAgentPeersUnitData(**json.loads(raw)) 645 | # Have we already seen this principal app? 
646 | if (app_name := data.app_name) in app_names: 647 | continue 648 | peer_data.append(data) 649 | app_names.add(app_name) 650 | 651 | return peer_data 652 | 653 | @property 654 | def metrics_alerts(self) -> Dict[str, Any]: 655 | """Fetch metrics alerts.""" 656 | alert_rules = {} 657 | 658 | seen_apps: List[str] = [] 659 | for data in self._gather_peer_data(): 660 | if rules := data.metrics_alert_rules: 661 | app_name = data.app_name 662 | if app_name in seen_apps: 663 | continue # dedup! 664 | seen_apps.append(app_name) 665 | # This is only used for naming the file, so be as specific as we can be 666 | identifier = JujuTopology( 667 | model=self._charm.model.name, 668 | model_uuid=self._charm.model.uuid, 669 | application=app_name, 670 | # For the topology unit, we could use `data.principal_unit_name`, but that unit 671 | # name may not be very stable: `_gather_peer_data` de-duplicates by app name so 672 | # the exact unit name that turns up first in the iterator may vary from time to 673 | # time. So using the grafana-agent unit name instead. 674 | unit=self._charm.unit.name, 675 | ).identifier 676 | 677 | alert_rules[identifier] = rules 678 | 679 | return alert_rules 680 | 681 | @property 682 | def metrics_jobs(self) -> List[Dict]: 683 | """Parse the relation data contents and extract the metrics jobs.""" 684 | scrape_jobs = [] 685 | for data, topology in self._remote_data: 686 | for job in data.metrics_scrape_jobs: 687 | # In #220, relation schema changed from a simplified dict to the standard 688 | # `scrape_configs`. 689 | # This is to ensure backwards compatibility with Providers older than v0.5. 690 | if "path" in job and "port" in job and "job_name" in job: 691 | job = { 692 | "job_name": job["job_name"], 693 | "metrics_path": job["path"], 694 | "static_configs": [{"targets": [f"localhost:{job['port']}"]}], 695 | # We include insecure_skip_verify because we are always scraping localhost. 
696 | # Even if we have the certs for the scrape targets, we'd rather specify the scrape 697 | # jobs with localhost rather than the SAN DNS the cert was issued for. 698 | "tls_config": {"insecure_skip_verify": True}, 699 | } 700 | 701 | # Apply labels to the scrape jobs 702 | for static_config in job.get("static_configs", []): 703 | topo_as_dict = topology.as_dict(excluded_keys=["charm_name"]) 704 | static_config["labels"] = { 705 | # Be sure to keep labels from static_config 706 | **static_config.get("labels", {}), 707 | # TODO: We should add a new method in juju_topology.py 708 | # that like `as_dict` method, returns the keys with juju_ prefix 709 | # https://github.com/canonical/cos-lib/issues/18 710 | **{ 711 | "juju_{}".format(key): value 712 | for key, value in topo_as_dict.items() 713 | if value 714 | }, 715 | } 716 | 717 | scrape_jobs.append(job) 718 | 719 | return scrape_jobs 720 | 721 | @property 722 | def snap_log_endpoints(self) -> List[SnapEndpoint]: 723 | """Fetch logging endpoints exposed by related snaps.""" 724 | plugs = [] 725 | for data, _ in self._remote_data: 726 | targets = data.log_slots 727 | if targets: 728 | for target in targets: 729 | if target in plugs: 730 | logger.warning( 731 | f"plug {target} already listed. " 732 | "The same snap is being passed from multiple " 733 | "endpoints; this should not happen." 734 | ) 735 | else: 736 | plugs.append(target) 737 | 738 | endpoints = [] 739 | for plug in plugs: 740 | if ":" not in plug: 741 | logger.error(f"invalid plug definition received: {plug}. 
Ignoring...") 742 | else: 743 | endpoint = SnapEndpoint(*plug.split(":")) 744 | endpoints.append(endpoint) 745 | return endpoints 746 | 747 | @property 748 | def logs_alerts(self) -> Dict[str, Any]: 749 | """Fetch log alerts.""" 750 | alert_rules = {} 751 | seen_apps: List[str] = [] 752 | 753 | for data in self._gather_peer_data(): 754 | if rules := data.log_alert_rules: 755 | # This is only used for naming the file, so be as specific as we can be 756 | app_name = data.app_name 757 | if app_name in seen_apps: 758 | continue # dedup! 759 | seen_apps.append(app_name) 760 | 761 | identifier = JujuTopology( 762 | model=self._charm.model.name, 763 | model_uuid=self._charm.model.uuid, 764 | application=app_name, 765 | # For the topology unit, we could use `data.unit_name`, but that unit 766 | # name may not be very stable: `_gather_peer_data` de-duplicates by app name so 767 | # the exact unit name that turns up first in the iterator may vary from time to 768 | # time. So using the grafana-agent unit name instead. 769 | unit=self._charm.unit.name, 770 | ).identifier 771 | 772 | alert_rules[identifier] = rules 773 | 774 | return alert_rules 775 | 776 | @property 777 | def dashboards(self) -> List[Dict[str, str]]: 778 | """Fetch dashboards as encoded content. 779 | 780 | Dashboards are assumed not to vary across units of the same primary. 781 | """ 782 | dashboards: List[Dict[str, Any]] = [] 783 | 784 | seen_apps: List[str] = [] 785 | for data in self._gather_peer_data(): 786 | app_name = data.app_name 787 | if app_name in seen_apps: 788 | continue # dedup! 
789 | seen_apps.append(app_name) 790 | 791 | for encoded_dashboard in data.dashboards or (): 792 | content = GrafanaDashboard(encoded_dashboard)._deserialize() 793 | 794 | title = content.get("title", "no_title") 795 | 796 | dashboards.append( 797 | { 798 | "relation_id": data.relation_id, 799 | # We have the remote charm name - use it for the identifier 800 | "charm": f"{data.relation_name}-{app_name}", 801 | "content": content, 802 | "title": title, 803 | } 804 | ) 805 | 806 | return dashboards 807 | -------------------------------------------------------------------------------- /lib/charms/operator_libs_linux/v2/snap.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Canonical Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Representations of the system's Snaps, and abstractions around managing them. 16 | 17 | The `snap` module provides convenience methods for listing, installing, refreshing, and removing 18 | Snap packages, in addition to setting and getting configuration options for them. 19 | 20 | In the `snap` module, `SnapCache` creates a dict-like mapping of `Snap` objects at when 21 | instantiated. Installed snaps are fully populated, and available snaps are lazily-loaded upon 22 | request. This module relies on an installed and running `snapd` daemon to perform operations over 23 | the `snapd` HTTP API. 
24 | 25 | `SnapCache` objects can be used to install or modify Snap packages by name in a manner similar to 26 | using the `snap` command from the commandline. 27 | 28 | An example of adding Juju to the system with `SnapCache` and setting a config value: 29 | 30 | ```python 31 | try: 32 | cache = snap.SnapCache() 33 | juju = cache["juju"] 34 | 35 | if not juju.present: 36 | juju.ensure(snap.SnapState.Latest, channel="beta") 37 | juju.set({"some.key": "value", "some.key2": "value2"}) 38 | except snap.SnapError as e: 39 | logger.error("An exception occurred when installing charmcraft. Reason: %s", e.message) 40 | ``` 41 | 42 | In addition, the `snap` module provides "bare" methods which can act on Snap packages as 43 | simple function calls. :meth:`add`, :meth:`remove`, and :meth:`ensure` are provided, as 44 | well as :meth:`add_local` for installing directly from a local `.snap` file. These return 45 | `Snap` objects. 46 | 47 | As an example of installing several Snaps and checking details: 48 | 49 | ```python 50 | try: 51 | nextcloud, charmcraft = snap.add(["nextcloud", "charmcraft"]) 52 | if nextcloud.get("mode") != "production": 53 | nextcloud.set({"mode": "production"}) 54 | except snap.SnapError as e: 55 | logger.error("An exception occurred when installing snaps. 
Reason: %s" % e.message) 56 | ``` 57 | """ 58 | 59 | import http.client 60 | import json 61 | import logging 62 | import os 63 | import re 64 | import socket 65 | import subprocess 66 | import sys 67 | import urllib.error 68 | import urllib.parse 69 | import urllib.request 70 | from collections.abc import Mapping 71 | from datetime import datetime, timedelta, timezone 72 | from enum import Enum 73 | from subprocess import CalledProcessError, CompletedProcess 74 | from typing import Any, Dict, Iterable, List, Optional, Union 75 | 76 | logger = logging.getLogger(__name__) 77 | 78 | # The unique Charmhub library identifier, never change it 79 | LIBID = "05394e5893f94f2d90feb7cbe6b633cd" 80 | 81 | # Increment this major API version when introducing breaking changes 82 | LIBAPI = 2 83 | 84 | # Increment this PATCH version before using `charmcraft publish-lib` or reset 85 | # to 0 if you are raising the major API version 86 | LIBPATCH = 3 87 | 88 | 89 | # Regex to locate 7-bit C1 ANSI sequences 90 | ansi_filter = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") 91 | 92 | 93 | def _cache_init(func): 94 | def inner(*args, **kwargs): 95 | if _Cache.cache is None: 96 | _Cache.cache = SnapCache() 97 | return func(*args, **kwargs) 98 | 99 | return inner 100 | 101 | 102 | # recursive hints seems to error out pytest 103 | JSONType = Union[Dict[str, Any], List[Any], str, int, float] 104 | 105 | 106 | class SnapService: 107 | """Data wrapper for snap services.""" 108 | 109 | def __init__( 110 | self, 111 | daemon: Optional[str] = None, 112 | daemon_scope: Optional[str] = None, 113 | enabled: bool = False, 114 | active: bool = False, 115 | activators: List[str] = [], 116 | **kwargs, 117 | ): 118 | self.daemon = daemon 119 | self.daemon_scope = kwargs.get("daemon-scope", None) or daemon_scope 120 | self.enabled = enabled 121 | self.active = active 122 | self.activators = activators 123 | 124 | def as_dict(self) -> Dict: 125 | """Return instance representation as dict.""" 126 | 
return { 127 | "daemon": self.daemon, 128 | "daemon_scope": self.daemon_scope, 129 | "enabled": self.enabled, 130 | "active": self.active, 131 | "activators": self.activators, 132 | } 133 | 134 | 135 | class MetaCache(type): 136 | """MetaCache class used for initialising the snap cache.""" 137 | 138 | @property 139 | def cache(cls) -> "SnapCache": 140 | """Property for returning the snap cache.""" 141 | return cls._cache 142 | 143 | @cache.setter 144 | def cache(cls, cache: "SnapCache") -> None: 145 | """Setter for the snap cache.""" 146 | cls._cache = cache 147 | 148 | def __getitem__(cls, name) -> "Snap": 149 | """Snap cache getter.""" 150 | return cls._cache[name] 151 | 152 | 153 | class _Cache(object, metaclass=MetaCache): 154 | _cache = None 155 | 156 | 157 | class Error(Exception): 158 | """Base class of most errors raised by this library.""" 159 | 160 | def __repr__(self): 161 | """Represent the Error class.""" 162 | return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, self.args) 163 | 164 | @property 165 | def name(self): 166 | """Return a string representation of the model plus class.""" 167 | return "<{}.{}>".format(type(self).__module__, type(self).__name__) 168 | 169 | @property 170 | def message(self): 171 | """Return the message passed as an argument.""" 172 | return self.args[0] 173 | 174 | 175 | class SnapAPIError(Error): 176 | """Raised when an HTTP API error occurs talking to the Snapd server.""" 177 | 178 | def __init__(self, body: Dict, code: int, status: str, message: str): 179 | super().__init__(message) # Makes str(e) return message 180 | self.body = body 181 | self.code = code 182 | self.status = status 183 | self._message = message 184 | 185 | def __repr__(self): 186 | """Represent the SnapAPIError class.""" 187 | return "APIError({!r}, {!r}, {!r}, {!r})".format( 188 | self.body, self.code, self.status, self._message 189 | ) 190 | 191 | 192 | class SnapState(Enum): 193 | """The state of a snap on the system or in the 
cache.""" 194 | 195 | Present = "present" 196 | Absent = "absent" 197 | Latest = "latest" 198 | Available = "available" 199 | 200 | 201 | class SnapError(Error): 202 | """Raised when there's an error running snap control commands.""" 203 | 204 | 205 | class SnapNotFoundError(Error): 206 | """Raised when a requested snap is not known to the system.""" 207 | 208 | 209 | class Snap(object): 210 | """Represents a snap package and its properties. 211 | 212 | `Snap` exposes the following properties about a snap: 213 | - name: the name of the snap 214 | - state: a `SnapState` representation of its install status 215 | - channel: "stable", "candidate", "beta", and "edge" are common 216 | - revision: a string representing the snap's revision 217 | - confinement: "classic" or "strict" 218 | """ 219 | 220 | def __init__( 221 | self, 222 | name, 223 | state: SnapState, 224 | channel: str, 225 | revision: str, 226 | confinement: str, 227 | apps: Optional[List[Dict[str, str]]] = None, 228 | cohort: Optional[str] = "", 229 | ) -> None: 230 | self._name = name 231 | self._state = state 232 | self._channel = channel 233 | self._revision = revision 234 | self._confinement = confinement 235 | self._cohort = cohort 236 | self._apps = apps or [] 237 | self._snap_client = SnapClient() 238 | 239 | def __eq__(self, other) -> bool: 240 | """Equality for comparison.""" 241 | return isinstance(other, self.__class__) and ( 242 | self._name, 243 | self._revision, 244 | ) == (other._name, other._revision) 245 | 246 | def __hash__(self): 247 | """Calculate a hash for this snap.""" 248 | return hash((self._name, self._revision)) 249 | 250 | def __repr__(self): 251 | """Represent the object such that it can be reconstructed.""" 252 | return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) 253 | 254 | def __str__(self): 255 | """Represent the snap object as a string.""" 256 | return "<{}: {}-{}.{} -- {}>".format( 257 | self.__class__.__name__, 258 | self._name, 259 | 
self._revision, 260 | self._channel, 261 | str(self._state), 262 | ) 263 | 264 | def _snap(self, command: str, optargs: Optional[Iterable[str]] = None) -> str: 265 | """Perform a snap operation. 266 | 267 | Args: 268 | command: the snap command to execute 269 | optargs: an (optional) list of additional arguments to pass, 270 | commonly confinement or channel 271 | 272 | Raises: 273 | SnapError if there is a problem encountered 274 | """ 275 | optargs = optargs or [] 276 | args = ["snap", command, self._name, *optargs] 277 | try: 278 | return subprocess.check_output(args, universal_newlines=True) 279 | except CalledProcessError as e: 280 | raise SnapError( 281 | "Snap: {!r}; command {!r} failed with output = {!r}".format( 282 | self._name, args, e.output 283 | ) 284 | ) 285 | 286 | def _snap_daemons( 287 | self, 288 | command: List[str], 289 | services: Optional[List[str]] = None, 290 | ) -> CompletedProcess: 291 | """Perform snap app commands. 292 | 293 | Args: 294 | command: the snap command to execute 295 | services: the snap service to execute command on 296 | 297 | Raises: 298 | SnapError if there is a problem encountered 299 | """ 300 | if services: 301 | # an attempt to keep the command constrained to the snap instance's services 302 | services = ["{}.{}".format(self._name, service) for service in services] 303 | else: 304 | services = [self._name] 305 | 306 | args = ["snap", *command, *services] 307 | 308 | try: 309 | return subprocess.run(args, universal_newlines=True, check=True, capture_output=True) 310 | except CalledProcessError as e: 311 | raise SnapError("Could not {} for snap [{}]: {}".format(args, self._name, e.stderr)) 312 | 313 | def get(self, key: Optional[str], *, typed: bool = False) -> Any: 314 | """Fetch snap configuration values. 315 | 316 | Args: 317 | key: the key to retrieve. Default to retrieve all values for typed=True. 318 | typed: set to True to retrieve typed values (set with typed=True). 319 | Default is to return a string. 
320 | """ 321 | if typed: 322 | config = json.loads(self._snap("get", ["-d", key])) 323 | if key: 324 | return config.get(key) 325 | return config 326 | 327 | if not key: 328 | raise TypeError("Key must be provided when typed=False") 329 | 330 | return self._snap("get", [key]).strip() 331 | 332 | def set(self, config: Dict[str, Any], *, typed: bool = False) -> str: 333 | """Set a snap configuration value. 334 | 335 | Args: 336 | config: a dictionary containing keys and values specifying the config to set. 337 | typed: set to True to convert all values in the config into typed values while 338 | configuring the snap (set with typed=True). Default is not to convert. 339 | """ 340 | if typed: 341 | kv = [f"{key}={json.dumps(val)}" for key, val in config.items()] 342 | return self._snap("set", ["-t"] + kv) 343 | 344 | return self._snap("set", [f"{key}={val}" for key, val in config.items()]) 345 | 346 | def unset(self, key) -> str: 347 | """Unset a snap configuration value. 348 | 349 | Args: 350 | key: the key to unset 351 | """ 352 | return self._snap("unset", [key]) 353 | 354 | def start(self, services: Optional[List[str]] = None, enable: Optional[bool] = False) -> None: 355 | """Start a snap's services. 356 | 357 | Args: 358 | services (list): (optional) list of individual snap services to start (otherwise all) 359 | enable (bool): (optional) flag to enable snap services on start. Default `false` 360 | """ 361 | args = ["start", "--enable"] if enable else ["start"] 362 | self._snap_daemons(args, services) 363 | 364 | def stop(self, services: Optional[List[str]] = None, disable: Optional[bool] = False) -> None: 365 | """Stop a snap's services. 366 | 367 | Args: 368 | services (list): (optional) list of individual snap services to stop (otherwise all) 369 | disable (bool): (optional) flag to disable snap services on stop. 
Default `False` 370 | """ 371 | args = ["stop", "--disable"] if disable else ["stop"] 372 | self._snap_daemons(args, services) 373 | 374 | def logs(self, services: Optional[List[str]] = None, num_lines: Optional[int] = 10) -> str: 375 | """Fetch a snap services' logs. 376 | 377 | Args: 378 | services (list): (optional) list of individual snap services to show logs from 379 | (otherwise all) 380 | num_lines (int): (optional) integer number of log lines to return. Default `10` 381 | """ 382 | args = ["logs", "-n={}".format(num_lines)] if num_lines else ["logs"] 383 | return self._snap_daemons(args, services).stdout 384 | 385 | def connect( 386 | self, plug: str, service: Optional[str] = None, slot: Optional[str] = None 387 | ) -> None: 388 | """Connect a plug to a slot. 389 | 390 | Args: 391 | plug (str): the plug to connect 392 | service (str): (optional) the snap service name to plug into 393 | slot (str): (optional) the snap service slot to plug in to 394 | 395 | Raises: 396 | SnapError if there is a problem encountered 397 | """ 398 | command = ["connect", "{}:{}".format(self._name, plug)] 399 | 400 | if service and slot: 401 | command = command + ["{}:{}".format(service, slot)] 402 | elif slot: 403 | command = command + [slot] 404 | 405 | args = ["snap", *command] 406 | try: 407 | subprocess.run(args, universal_newlines=True, check=True, capture_output=True) 408 | except CalledProcessError as e: 409 | raise SnapError("Could not {} for snap [{}]: {}".format(args, self._name, e.stderr)) 410 | 411 | def hold(self, duration: Optional[timedelta] = None) -> None: 412 | """Add a refresh hold to a snap. 413 | 414 | Args: 415 | duration: duration for the hold, or None (the default) to hold this snap indefinitely. 
416 | """ 417 | hold_str = "forever" 418 | if duration is not None: 419 | seconds = round(duration.total_seconds()) 420 | hold_str = f"{seconds}s" 421 | self._snap("refresh", [f"--hold={hold_str}"]) 422 | 423 | def unhold(self) -> None: 424 | """Remove the refresh hold of a snap.""" 425 | self._snap("refresh", ["--unhold"]) 426 | 427 | def alias(self, application: str, alias: Optional[str] = None) -> None: 428 | """Create an alias for a given application. 429 | 430 | Args: 431 | application: application to get an alias. 432 | alias: (optional) name of the alias; if not provided, the application name is used. 433 | """ 434 | if alias is None: 435 | alias = application 436 | args = ["snap", "alias", f"{self.name}.{application}", alias] 437 | try: 438 | subprocess.check_output(args, universal_newlines=True) 439 | except CalledProcessError as e: 440 | raise SnapError( 441 | "Snap: {!r}; command {!r} failed with output = {!r}".format( 442 | self._name, args, e.output 443 | ) 444 | ) 445 | 446 | def restart( 447 | self, services: Optional[List[str]] = None, reload: Optional[bool] = False 448 | ) -> None: 449 | """Restarts a snap's services. 450 | 451 | Args: 452 | services (list): (optional) list of individual snap services to restart. 453 | (otherwise all) 454 | reload (bool): (optional) flag to use the service reload command, if available. 455 | Default `False` 456 | """ 457 | args = ["restart", "--reload"] if reload else ["restart"] 458 | self._snap_daemons(args, services) 459 | 460 | def _install( 461 | self, 462 | channel: Optional[str] = "", 463 | cohort: Optional[str] = "", 464 | revision: Optional[str] = None, 465 | ) -> None: 466 | """Add a snap to the system. 
467 | 468 | Args: 469 | channel: the channel to install from 470 | cohort: optional, the key of a cohort that this snap belongs to 471 | revision: optional, the revision of the snap to install 472 | """ 473 | cohort = cohort or self._cohort 474 | 475 | args = [] 476 | if self.confinement == "classic": 477 | args.append("--classic") 478 | if channel: 479 | args.append('--channel="{}"'.format(channel)) 480 | if revision: 481 | args.append('--revision="{}"'.format(revision)) 482 | if cohort: 483 | args.append('--cohort="{}"'.format(cohort)) 484 | 485 | self._snap("install", args) 486 | 487 | def _refresh( 488 | self, 489 | channel: Optional[str] = "", 490 | cohort: Optional[str] = "", 491 | revision: Optional[str] = None, 492 | leave_cohort: Optional[bool] = False, 493 | ) -> None: 494 | """Refresh a snap. 495 | 496 | Args: 497 | channel: the channel to install from 498 | cohort: optionally, specify a cohort. 499 | revision: optionally, specify the revision of the snap to refresh 500 | leave_cohort: leave the current cohort. 501 | """ 502 | args = [] 503 | if channel: 504 | args.append('--channel="{}"'.format(channel)) 505 | 506 | if revision: 507 | args.append('--revision="{}"'.format(revision)) 508 | 509 | if not cohort: 510 | cohort = self._cohort 511 | 512 | if leave_cohort: 513 | self._cohort = "" 514 | args.append("--leave-cohort") 515 | elif cohort: 516 | args.append('--cohort="{}"'.format(cohort)) 517 | 518 | self._snap("refresh", args) 519 | 520 | def _remove(self) -> str: 521 | """Remove a snap from the system.""" 522 | return self._snap("remove") 523 | 524 | @property 525 | def name(self) -> str: 526 | """Returns the name of the snap.""" 527 | return self._name 528 | 529 | def ensure( 530 | self, 531 | state: SnapState, 532 | classic: Optional[bool] = False, 533 | channel: Optional[str] = "", 534 | cohort: Optional[str] = "", 535 | revision: Optional[str] = None, 536 | ): 537 | """Ensure that a snap is in a given state. 
538 | 539 | Args: 540 | state: a `SnapState` to reconcile to. 541 | classic: an (Optional) boolean indicating whether classic confinement should be used 542 | channel: the channel to install from 543 | cohort: optional. Specify the key of a snap cohort. 544 | revision: optional. the revision of the snap to install/refresh 545 | 546 | While both channel and revision could be specified, the underlying snap install/refresh 547 | command will determine which one takes precedence (revision at this time) 548 | 549 | Raises: 550 | SnapError if an error is encountered 551 | """ 552 | self._confinement = "classic" if classic or self._confinement == "classic" else "" 553 | 554 | if state not in (SnapState.Present, SnapState.Latest): 555 | # We are attempting to remove this snap. 556 | if self._state in (SnapState.Present, SnapState.Latest): 557 | # The snap is installed, so we run _remove. 558 | self._remove() 559 | else: 560 | # The snap is not installed -- no need to do anything. 561 | pass 562 | else: 563 | # We are installing or refreshing a snap. 564 | if self._state not in (SnapState.Present, SnapState.Latest): 565 | # The snap is not installed, so we install it. 566 | self._install(channel, cohort, revision) 567 | else: 568 | # The snap is installed, but we are changing it (e.g., switching channels). 
569 | self._refresh(channel, cohort, revision) 570 | 571 | self._update_snap_apps() 572 | self._state = state 573 | 574 | def _update_snap_apps(self) -> None: 575 | """Update a snap's apps after snap changes state.""" 576 | try: 577 | self._apps = self._snap_client.get_installed_snap_apps(self._name) 578 | except SnapAPIError: 579 | logger.debug("Unable to retrieve snap apps for {}".format(self._name)) 580 | self._apps = [] 581 | 582 | @property 583 | def present(self) -> bool: 584 | """Report whether or not a snap is present.""" 585 | return self._state in (SnapState.Present, SnapState.Latest) 586 | 587 | @property 588 | def latest(self) -> bool: 589 | """Report whether the snap is the most recent version.""" 590 | return self._state is SnapState.Latest 591 | 592 | @property 593 | def state(self) -> SnapState: 594 | """Report the current snap state.""" 595 | return self._state 596 | 597 | @state.setter 598 | def state(self, state: SnapState) -> None: 599 | """Set the snap state to a given value. 600 | 601 | Args: 602 | state: a `SnapState` to reconcile the snap to. 
603 | 604 | Raises: 605 | SnapError if an error is encountered 606 | """ 607 | if self._state is not state: 608 | self.ensure(state) 609 | self._state = state 610 | 611 | @property 612 | def revision(self) -> str: 613 | """Returns the revision for a snap.""" 614 | return self._revision 615 | 616 | @property 617 | def channel(self) -> str: 618 | """Returns the channel for a snap.""" 619 | return self._channel 620 | 621 | @property 622 | def confinement(self) -> str: 623 | """Returns the confinement for a snap.""" 624 | return self._confinement 625 | 626 | @property 627 | def apps(self) -> List: 628 | """Returns (if any) the installed apps of the snap.""" 629 | self._update_snap_apps() 630 | return self._apps 631 | 632 | @property 633 | def services(self) -> Dict: 634 | """Returns (if any) the installed services of the snap.""" 635 | self._update_snap_apps() 636 | services = {} 637 | for app in self._apps: 638 | if "daemon" in app: 639 | services[app["name"]] = SnapService(**app).as_dict() 640 | 641 | return services 642 | 643 | @property 644 | def held(self) -> bool: 645 | """Report whether the snap has a hold.""" 646 | info = self._snap("info") 647 | return "hold:" in info 648 | 649 | 650 | class _UnixSocketConnection(http.client.HTTPConnection): 651 | """Implementation of HTTPConnection that connects to a named Unix socket.""" 652 | 653 | def __init__(self, host, timeout=None, socket_path=None): 654 | if timeout is None: 655 | super().__init__(host) 656 | else: 657 | super().__init__(host, timeout=timeout) 658 | self.socket_path = socket_path 659 | 660 | def connect(self): 661 | """Override connect to use Unix socket (instead of TCP socket).""" 662 | if not hasattr(socket, "AF_UNIX"): 663 | raise NotImplementedError("Unix sockets not supported on {}".format(sys.platform)) 664 | self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 665 | self.sock.connect(self.socket_path) 666 | if self.timeout is not None: 667 | self.sock.settimeout(self.timeout) 668 | 669 
class _UnixSocketHandler(urllib.request.AbstractHTTPHandler):
    """Implementation of HTTPHandler that uses a named Unix socket."""

    def __init__(self, socket_path: str):
        super().__init__()
        self.socket_path = socket_path

    def http_open(self, req) -> http.client.HTTPResponse:
        """Override http_open to use a Unix socket connection (instead of TCP)."""
        return self.do_open(_UnixSocketConnection, req, socket_path=self.socket_path)


class SnapClient:
    """Snapd API client to talk to HTTP over UNIX sockets.

    In order to avoid shelling out and/or involving sudo in calling the snapd API,
    use a wrapper based on the Pebble Client, trimmed down to only the utility methods
    needed for talking to snapd.
    """

    def __init__(
        self,
        socket_path: str = "/run/snapd.socket",
        opener: Optional[urllib.request.OpenerDirector] = None,
        base_url: str = "http://localhost/v2/",
        timeout: float = 30.0,
    ):
        """Initialize a client instance.

        Args:
            socket_path: a path to the socket on the filesystem. Defaults to /run/snapd.socket
            opener: specifies an opener for unix socket, if unspecified a default is used
            base_url: base url for making requests to the snap client. Defaults to
                http://localhost/v2/
            timeout: timeout in seconds to use when making requests to the API. Default is 30.0s.
        """
        if opener is None:
            opener = self._get_default_opener(socket_path)
        self.opener = opener
        self.base_url = base_url
        self.timeout = timeout

    @classmethod
    def _get_default_opener(cls, socket_path):
        """Build the default opener to use for requests (HTTP over Unix socket)."""
        opener = urllib.request.OpenerDirector()
        opener.add_handler(_UnixSocketHandler(socket_path))
        opener.add_handler(urllib.request.HTTPDefaultErrorHandler())
        opener.add_handler(urllib.request.HTTPRedirectHandler())
        opener.add_handler(urllib.request.HTTPErrorProcessor())
        return opener

    def _request(
        self,
        method: str,
        path: str,
        query: Optional[Dict] = None,
        body: Optional[Dict] = None,
    ) -> JSONType:
        """Make a JSON request to the Snapd server with the given HTTP method and path.

        If query dict is provided, it is encoded and appended as a query string
        to the URL. If body dict is provided, it is serialized as JSON and used
        as the HTTP body (with Content-Type: "application/json"). The resulting
        body is decoded from JSON.
        """
        headers = {"Accept": "application/json"}
        data = None
        if body is not None:
            data = json.dumps(body).encode("utf-8")
            headers["Content-Type"] = "application/json"

        response = self._request_raw(method, path, query, headers, data)
        # snapd wraps every response payload in a "result" envelope.
        return json.loads(response.read().decode())["result"]

    def _request_raw(
        self,
        method: str,
        path: str,
        query: Optional[Dict] = None,
        headers: Optional[Dict] = None,
        data: Optional[bytes] = None,
    ) -> http.client.HTTPResponse:
        """Make a request to the Snapd server; return the raw HTTPResponse object."""
        url = self.base_url + path
        if query:
            url = url + "?" + urllib.parse.urlencode(query)

        if headers is None:
            headers = {}
        request = urllib.request.Request(url, method=method, data=data, headers=headers)

        try:
            response = self.opener.open(request, timeout=self.timeout)
        except urllib.error.HTTPError as e:
            code = e.code
            status = e.reason
            message = ""
            try:
                body = json.loads(e.read().decode())["result"]
            except (IOError, ValueError, KeyError) as e2:
                # Will only happen on read error or if Pebble sends invalid JSON.
                body = {}
                message = "{} - {}".format(type(e2).__name__, e2)
            raise SnapAPIError(body, code, status, message) from e
        except urllib.error.URLError as e:
            raise SnapAPIError({}, 500, "Not found", e.reason) from e
        return response

    def get_installed_snaps(self) -> Dict:
        """Get information about currently installed snaps."""
        return self._request("GET", "snaps")

    def get_snap_information(self, name: str) -> Dict:
        """Query the snap server for information about single snap."""
        return self._request("GET", "find", {"name": name})[0]

    def get_installed_snap_apps(self, name: str) -> List:
        """Query the snap server for apps belonging to a named, currently installed snap."""
        return self._request("GET", "apps", {"names": name, "select": "service"})


class SnapCache(Mapping):
    """An abstraction to represent installed/available packages.

    When instantiated, `SnapCache` iterates through the list of installed
    snaps using the `snapd` HTTP API, and a list of available snaps by reading
    the filesystem to populate the cache. Information about available snaps is lazily-loaded
    from the `snapd` API when requested.
    """

    def __init__(self):
        if not self.snapd_installed:
            raise SnapError("snapd is not installed or not in /usr/bin") from None
        self._snap_client = SnapClient()
        self._snap_map = {}
        # snapd is guaranteed to be present at this point (checked above), so
        # the cache can be populated unconditionally.
        self._load_available_snaps()
        self._load_installed_snaps()

    def __contains__(self, key: str) -> bool:
        """Check if a given snap is in the cache."""
        return key in self._snap_map

    def __len__(self) -> int:
        """Report number of items in the snap cache."""
        return len(self._snap_map)

    def __iter__(self) -> Iterable["Snap"]:
        """Provide iterator for the snap cache."""
        return iter(self._snap_map.values())

    def __getitem__(self, snap_name: str) -> Snap:
        """Return either the installed version or latest version for a given snap."""
        snap = self._snap_map.get(snap_name, None)
        if snap is None:
            # The snapd cache file may not have existed when _snap_map was
            # populated. This is normal.
            try:
                self._snap_map[snap_name] = self._load_info(snap_name)
            except SnapAPIError as e:
                raise SnapNotFoundError("Snap '{}' not found!".format(snap_name)) from e

        return self._snap_map[snap_name]

    @property
    def snapd_installed(self) -> bool:
        """Check whether snapd has been installed on the system."""
        return os.path.isfile("/usr/bin/snap")

    def _load_available_snaps(self) -> None:
        """Load the list of available snaps from disk.

        Leave them empty and lazily load later if asked for.
        """
        if not os.path.isfile("/var/cache/snapd/names"):
            # The snap catalog may not be populated yet; this is normal.
            # snapd updates the cache infrequently and the cache file may not
            # currently exist.
            return

        with open("/var/cache/snapd/names", "r") as f:
            for line in f:
                if line.strip():
                    self._snap_map[line.strip()] = None

    def _load_installed_snaps(self) -> None:
        """Load the installed snaps into the dict."""
        installed = self._snap_client.get_installed_snaps()

        for i in installed:
            snap = Snap(
                name=i["name"],
                state=SnapState.Latest,
                channel=i["channel"],
                revision=i["revision"],
                confinement=i["confinement"],
                apps=i.get("apps", None),
            )
            self._snap_map[snap.name] = snap

    def _load_info(self, name) -> Snap:
        """Load info for snaps which are not installed if requested.

        Args:
            name: a string representing the name of the snap
        """
        info = self._snap_client.get_snap_information(name)

        return Snap(
            name=info["name"],
            state=SnapState.Available,
            channel=info["channel"],
            revision=info["revision"],
            confinement=info["confinement"],
            apps=None,
        )


@_cache_init
def add(
    snap_names: Union[str, List[str]],
    state: Union[str, SnapState] = SnapState.Latest,
    channel: Optional[str] = "",
    classic: Optional[bool] = False,
    cohort: Optional[str] = "",
    revision: Optional[str] = None,
) -> Union[Snap, List[Snap]]:
    """Add a snap to the system.

    Args:
        snap_names: the name or names of the snaps to install
        state: a string or `SnapState` representation of the desired state, one of
            [`Present` or `Latest`]
        channel: an (Optional) channel as a string. Defaults to 'latest'
        classic: an (Optional) boolean specifying whether it should be added with classic
            confinement. Default `False`
        cohort: an (Optional) string specifying the snap cohort to use
        revision: an (Optional) string specifying the snap revision to use

    Raises:
        SnapError if some snaps failed to install or were not found.
    """
    if not channel and not revision:
        channel = "latest"

    snap_names = [snap_names] if isinstance(snap_names, str) else snap_names
    if not snap_names:
        raise TypeError("Expected at least one snap to add, received zero!")

    if isinstance(state, str):
        state = SnapState(state)

    return _wrap_snap_operations(snap_names, state, channel, classic, cohort, revision)


@_cache_init
def remove(snap_names: Union[str, List[str]]) -> Union[Snap, List[Snap]]:
    """Remove specified snap(s) from the system.

    Args:
        snap_names: the name or names of the snaps to remove

    Raises:
        SnapError if some snaps failed to be removed.
    """
    snap_names = [snap_names] if isinstance(snap_names, str) else snap_names
    if not snap_names:
        raise TypeError("Expected at least one snap to remove, received zero!")

    return _wrap_snap_operations(snap_names, SnapState.Absent, "", False)


@_cache_init
def ensure(
    snap_names: Union[str, List[str]],
    state: str,
    channel: Optional[str] = "",
    classic: Optional[bool] = False,
    cohort: Optional[str] = "",
    revision: Optional[int] = None,
) -> Union[Snap, List[Snap]]:
    """Ensure specified snaps are in a given state on the system.

    Args:
        snap_names: the name(s) of the snaps to operate on
        state: a string representation of the desired state, from `SnapState`
        channel: an (Optional) channel as a string. Defaults to 'latest'
        classic: an (Optional) boolean specifying whether it should be added with classic
            confinement. Default `False`
        cohort: an (Optional) string specifying the snap cohort to use
        revision: an (Optional) integer specifying the snap revision to use

    When both channel and revision are specified, the underlying snap install/refresh
    command will determine the precedence (revision at the time of adding this)

    Raises:
        SnapError if the snap is not in the cache.
    """
    if not revision and not channel:
        channel = "latest"

    if state in ("present", "latest") or revision:
        return add(snap_names, SnapState(state), channel, classic, cohort, revision)
    else:
        return remove(snap_names)


def _wrap_snap_operations(
    snap_names: List[str],
    state: SnapState,
    channel: str,
    classic: bool,
    cohort: Optional[str] = "",
    revision: Optional[str] = None,
) -> Union[Snap, List[Snap]]:
    """Wrap common operations for bare commands.

    Returns a single Snap when exactly one snap succeeded, otherwise a list.

    Raises:
        SnapError if any of the requested snaps failed or was not found.
    """
    snaps = {"success": [], "failed": []}

    op = "remove" if state is SnapState.Absent else "install or refresh"

    for s in snap_names:
        try:
            snap = _Cache[s]
            if state is SnapState.Absent:
                snap.ensure(state=SnapState.Absent)
            else:
                snap.ensure(
                    state=state, classic=classic, channel=channel, cohort=cohort, revision=revision
                )
            snaps["success"].append(snap)
        except SnapError as e:
            logger.warning("Failed to {} snap {}: {}!".format(op, s, e.message))
            snaps["failed"].append(s)
        except SnapNotFoundError:
            logger.warning("Snap '{}' not found in cache!".format(s))
            snaps["failed"].append(s)

    if len(snaps["failed"]):
        # Report the operation that was actually attempted ("remove" failures
        # were previously mislabelled as install/refresh failures).
        raise SnapError(
            "Failed to {} snap(s): {}".format(op, ", ".join(list(snaps["failed"])))
        )

    return snaps["success"] if len(snaps["success"]) > 1 else snaps["success"][0]


def install_local(
    filename: str, classic: Optional[bool] = False, dangerous: Optional[bool] = False
) -> Snap:
    """Perform a snap operation.

    Args:
        filename: the path to a local .snap file to install
        classic: whether to use classic confinement
        dangerous: whether --dangerous should be passed to install snaps without a signature

    Raises:
        SnapError if there is a problem encountered
    """
    args = [
        "snap",
        "install",
        filename,
    ]
    if classic:
        args.append("--classic")
    if dangerous:
        args.append("--dangerous")
    try:
        # The last line of output names the installed snap; strip any ANSI
        # escape sequences the CLI may emit before using the name.
        result = subprocess.check_output(args, universal_newlines=True).splitlines()[-1]
        snap_name, _ = result.split(" ", 1)
        snap_name = ansi_filter.sub("", snap_name)

        c = SnapCache()

        try:
            return c[snap_name]
        except SnapAPIError as e:
            logger.error(
                "Could not find snap {} when querying Snapd socket: {}".format(snap_name, e.body)
            )
            raise SnapError("Failed to find snap {} in Snap cache".format(snap_name)) from e
    except CalledProcessError as e:
        raise SnapError("Could not install snap {}: {}".format(filename, e.output)) from e


def _system_set(config_item: str, value: str) -> None:
    """Set system snapd config values.

    Args:
        config_item: name of snap system setting. E.g. 'refresh.hold'
        value: value to assign
    """
    args = ["snap", "set", "system", "{}={}".format(config_item, value)]
    try:
        subprocess.check_call(args, universal_newlines=True)
    except CalledProcessError as e:
        raise SnapError(
            "Failed setting system config '{}' to '{}'".format(config_item, value)
        ) from e


def hold_refresh(days: int = 90, forever: bool = False) -> None:
    """Set the system-wide snap refresh hold.

    Args:
        days: number of days to hold system refreshes for. Maximum 90.
            Set to zero to remove hold.
        forever: if True, will set a hold forever.

    Raises:
        TypeError: if *days* is not an int or *forever* is not a bool.
        ValueError: if *days* is outside the 1-90 range (and not 0).
    """
    if not isinstance(forever, bool):
        raise TypeError("forever must be a bool")
    if not isinstance(days, int):
        raise TypeError("days must be an int")
    if forever:
        _system_set("refresh.hold", "forever")
        logger.info("Set system-wide snap refresh hold to: forever")
    elif days == 0:
        _system_set("refresh.hold", "")
        logger.info("Removed system-wide snap refresh hold")
    else:
        # Currently the snap daemon can only hold for a maximum of 90 days
        if not 1 <= days <= 90:
            raise ValueError("days must be between 1 and 90")
        # Add the number of days to current time
        target_date = datetime.now(timezone.utc).astimezone() + timedelta(days=days)
        # Format for the correct datetime format
        hold_date = target_date.strftime("%Y-%m-%dT%H:%M:%S%z")
        # Python dumps the offset in format '+0100', we need '+01:00'
        hold_date = "{0}:{1}".format(hold_date[:-2], hold_date[-2:])
        # Actually set the hold date
        _system_set("refresh.hold", hold_date)
        logger.info("Set system-wide snap refresh hold to: %s", hold_date)