├── requirements.txt ├── .gitignore ├── renovate.json ├── charmcraft.yaml ├── .github ├── workflows │ ├── cla_check.yaml │ ├── pull-request.yaml │ ├── renovate.yaml │ ├── release.yaml │ ├── build-and-test.yaml │ └── promote.yaml ├── renovate-config.js └── renovate.json ├── tests ├── unit │ └── test_charm.py └── integration │ └── test_charm.py ├── pyproject.toml ├── CONTRIBUTING.md ├── metadata.yaml ├── tox.ini ├── config.yaml ├── README.md ├── src └── charm.py ├── lib └── charms │ └── operator_libs_linux │ ├── v1 │ └── systemd.py │ └── v0 │ └── apt.py └── LICENSE /requirements.txt: -------------------------------------------------------------------------------- 1 | ops >= 1.5.0 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | venv/ 2 | build/ 3 | *.charm 4 | .tox/ 5 | .coverage 6 | __pycache__/ 7 | *.py[cod] 8 | .idea 9 | .vscode/ 10 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:recommended" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /charmcraft.yaml: -------------------------------------------------------------------------------- 1 | type: charm 2 | bases: 3 | - build-on: 4 | - name: ubuntu 5 | channel: "22.04" 6 | run-on: 7 | - name: ubuntu 8 | channel: "22.04" 9 | -------------------------------------------------------------------------------- /.github/workflows/cla_check.yaml: -------------------------------------------------------------------------------- 1 | name: CLA check 2 | 3 | on: 4 | pull_request: 5 | branches: [main] 6 | 7 | jobs: 8 | cla-check: 9 | runs-on: ubuntu-22.04 10 | steps: 11 | - name: Check if Canonical's Contributor License 
Agreement has been signed 12 | uses: canonical/has-signed-canonical-cla@v1 13 | -------------------------------------------------------------------------------- /.github/renovate-config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | branchPrefix: "renovate/", 3 | dryRun: null, 4 | username: "renovate-release", 5 | gitAuthor: "Renovate Bot ", 6 | onboarding: true, 7 | platform: "github", 8 | includeForks: true, 9 | repositories: ["canonical/anbox-cloud-nfs-operator"], 10 | } 11 | -------------------------------------------------------------------------------- /.github/workflows/pull-request.yaml: -------------------------------------------------------------------------------- 1 | name: Pull Request 2 | on: 3 | pull_request: 4 | paths-ignore: 5 | - ".github/renovate*" 6 | - ".github/workflows/release.yaml" 7 | - ".github/workflows/renovate.yaml" 8 | - ".github/workflows/update-libs.yaml" 9 | - ".gitignore" 10 | - ".jujuignore" 11 | push: 12 | branches: 13 | - "renovate/*" 14 | 15 | concurrency: 16 | group: ${{ github.workflow }}-${{ github.ref }} 17 | cancel-in-progress: true 18 | 19 | jobs: 20 | test: 21 | uses: ./.github/workflows/build-and-test.yaml 22 | -------------------------------------------------------------------------------- /tests/unit/test_charm.py: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Simon 2 | # See LICENSE file for licensing details. 
3 | 4 | import unittest 5 | 6 | from charm import NFSOperatorCharm 7 | from ops.model import MaintenanceStatus 8 | from ops.testing import Harness 9 | 10 | 11 | class TestCharm(unittest.TestCase): 12 | def setUp(self): 13 | self.harness = Harness(NFSOperatorCharm) 14 | self.addCleanup(self.harness.cleanup) 15 | self.harness.begin() 16 | 17 | def test_install(self): 18 | # Check the charm is in MaintenanceStatus 19 | self.assertIsInstance(self.harness.model.unit.status, MaintenanceStatus) 20 | -------------------------------------------------------------------------------- /.github/workflows/renovate.yaml: -------------------------------------------------------------------------------- 1 | # workflow for checking package versions and opening PRs to bump 2 | name: Renovate 3 | on: 4 | schedule: 5 | - cron: "0 12 * * *" 6 | workflow_dispatch: 7 | workflow_call: 8 | 9 | jobs: 10 | renovate: 11 | runs-on: ubuntu-22.04 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 15 | 16 | - name: Self-hosted Renovate 17 | uses: renovatebot/github-action@a6e57359b32af9a54d5b3b6603011f50629a0a05 # v40.1.2 18 | with: 19 | configurationFile: .github/renovate-config.js 20 | token: ${{ github.token }} 21 | -------------------------------------------------------------------------------- /tests/integration/test_charm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright 2023 Canonical Ltd 3 | # See LICENSE file for licensing details. 
4 | 5 | import logging 6 | from pathlib import Path 7 | 8 | import pytest 9 | import yaml 10 | from pytest_operator.plugin import OpsTest 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) 15 | APP_NAME = METADATA["name"] 16 | 17 | 18 | @pytest.mark.abort_on_fail 19 | async def test_can_build_charm(ops_test: OpsTest): 20 | """Build the charm-under-test and deploy it together with related charms. 21 | 22 | Assert on the unit status before any relations/configurations take place. 23 | """ 24 | # Build and deploy charm from local source folder 25 | charm = await ops_test.build_charm(".") 26 | assert charm 27 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # Testing tools configuration 2 | [tool.coverage.run] 3 | branch = true 4 | 5 | [tool.coverage.report] 6 | show_missing = true 7 | 8 | [tool.pytest.ini_options] 9 | minversion = "6.0" 10 | log_cli_level = "INFO" 11 | 12 | # Formatting tools configuration 13 | [tool.black] 14 | line-length = 99 15 | 16 | # Linting tools configuration 17 | [tool.ruff] 18 | line-length = 99 19 | respect-gitignore = true 20 | target-version = "py310" 21 | exclude = ["__pycache__", "*.egg_info"] 22 | 23 | [tool.ruff.lint] 24 | select = ["E", "W", "F", "C", "N", "D", "I001"] 25 | extend-ignore = [ 26 | "D203", 27 | "D204", 28 | "D213", 29 | "D215", 30 | "D400", 31 | "D404", 32 | "D406", 33 | "D407", 34 | "D408", 35 | "D409", 36 | "D413", 37 | ] 38 | ignore = ["E501", "D107"] 39 | per-file-ignores = {"tests/*" = ["D100","D101","D102","D103","D104"]} 40 | 41 | [tool.ruff.lint.mccabe] 42 | max-complexity = 10 43 | 44 | [tool.codespell] 45 | skip = "build,lib,venv,icon.svg,.tox,.git,.mypy_cache,.ruff_cache,.vscode,.coverage" 46 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: 
-------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | To make contributions to this charm, you'll need a working [development setup](https://juju.is/docs/sdk/dev-setup). 4 | 5 | You can use the environments created by `tox` for development: 6 | 7 | ```shell 8 | tox --notest -e unit 9 | source .tox/unit/bin/activate 10 | ``` 11 | 12 | ## Testing 13 | 14 | This project uses `tox` for managing test environments. There are some pre-configured environments 15 | that can be used for linting and formatting code when you're preparing contributions to the charm: 16 | 17 | ```shell 18 | tox -e fmt # update your code according to linting rules 19 | tox -e lint # code style 20 | tox -e unit # unit tests 21 | tox -e integration # integration tests 22 | tox # runs 'lint' and 'unit' environments 23 | ``` 24 | 25 | ## Build the charm 26 | 27 | Build the charm in this git repository using: 28 | 29 | ```shell 30 | charmcraft pack 31 | ``` 32 | 33 | ## Canonical Contributor Agreement 34 | Canonical welcomes contributions to the Anbox Cloud NFS Operator. Please check out our [contributor agreement](https://ubuntu.com/legal/contributors) if you’re interested in contributing to the solution. 
35 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release to latest/edge 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths-ignore: 8 | - ".github/renovate*" 9 | - ".github/workflows/release.yaml" 10 | - ".github/workflows/renovate.yaml" 11 | - ".github/workflows/update-libs.yaml" 12 | - ".gitignore" 13 | - "tox.ini" 14 | 15 | jobs: 16 | test: 17 | uses: ./.github/workflows/build-and-test.yaml 18 | 19 | release-to-charmhub: 20 | name: Release to CharmHub 21 | needs: 22 | - test 23 | runs-on: ubuntu-22.04 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 27 | with: 28 | fetch-depth: 0 29 | - name: Select charmhub channel 30 | uses: canonical/charming-actions/channel@631c2d944da2bd12430f1f3a954c8fffcf2385cd # 2.4.0 31 | id: channel 32 | - name: Upload charm to charmhub 33 | uses: canonical/charming-actions/upload-charm@631c2d944da2bd12430f1f3a954c8fffcf2385cd # 2.4.0 34 | with: 35 | credentials: "${{ secrets.CHARMHUB_TOKEN }}" 36 | github-token: "${{ secrets.GITHUB_TOKEN }}" 37 | channel: "${{ steps.channel.outputs.name }}" 38 | charmcraft-channel: "latest/stable" 39 | -------------------------------------------------------------------------------- /.github/workflows/build-and-test.yaml: -------------------------------------------------------------------------------- 1 | name: Build/Test 2 | 3 | on: 4 | workflow_call: 5 | 6 | jobs: 7 | lint: 8 | name: Lint 9 | runs-on: ubuntu-22.04 10 | steps: 11 | - name: Checkout 12 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 13 | - name: Install dependencies 14 | run: python3 -m pip install tox 15 | - name: Run linters 16 | run: tox -e lint 17 | 18 | unit-test: 19 | name: Unit tests 20 | runs-on: ubuntu-22.04 21 | steps: 22 | - name: Checkout 23 | uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
24 |         - name: Install dependencies
25 |           run: python -m pip install tox
26 |         - name: Run tests
27 |           run: tox -e unit
28 | 
29 |   integration-test:
30 |     name: Integration tests (lxd)
31 |     runs-on: ubuntu-22.04
32 |     needs:
33 |       - unit-test
34 |       - lint
35 |     steps:
36 |       - name: Checkout
37 |         uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
38 |       - name: Setup operator environment
39 |         uses: charmed-kubernetes/actions-operator@main
40 |         with:
41 |           provider: lxd
42 |           juju-channel: 3.3/stable
43 |       - name: Run integration tests
44 |         run: tox -e integration
45 | 
--------------------------------------------------------------------------------
/metadata.yaml:
--------------------------------------------------------------------------------
1 | name: anbox-cloud-nfs
2 | display-name: Anbox Cloud NFS operator
3 | summary: Operator to provide additional NFS based storage to Anbox containers
4 | website: https://anbox-cloud.io
5 | issues: https://bugs.launchpad.net/anbox-cloud
6 | description: |
7 |   The Anbox Cloud NFS operator provides additional NFS storage to Anbox containers.
8 | 
9 |   In various use cases it is required to provide additional data to Anbox containers,
10 |   for example for game streaming, where game assets need to be provisioned ahead of time
11 |   so the games do not have to download them on first start.
12 | 
13 |   This charmed operator implements this by adding an NFS mount to the machine which
14 |   will provide access to remote storage. To avoid multiple network transfers of the
15 |   same data, the operator will install a cache, powered by cachefilesd.
16 | subordinate: true
17 | requires:
18 |   juju-info:
19 |     interface: juju-info
20 |     scope: container
21 | resources:
22 |   amazon-efs-utils-deb:
23 |     type: file
24 |     filename: amazon-efs-utils.deb
25 |     description: |
26 |       The Debian package to install Amazon helper utils to mount an EFS volume. 
27 | Please refer to the official AWS documentation 28 | [here](https://docs.aws.amazon.com/efs/latest/ug/installing-amazon-efs-utils.html#installing-other-distro) 29 | on how to obtain the package. 30 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base", 5 | ":disableDependencyDashboard", 6 | ":automergeDigest", 7 | ":automergePatch", 8 | ":automergeMinor", 9 | ":rebaseStalePrs", 10 | ":semanticCommits", 11 | ":semanticCommitScope(deps)", 12 | "docker:pinDigests", 13 | "helpers:pinGitHubActionDigests", 14 | "regexManagers:dockerfileVersions" 15 | ], 16 | "automergeType": "branch", 17 | "packageRules": [ 18 | { 19 | "groupName": "github actions", 20 | "matchManagers": ["github-actions"], 21 | "automerge": true, 22 | "schedule": ["on monday"] 23 | }, 24 | { 25 | "groupName": "testing deps", 26 | "matchFiles": ["tox.ini"], 27 | "matchUpdateTypes": ["major", "minor", "patch", "pin", "digest"], 28 | "automerge": true, 29 | "schedule": ["on monday"] 30 | }, 31 | { 32 | "groupName": "renovate packages", 33 | "matchSourceUrlPrefixes": ["https://github.com/renovatebot/"], 34 | "matchUpdateTypes": ["major", "minor", "patch", "pin", "digest"], 35 | "automerge": true, 36 | "schedule": ["on monday"] 37 | } 38 | ], 39 | "regexManagers": [ 40 | { 41 | "fileMatch": ["tox.ini"], 42 | "matchStrings": [ 43 | "# renovate: datasource=(?\\S+)\n\\s+(?.*?)==(?.*?)\\n" 44 | ] 45 | } 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /.github/workflows/promote.yaml: -------------------------------------------------------------------------------- 1 | name: Promote Charm 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | promotion: 7 | type: choice 8 | description: Channel to promote from 9 | options: 10 
| - edge -> candidate 11 | - candidate -> stable 12 | 13 | jobs: 14 | promote: 15 | name: Promote Charm 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 20 | - name: Set target channel 21 | env: 22 | PROMOTE_FROM: ${{ github.event.inputs.promotion }} 23 | run: | 24 | if [ "${PROMOTE_FROM}" == "edge -> candidate" ]; then 25 | echo "promote-from=edge" >> ${GITHUB_ENV} 26 | echo "promote-to=candidate" >> ${GITHUB_ENV} 27 | elif [ "${PROMOTE_FROM}" == "candidate -> stable" ]; then 28 | echo "promote-from=candidate" >> ${GITHUB_ENV} 29 | echo "promote-to=stable" >> ${GITHUB_ENV} 30 | fi 31 | - name: Promote Charm 32 | uses: canonical/charming-actions/release-charm@2.4.0 33 | with: 34 | credentials: ${{ secrets.CHARMHUB_TOKEN }} 35 | github-token: ${{ secrets.GITHUB_TOKEN }} 36 | destination-channel: latest/${{ env.promote-to }} 37 | origin-channel: latest/${{ env.promote-from }} 38 | charmcraft-channel: latest/stable 39 | base-channel: "22.04" 40 | 41 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Copyright 2023 Simon 2 | # See LICENSE file for licensing details. 
3 | 4 | [tox] 5 | skipsdist=True 6 | skip_missing_interpreters = True 7 | envlist = fmt, lint, unit 8 | 9 | [vars] 10 | src_path = {toxinidir}/src/ 11 | tst_path = {toxinidir}/tests/ 12 | ;lib_path = {toxinidir}/lib/charms/operator_name_with_underscores 13 | all_path = {[vars]src_path} {[vars]tst_path} 14 | 15 | [testenv] 16 | setenv = 17 | PYTHONPATH = {toxinidir}:{toxinidir}/lib:{[vars]src_path} 18 | PYTHONBREAKPOINT=pdb.set_trace 19 | PY_COLORS=1 20 | passenv = 21 | PYTHONPATH 22 | CHARM_BUILD_DIR 23 | MODEL_SETTINGS 24 | 25 | [testenv:fmt] 26 | description = Apply coding style standards to code 27 | deps = 28 | black 29 | ruff 30 | commands = 31 | black {[vars]all_path} 32 | ruff --fix {[vars]all_path} 33 | 34 | [testenv:lint] 35 | description = Check code against coding style standards 36 | deps = 37 | black 38 | ruff 39 | codespell 40 | commands = 41 | # uncomment the following line if this charm owns a lib 42 | # codespell {[vars]lib_path} 43 | codespell {toxinidir} \ 44 | --skip {toxinidir}/.git \ 45 | --skip {toxinidir}/.tox \ 46 | --skip {toxinidir}/build \ 47 | --skip {toxinidir}/lib \ 48 | --skip {toxinidir}/venv \ 49 | --skip {toxinidir}/.mypy_cache \ 50 | --skip {toxinidir}/icon.svg 51 | 52 | ruff {[vars]all_path} 53 | black --check --diff {[vars]all_path} 54 | 55 | [testenv:unit] 56 | description = Run unit tests 57 | deps = 58 | pytest 59 | coverage[toml] 60 | -r{toxinidir}/requirements.txt 61 | commands = 62 | coverage run --source={[vars]src_path} \ 63 | -m pytest \ 64 | --ignore={[vars]tst_path}integration \ 65 | --tb native \ 66 | -v \ 67 | -s \ 68 | {posargs} 69 | coverage report 70 | 71 | [testenv:integration] 72 | description = Run integration tests 73 | deps = 74 | pytest 75 | juju 76 | pytest-operator 77 | -r{toxinidir}/requirements.txt 78 | commands = 79 | pytest -v \ 80 | -s \ 81 | --tb native \ 82 | --ignore={[vars]tst_path}unit \ 83 | --log-cli-level=INFO \ 84 | {posargs} 85 | 
-------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | options: 2 | mount_type: 3 | description: | 4 | The type of mount to use while mounting an nfs volume. There are two mount 5 | types allowed: nfs, efs. 6 | 7 | The `efs` mount type is specific to AWS EFS mount volumes. The EFS mount 8 | can work with TLS enabled. The security groups need to be configured for 9 | this to work properly. More information about setting up the security groups 10 | can be found here: 11 | https://docs.aws.amazon.com/efs/latest/ug/accessing-fs-create-security-groups.html 12 | 13 | The `nfs` mount type is generic nfs mount which can be used. 14 | 15 | If no mount type is provided `nfs` is used as the default mount type. 16 | default: "nfs" 17 | type: string 18 | nfs_path: 19 | description: | 20 | Remote NFS storage path to use for the local mount the operator will 21 | provision. A typical path has the following format: : 22 | 23 | If the mount_type is `efs`, the `` can be the EFS Filesystem id of 24 | the created efs volume on AWS. 25 | If left empty, no mount will be added to the machine. 26 | default: "" 27 | type: string 28 | nfs_extra_options: 29 | description: | 30 | Extra options for the NFS mount. To enable TLS for the `efs` mount type, 31 | a `tls` option can be passed. 32 | default: "" 33 | type: string 34 | cachefilesd_brun: 35 | description: | 36 | Given in percentage of blocks available in the underlying filesystem. 37 | 38 | If the amount of free space and the number of available files in the 39 | cache rises above both this limit, then culling is turned off. 40 | default: 10 41 | type: int 42 | cachefilesd_bcull: 43 | description: | 44 | Given in percentage of blocks available in the underlying filesystem. 
45 | 46 | If the amount of available space or the number of available files in 47 | the cache falls below either of these limits, then culling is started. 48 | default: 7 49 | type: int 50 | cachefilesd_bstop: 51 | description: | 52 | Given in percentage of blocks available in the underlying filesystem. 53 | 54 | If the amount of available space or the number of available files in 55 | the cache falls below either of these limits, then no further 56 | allocation of disk space or files is permitted until culling has 57 | raised things above these limits again. 58 | default: 3 59 | type: int 60 | cachefilesd_frun: 61 | description: | 62 | Given in percentage of files available in the underlying filesystem. 63 | 64 | If the amount of free space and the number of available files in the 65 | cache rises above both this limit, then culling is turned off. 66 | default: 10 67 | type: int 68 | cachefilesd_fcull: 69 | description: | 70 | Given in percentage of files available in the underlying filesystem. 71 | 72 | If the amount of available space or the number of available files in 73 | the cache falls below either of these limits, then culling is started. 74 | default: 7 75 | type: int 76 | cachefilesd_fstop: 77 | description: | 78 | Given in percentage of files available in the underlying filesystem. 79 | 80 | If the amount of available space or the number of available files in 81 | the cache falls below either of these limits, then no further 82 | allocation of disk space or files is permitted until culling has 83 | raised things above these limits again. 84 | default: 3 85 | type: int 86 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Anbox Cloud NFS operator 2 | 3 | ## Description 4 | 5 | The Anbox Cloud NFS operator charm allows providing additional storage to LXD 6 | nodes via NFS. 
This is intended to be used to provide additional shared data 7 | to Anbox containers, e.g. game assets. 8 | 9 | Current features: 10 | 11 | * Supports two modes to mount an NFS path, namely `nfs` and `efs` 12 | * TLS support with EFS 13 | 14 | ## Usage 15 | 16 | ### Basic Usage 17 | 18 | This operator is a subordinate charm and attaches itself to a principal charm. 19 | To deploy the charm using its default configuration: 20 | 21 | ```shell 22 | juju deploy anbox-cloud-nfs 23 | ``` 24 | 25 | To mount a basic network path use the following config: 26 | 27 | ```yaml 28 | applications: 29 | nfs-op: 30 | charm: anbox-cloud-nfs 31 | channel: stable 32 | options: 33 | mount_type: nfs 34 | nfs_path: :/ 35 | ``` 36 | 37 | ### Use EFS filesystem on AWS without TLS 38 | 39 | > Note: This feature has been introduced recently. So if you are 40 | > running an older version of the charm, please upgrade the charm using 41 | > `juju refresh anbox-cloud-nfs --channel latest/stable`. 42 | 43 | Using this feature requires the user to provide the charm with a debian package 44 | named [aws-efs-utils](https://docs.aws.amazon.com/efs/latest/ug/installing-amazon-efs-utils.html#installing-other-distro) 45 | 46 | ```shell 47 | juju deploy anbox-cloud-nfs --resource amazon-efs-utils-deb= 48 | ``` 49 | 50 | To mount an EFS filesystem on the machine using this charm, the `mount_type` 51 | should be set to `efs`. 52 | 53 | From shell: 54 | ```shell 55 | juju config anbox-cloud-nfs mount_type=efs 56 | ``` 57 | 58 | In a bundle: 59 | ```yaml 60 | applications: 61 | nfs-op: 62 | charm: anbox-cloud-nfs 63 | channel: latest/stable 64 | options: 65 | mount_type: efs 66 | nfs_path: :/ 67 | ``` 68 | 69 | ### Using EFS with TLS 70 | 71 | To use the EFS mount with TLS, the config option for `nfs_extra_options` must be 72 | set to `tls`. 
73 | 74 | ```shell 75 | juju config anbox-cloud-nfs nfs_extra_options=tls 76 | ``` 77 | 78 | > Note: While setting up EFS mounts please make sure the security groups are 79 | > correctly setup for the EFS volume. For information on setting the security 80 | > groups, please follow [this](https://docs.aws.amazon.com/efs/latest/ug/accessing-fs-create-security-groups.html) 81 | > guide. 82 | 83 | ## Integrations (Relations) 84 | 85 | Supported [relations](https://juju.is/docs/olm/relations): 86 | 87 | #### `juju-info` interface: 88 | 89 | The NFS Operator supports a `juju-info` interface to allow clients to connect 90 | to the subordinate charm. 91 | 92 | ```yaml 93 | provides: 94 | juju-info: 95 | interface: juju-info 96 | ``` 97 | 98 | juju v2.x: 99 | 100 | ```shell 101 | juju relate anbox-cloud-nfs application 102 | ``` 103 | 104 | juju v3.x 105 | 106 | ```shell 107 | juju integrate anbox-cloud-nfs application 108 | ``` 109 | 110 | To remove a relation: 111 | 112 | ```shell 113 | juju remove-relation anbox-cloud-nfs application 114 | ``` 115 | 116 | ## Security 117 | Security issues in the Operator can be reported through [LaunchPad](https://wiki.ubuntu.com/DebuggingSecurity#How%20to%20File) on the [Anbox Cloud](https://bugs.launchpad.net/anbox-cloud) project. Please do not file GitHub issues about security issues. 118 | 119 | ## Contributing 120 | Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines on enhancements to this charm following best practice guidelines, and [CONTRIBUTING.md](https://github.com/canonical/anbox-cloud-nfs-operator/blob/main/CONTRIBUTING.md) for developer guidance. 121 | 122 | ## License 123 | The Charmed Operator is distributed under the Apache Software License, version 2.0. 
124 | -------------------------------------------------------------------------------- /src/charm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """NFS Operator charm for Anbox Cloud.""" 3 | # Copyright 2023 Canonical Ltd 4 | # See LICENSE file for licensing details. 5 | 6 | import logging 7 | import os 8 | import subprocess 9 | 10 | import charms.operator_libs_linux.v0.apt as apt 11 | import charms.operator_libs_linux.v1.systemd as systemd 12 | from ops.charm import CharmBase 13 | from ops.framework import StoredState 14 | from ops.main import main 15 | from ops.model import ActiveStatus, BlockedStatus 16 | 17 | logger = logging.getLogger(__name__) 18 | 19 | NFS_BASE_UNIT_NAME = r"media-anbox\x2ddata" 20 | NFS_MOUNT_UNIT_NAME = f"{NFS_BASE_UNIT_NAME}.mount" 21 | REQUIRED_APT_PACKAGES = ["cachefilesd", "nfs-common"] 22 | MOUNT_TARGET_PATH = "/media/anbox-data" 23 | ALLOWED_MOUNT_TYPES = ("efs", "nfs") 24 | 25 | 26 | class NFSOperatorCharm(CharmBase): 27 | """NFS Operator charm for Anbox Cloud.""" 28 | 29 | state = StoredState() 30 | 31 | def __init__(self, *args): 32 | super().__init__(*args) 33 | self.framework.observe(self.on.config_changed, self._on_config_changed) 34 | self.framework.observe(self.on.install, self._on_install) 35 | self.framework.observe(self.on.stop, self._on_stop) 36 | 37 | self.state.set_default( 38 | nfs_path=None, 39 | nfs_extra_options=None, 40 | ) 41 | 42 | def _get_nfs_path(self): 43 | return self.model.config["nfs_path"] or None 44 | 45 | def _on_install(self, event): 46 | self._install_dependencies() 47 | self._setup_cachefilesd() 48 | 49 | extra_opts = self.model.config["nfs_extra_options"] 50 | self._render_mount_unit(self._get_nfs_path(), MOUNT_TARGET_PATH, extra_opts) 51 | self.unit.status = ActiveStatus() 52 | 53 | def _on_stop(self, event): 54 | apt.remove_package(REQUIRED_APT_PACKAGES) 55 | 56 | def _on_config_changed(self, event): 57 | 
self._setup_cachefilesd() 58 | extra_opts = self.model.config["nfs_extra_options"] 59 | self._render_mount_unit(self._get_nfs_path(), MOUNT_TARGET_PATH, extra_opts) 60 | 61 | def _install_dependencies(self): 62 | if self.config["mount_type"] not in ALLOWED_MOUNT_TYPES: 63 | raise ValueError("Invalid value for mount_type") 64 | 65 | apt.update() 66 | apt.add_package(REQUIRED_APT_PACKAGES) 67 | if self.config["mount_type"] == "efs": 68 | self._install_aws_efs() 69 | 70 | def _install_aws_efs(self): 71 | res_path = None 72 | try: 73 | res_path = self.model.resources.fetch("amazon-efs-utils-deb") 74 | except NameError: 75 | self.unit.status = BlockedStatus( 76 | "Cannot find resource to install `amazon-efs-utils` package" 77 | ) 78 | raise 79 | cmd = ["sudo", "apt", "install", "-y", res_path] 80 | try: 81 | subprocess.run(cmd, check=True) 82 | except subprocess.CalledProcessError as e: 83 | logging.error(f"failed to install efs helper package: {e}") 84 | raise 85 | 86 | def _setup_cachefilesd(self): 87 | brun = self.model.config["cachefilesd_brun"] or 10 88 | bcull = self.model.config["cachefilesd_bcull"] or 7 89 | bstop = self.model.config["cachefilesd_bstop"] or 3 90 | 91 | if not brun > bcull and not bcull > bstop: 92 | raise Exception("Invalid cachefilesd configuration") 93 | 94 | frun = self.model.config["cachefilesd_frun"] or 10 95 | fcull = self.model.config["cachefilesd_fcull"] or 7 96 | fstop = self.model.config["cachefilesd_fstop"] or 3 97 | 98 | if not frun > fcull and not fcull > fstop: 99 | raise Exception("Invalid cachefilesd configuration") 100 | 101 | defaults = """# DO NOT MODIFY - This file is managed by the Anbox Cloud NFS operator charm 102 | RUN=yes 103 | DAEMON_OPTS= 104 | """ 105 | self._write_content("/etc/default/cachefilesd", defaults) 106 | 107 | config = f"""# DO NOT MODIFY - This file is managed by the Anbox Cloud NFS operator charm 108 | dir /var/cache/fscache 109 | tag anbox-cloud 110 | brun {brun}% 111 | bcull {bcull}% 112 | bstop 
{bstop}% 113 | frun {frun}% 114 | fcull {fcull}% 115 | fstop {fstop}% 116 | """ 117 | self._write_content("/etc/cachefilesd.conf", config) 118 | 119 | systemd.service_restart("cachefilesd") 120 | 121 | def _write_content(self, path, content): 122 | if os.path.exists(path): 123 | os.remove(path) 124 | with open(os.open(path, os.O_CREAT | os.O_WRONLY, 0o644), "w+") as f: 125 | f.write(content) 126 | 127 | def _get_unit_path(self, name): 128 | return f"/etc/systemd/system/{name}" 129 | 130 | def _remove_mount_unit(self, name): 131 | unit_path = self._get_unit_path(name) 132 | if os.path.exists(unit_path): 133 | systemd.serivce_stop(NFS_MOUNT_UNIT_NAME) 134 | os.remove(unit_path) 135 | 136 | def _render_mount_unit(self, path, target_path, extra_opts=None): 137 | mount_type = self.config["mount_type"] 138 | if self.state.nfs_path == path and self.state.nfs_extra_options == extra_opts: 139 | return 140 | 141 | if not path: 142 | self._remove_mount_unit(NFS_MOUNT_UNIT_NAME) 143 | return 144 | 145 | if len(target_path) == 0: 146 | raise Exception("No target path for NFS mount given") 147 | 148 | unit_path = self._get_unit_path(NFS_MOUNT_UNIT_NAME) 149 | if os.path.exists(unit_path): 150 | systemd.service_stop(NFS_MOUNT_UNIT_NAME) 151 | 152 | mount_opts = "soft,async,fsc" 153 | 154 | if extra_opts and len(extra_opts) > 0: 155 | mount_opts += f",{extra_opts}" 156 | 157 | content = f"""# DO NOT MODIFY - This file is managed by the Anbox Cloud NFS operator charm 158 | [Unit] 159 | Description=NFS mount for {path} 160 | After=network-online.target 161 | Wants=network-online.target 162 | 163 | [Mount] 164 | Type={mount_type} 165 | What={path} 166 | Where={target_path} 167 | Options={mount_opts} 168 | 169 | [Install] 170 | WantedBy=multi-user.target 171 | """ 172 | 173 | self._write_content(unit_path, content) 174 | 175 | systemd.daemon_reload() 176 | systemd.service_resume(NFS_MOUNT_UNIT_NAME) 177 | systemd.service_start(NFS_MOUNT_UNIT_NAME) 178 | 179 | self.state.nfs_path = 
path 180 | self.state.nfs_extra_options = extra_opts 181 | 182 | 183 | if __name__ == "__main__": # pragma: nocover 184 | main(NFSOperatorCharm) 185 | -------------------------------------------------------------------------------- /lib/charms/operator_libs_linux/v1/systemd.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Canonical Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | 16 | """Abstractions for stopping, starting and managing system services via systemd. 17 | 18 | This library assumes that your charm is running on a platform that uses systemd. E.g., 19 | Centos 7 or later, Ubuntu Xenial (16.04) or later. 20 | 21 | For the most part, we transparently provide an interface to a commonly used selection of 22 | systemd commands, with a few shortcuts baked in. For example, service_pause and 23 | service_resume with run the mask/unmask and enable/disable invocations. 24 | 25 | Example usage: 26 | ```python 27 | from charms.operator_libs_linux.v0.systemd import service_running, service_reload 28 | 29 | # Start a service 30 | if not service_running("mysql"): 31 | success = service_start("mysql") 32 | 33 | # Attempt to reload a service, restarting if necessary 34 | success = service_reload("nginx", restart_on_failure=True) 35 | ``` 36 | 37 | """ 38 | 39 | import logging 40 | import subprocess 41 | 42 | __all__ = [ # Don't export `_systemctl`. 
(It's not the intended way of using this lib.) 43 | "service_pause", 44 | "service_reload", 45 | "service_restart", 46 | "service_resume", 47 | "service_running", 48 | "service_start", 49 | "service_stop", 50 | "daemon_reload", 51 | ] 52 | 53 | logger = logging.getLogger(__name__) 54 | 55 | # The unique Charmhub library identifier, never change it 56 | LIBID = "045b0d179f6b4514a8bb9b48aee9ebaf" 57 | 58 | # Increment this major API version when introducing breaking changes 59 | LIBAPI = 1 60 | 61 | # Increment this PATCH version before using `charmcraft publish-lib` or reset 62 | # to 0 if you are raising the major API version 63 | LIBPATCH = 0 64 | 65 | 66 | class SystemdError(Exception): 67 | pass 68 | 69 | 70 | def _popen_kwargs(): 71 | return dict( 72 | stdout=subprocess.PIPE, 73 | stderr=subprocess.STDOUT, 74 | bufsize=1, 75 | universal_newlines=True, 76 | encoding="utf-8", 77 | ) 78 | 79 | 80 | def _systemctl( 81 | sub_cmd: str, service_name: str = None, now: bool = None, quiet: bool = None 82 | ) -> bool: 83 | """Control a system service. 84 | 85 | Args: 86 | sub_cmd: the systemctl subcommand to issue 87 | service_name: the name of the service to perform the action on 88 | now: passes the --now flag to the shell invocation. 89 | quiet: passes the --quiet flag to the shell invocation. 
90 | """ 91 | cmd = ["systemctl", sub_cmd] 92 | 93 | if service_name is not None: 94 | cmd.append(service_name) 95 | if now is not None: 96 | cmd.append("--now") 97 | if quiet is not None: 98 | cmd.append("--quiet") 99 | if sub_cmd != "is-active": 100 | logger.debug("Attempting to {} '{}' with command {}.".format(cmd, service_name, cmd)) 101 | else: 102 | logger.debug("Checking if '{}' is active".format(service_name)) 103 | 104 | proc = subprocess.Popen(cmd, **_popen_kwargs()) 105 | last_line = "" 106 | for line in iter(proc.stdout.readline, ""): 107 | last_line = line 108 | logger.debug(line) 109 | 110 | proc.wait() 111 | 112 | if sub_cmd == "is-active": 113 | # If we are just checking whether a service is running, return True/False, rather 114 | # than raising an error. 115 | if proc.returncode < 1: 116 | return True 117 | if proc.returncode == 3: # Code returned when service is not active. 118 | return False 119 | 120 | if proc.returncode < 1: 121 | return True 122 | 123 | raise SystemdError( 124 | "Could not {}{}: systemd output: {}".format( 125 | sub_cmd, " {}".format(service_name) if service_name else "", last_line 126 | ) 127 | ) 128 | 129 | 130 | def service_running(service_name: str) -> bool: 131 | """Determine whether a system service is running. 132 | 133 | Args: 134 | service_name: the name of the service 135 | """ 136 | return _systemctl("is-active", service_name, quiet=True) 137 | 138 | 139 | def service_start(service_name: str) -> bool: 140 | """Start a system service. 141 | 142 | Args: 143 | service_name: the name of the service to stop 144 | """ 145 | return _systemctl("start", service_name) 146 | 147 | 148 | def service_stop(service_name: str) -> bool: 149 | """Stop a system service. 150 | 151 | Args: 152 | service_name: the name of the service to stop 153 | """ 154 | return _systemctl("stop", service_name) 155 | 156 | 157 | def service_restart(service_name: str) -> bool: 158 | """Restart a system service. 
159 | 160 | Args: 161 | service_name: the name of the service to restart 162 | """ 163 | return _systemctl("restart", service_name) 164 | 165 | 166 | def service_reload(service_name: str, restart_on_failure: bool = False) -> bool: 167 | """Reload a system service, optionally falling back to restart if reload fails. 168 | 169 | Args: 170 | service_name: the name of the service to reload 171 | restart_on_failure: boolean indicating whether to fallback to a restart if the 172 | reload fails. 173 | """ 174 | try: 175 | return _systemctl("reload", service_name) 176 | except SystemdError: 177 | if restart_on_failure: 178 | return _systemctl("restart", service_name) 179 | else: 180 | raise 181 | 182 | 183 | def service_pause(service_name: str) -> bool: 184 | """Pause a system service. 185 | 186 | Stop it, and prevent it from starting again at boot. 187 | 188 | Args: 189 | service_name: the name of the service to pause 190 | """ 191 | _systemctl("disable", service_name, now=True) 192 | _systemctl("mask", service_name) 193 | 194 | if not service_running(service_name): 195 | return True 196 | 197 | raise SystemdError("Attempted to pause '{}', but it is still running.".format(service_name)) 198 | 199 | 200 | def service_resume(service_name: str) -> bool: 201 | """Resume a system service. 202 | 203 | Re-enable starting again at boot. Start the service. 
204 | 205 | Args: 206 | service_name: the name of the service to resume 207 | """ 208 | _systemctl("unmask", service_name) 209 | _systemctl("enable", service_name, now=True) 210 | 211 | if service_running(service_name): 212 | return True 213 | 214 | raise SystemdError("Attempted to resume '{}', but it is not running.".format(service_name)) 215 | 216 | 217 | def daemon_reload() -> bool: 218 | """Reload systemd manager configuration.""" 219 | return _systemctl("daemon-reload") 220 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2023 Simon 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /lib/charms/operator_libs_linux/v0/apt.py: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Canonical Ltd. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Abstractions for the system's Debian/Ubuntu package information and repositories. 
16 | 17 | This module contains abstractions and wrappers around Debian/Ubuntu-style repositories and 18 | packages, in order to easily provide an idiomatic and Pythonic mechanism for adding packages and/or 19 | repositories to systems for use in machine charms. 20 | 21 | A sane default configuration is attainable through nothing more than instantiation of the 22 | appropriate classes. `DebianPackage` objects provide information about the architecture, version, 23 | name, and status of a package. 24 | 25 | `DebianPackage` will try to look up a package either from `dpkg -L` or from `apt-cache` when 26 | provided with a string indicating the package name. If it cannot be located, `PackageNotFoundError` 27 | will be returned, as `apt` and `dpkg` otherwise return `100` for all errors, and a meaningful error 28 | message if the package is not known is desirable. 29 | 30 | To install packages with convenience methods: 31 | 32 | ```python 33 | try: 34 | # Run `apt-get update` 35 | apt.update() 36 | apt.add_package("zsh") 37 | apt.add_package(["vim", "htop", "wget"]) 38 | except PackageNotFoundError: 39 | logger.error("a specified package not found in package cache or on system") 40 | except PackageError as e: 41 | logger.error("could not install package. Reason: %s", e.message) 42 | ```` 43 | 44 | To find details of a specific package: 45 | 46 | ```python 47 | try: 48 | vim = apt.DebianPackage.from_system("vim") 49 | 50 | # To find from the apt cache only 51 | # apt.DebianPackage.from_apt_cache("vim") 52 | 53 | # To find from installed packages only 54 | # apt.DebianPackage.from_installed_package("vim") 55 | 56 | vim.ensure(PackageState.Latest) 57 | logger.info("updated vim to version: %s", vim.fullversion) 58 | except PackageNotFoundError: 59 | logger.error("a specified package not found in package cache or on system") 60 | except PackageError as e: 61 | logger.error("could not install package. 
Reason: %s", e.message) 62 | ``` 63 | 64 | 65 | `RepositoryMapping` will return a dict-like object containing enabled system repositories 66 | and their properties (available groups, baseuri. gpg key). This class can add, disable, or 67 | manipulate repositories. Items can be retrieved as `DebianRepository` objects. 68 | 69 | In order add a new repository with explicit details for fields, a new `DebianRepository` can 70 | be added to `RepositoryMapping` 71 | 72 | `RepositoryMapping` provides an abstraction around the existing repositories on the system, 73 | and can be accessed and iterated over like any `Mapping` object, to retrieve values by key, 74 | iterate, or perform other operations. 75 | 76 | Keys are constructed as `{repo_type}-{}-{release}` in order to uniquely identify a repository. 77 | 78 | Repositories can be added with explicit values through a Python constructor. 79 | 80 | Example: 81 | 82 | ```python 83 | repositories = apt.RepositoryMapping() 84 | 85 | if "deb-example.com-focal" not in repositories: 86 | repositories.add(DebianRepository(enabled=True, repotype="deb", 87 | uri="https://example.com", release="focal", groups=["universe"])) 88 | ``` 89 | 90 | Alternatively, any valid `sources.list` line may be used to construct a new 91 | `DebianRepository`. 
92 | 93 | Example: 94 | 95 | ```python 96 | repositories = apt.RepositoryMapping() 97 | 98 | if "deb-us.archive.ubuntu.com-xenial" not in repositories: 99 | line = "deb http://us.archive.ubuntu.com/ubuntu xenial main restricted" 100 | repo = DebianRepository.from_repo_line(line) 101 | repositories.add(repo) 102 | ``` 103 | """ 104 | 105 | import fileinput 106 | import glob 107 | import logging 108 | import os 109 | import re 110 | import subprocess 111 | from collections.abc import Mapping 112 | from enum import Enum 113 | from subprocess import PIPE, CalledProcessError, check_call, check_output 114 | from typing import Iterable, List, Optional, Tuple, Union 115 | from urllib.parse import urlparse 116 | 117 | logger = logging.getLogger(__name__) 118 | 119 | # The unique Charmhub library identifier, never change it 120 | LIBID = "7c3dbc9c2ad44a47bd6fcb25caa270e5" 121 | 122 | # Increment this major API version when introducing breaking changes 123 | LIBAPI = 0 124 | 125 | # Increment this PATCH version before using `charmcraft publish-lib` or reset 126 | # to 0 if you are raising the major API version 127 | LIBPATCH = 9 128 | 129 | 130 | VALID_SOURCE_TYPES = ("deb", "deb-src") 131 | OPTIONS_MATCHER = re.compile(r"\[.*?\]") 132 | 133 | 134 | class Error(Exception): 135 | """Base class of most errors raised by this library.""" 136 | 137 | def __repr__(self): 138 | """String representation of Error.""" 139 | return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, self.args) 140 | 141 | @property 142 | def name(self): 143 | """Return a string representation of the model plus class.""" 144 | return "<{}.{}>".format(type(self).__module__, type(self).__name__) 145 | 146 | @property 147 | def message(self): 148 | """Return the message passed as an argument.""" 149 | return self.args[0] 150 | 151 | 152 | class PackageError(Error): 153 | """Raised when there's an error installing or removing a package.""" 154 | 155 | 156 | class PackageNotFoundError(Error): 157 
| """Raised when a requested package is not known to the system.""" 158 | 159 | 160 | class PackageState(Enum): 161 | """A class to represent possible package states.""" 162 | 163 | Present = "present" 164 | Absent = "absent" 165 | Latest = "latest" 166 | Available = "available" 167 | 168 | 169 | class DebianPackage: 170 | """Represents a traditional Debian package and its utility functions. 171 | 172 | `DebianPackage` wraps information and functionality around a known package, whether installed 173 | or available. The version, epoch, name, and architecture can be easily queried and compared 174 | against other `DebianPackage` objects to determine the latest version or to install a specific 175 | version. 176 | 177 | The representation of this object as a string mimics the output from `dpkg` for familiarity. 178 | 179 | Installation and removal of packages is handled through the `state` property or `ensure` 180 | method, with the following options: 181 | 182 | apt.PackageState.Absent 183 | apt.PackageState.Available 184 | apt.PackageState.Present 185 | apt.PackageState.Latest 186 | 187 | When `DebianPackage` is initialized, the state of a given `DebianPackage` object will be set to 188 | `Available`, `Present`, or `Latest`, with `Absent` implemented as a convenience for removal 189 | (though it operates essentially the same as `Available`). 190 | """ 191 | 192 | def __init__( 193 | self, name: str, version: str, epoch: str, arch: str, state: PackageState 194 | ) -> None: 195 | self._name = name 196 | self._arch = arch 197 | self._state = state 198 | self._version = Version(version, epoch) 199 | 200 | def __eq__(self, other) -> bool: 201 | """Equality for comparison. 
202 | 203 | Args: 204 | other: a `DebianPackage` object for comparison 205 | 206 | Returns: 207 | A boolean reflecting equality 208 | """ 209 | return isinstance(other, self.__class__) and ( 210 | self._name, 211 | self._version.number, 212 | ) == (other._name, other._version.number) 213 | 214 | def __hash__(self): 215 | """A basic hash so this class can be used in Mappings and dicts.""" 216 | return hash((self._name, self._version.number)) 217 | 218 | def __repr__(self): 219 | """A representation of the package.""" 220 | return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) 221 | 222 | def __str__(self): 223 | """A human-readable representation of the package.""" 224 | return "<{}: {}-{}.{} -- {}>".format( 225 | self.__class__.__name__, 226 | self._name, 227 | self._version, 228 | self._arch, 229 | str(self._state), 230 | ) 231 | 232 | @staticmethod 233 | def _apt( 234 | command: str, 235 | package_names: Union[str, List], 236 | optargs: Optional[List[str]] = None, 237 | ) -> None: 238 | """Wrap package management commands for Debian/Ubuntu systems. 
239 | 240 | Args: 241 | command: the command given to `apt-get` 242 | package_names: a package name or list of package names to operate on 243 | optargs: an (Optional) list of additional arguments 244 | 245 | Raises: 246 | PackageError if an error is encountered 247 | """ 248 | optargs = optargs if optargs is not None else [] 249 | if isinstance(package_names, str): 250 | package_names = [package_names] 251 | _cmd = ["apt-get", "-y", *optargs, command, *package_names] 252 | try: 253 | env = os.environ.copy() 254 | env["DEBIAN_FRONTEND"] = "noninteractive" 255 | check_call(_cmd, env=env, stderr=PIPE, stdout=PIPE) 256 | except CalledProcessError as e: 257 | raise PackageError( 258 | "Could not {} package(s) [{}]: {}".format(command, [*package_names], e.output) 259 | ) from None 260 | 261 | def _add(self) -> None: 262 | """Add a package to the system.""" 263 | self._apt( 264 | "install", 265 | "{}={}".format(self.name, self.version), 266 | optargs=["--option=Dpkg::Options::=--force-confold"], 267 | ) 268 | 269 | def _remove(self) -> None: 270 | """Removes a package from the system. Implementation-specific.""" 271 | return self._apt("remove", "{}={}".format(self.name, self.version)) 272 | 273 | @property 274 | def name(self) -> str: 275 | """Returns the name of the package.""" 276 | return self._name 277 | 278 | def ensure(self, state: PackageState): 279 | """Ensures that a package is in a given state.
280 | 281 | Args: 282 | state: a `PackageState` to reconcile the package to 283 | 284 | Raises: 285 | PackageError from the underlying call to apt 286 | """ 287 | if self._state is not state: 288 | if state not in (PackageState.Present, PackageState.Latest): 289 | self._remove() 290 | else: 291 | self._add() 292 | self._state = state 293 | 294 | @property 295 | def present(self) -> bool: 296 | """Returns whether or not a package is present.""" 297 | return self._state in (PackageState.Present, PackageState.Latest) 298 | 299 | @property 300 | def latest(self) -> bool: 301 | """Returns whether the package is the most recent version.""" 302 | return self._state is PackageState.Latest 303 | 304 | @property 305 | def state(self) -> PackageState: 306 | """Returns the current package state.""" 307 | return self._state 308 | 309 | @state.setter 310 | def state(self, state: PackageState) -> None: 311 | """Sets the package state to a given value. 312 | 313 | Args: 314 | state: a `PackageState` to reconcile the package to 315 | 316 | Raises: 317 | PackageError from the underlying call to apt 318 | """ 319 | if state in (PackageState.Latest, PackageState.Present): 320 | self._add() 321 | else: 322 | self._remove() 323 | self._state = state 324 | 325 | @property 326 | def version(self) -> "Version": 327 | """Returns the version for a package.""" 328 | return self._version 329 | 330 | @property 331 | def epoch(self) -> str: 332 | """Returns the epoch for a package. 
May be unset.""" 333 | return self._version.epoch 334 | 335 | @property 336 | def arch(self) -> str: 337 | """Returns the architecture for a package.""" 338 | return self._arch 339 | 340 | @property 341 | def fullversion(self) -> str: 342 | """Returns the name+epoch for a package.""" 343 | return "{}.{}".format(self._version, self._arch) 344 | 345 | @staticmethod 346 | def _get_epoch_from_version(version: str) -> Tuple[str, str]: 347 | """Pull the epoch, if any, out of a version string.""" 348 | epoch_matcher = re.compile(r"^((?P\d+):)?(?P.*)") 349 | matches = epoch_matcher.search(version).groupdict() 350 | return matches.get("epoch", ""), matches.get("version") 351 | 352 | @classmethod 353 | def from_system( 354 | cls, package: str, version: Optional[str] = "", arch: Optional[str] = "" 355 | ) -> "DebianPackage": 356 | """Locates a package, either on the system or known to apt, and serializes the information. 357 | 358 | Args: 359 | package: a string representing the package 360 | version: an optional string if a specific version is requested 361 | arch: an optional architecture, defaulting to `dpkg --print-architecture`. If an 362 | architecture is not specified, this will be used for selection. 363 | 364 | """ 365 | try: 366 | return DebianPackage.from_installed_package(package, version, arch) 367 | except PackageNotFoundError: 368 | logger.debug( 369 | "package '%s' is not currently installed or has the wrong architecture.", package 370 | ) 371 | 372 | # Ok, try `apt-cache ...` 373 | try: 374 | return DebianPackage.from_apt_cache(package, version, arch) 375 | except (PackageNotFoundError, PackageError): 376 | # If we get here, it's not known to the systems. 377 | # This seems unnecessary, but virtually all `apt` commands have a return code of `100`, 378 | # and providing meaningful error messages without this is ugly. 
        # from_system() fall-through: the package was found neither installed
        # on the system nor in the apt cache, so report it as missing.
        raise PackageNotFoundError(
            "Package '{}{}' could not be found on the system or in the apt cache!".format(
                package, ".{}".format(arch) if arch else ""
            )
        ) from None

    @classmethod
    def from_installed_package(
        cls, package: str, version: Optional[str] = "", arch: Optional[str] = ""
    ) -> "DebianPackage":
        """Check whether the package is already installed and return an instance.

        Args:
            package: a string representing the package
            version: an optional string if a specific version is requested
            arch: an optional architecture, defaulting to `dpkg --print-architecture`.
                If an architecture is not specified, this will be used for selection.

        Raises:
            PackageNotFoundError: if `dpkg -l` fails, or if no installed package
                matches the requested name/version/architecture.
        """
        system_arch = check_output(
            ["dpkg", "--print-architecture"], universal_newlines=True
        ).strip()
        arch = arch if arch else system_arch

        # Regexps are a really terrible way to do this. Thanks dpkg
        output = ""
        try:
            output = check_output(["dpkg", "-l", package], stderr=PIPE, universal_newlines=True)
        except CalledProcessError:
            raise PackageNotFoundError("Package is not installed: {}".format(package)) from None

        # Pop off the output from `dpkg -l' because there's no flag to
        # omit it`
        lines = str(output).splitlines()[5:]

        # NOTE(review): the named groups below were reconstructed from their
        # uses ("package_status", "package_name", "version", "arch"); the
        # optional ":arch" suffix on the package name is captured only so it
        # can be discarded — confirm against upstream.
        dpkg_matcher = re.compile(
            r"""
        ^(?P<package_status>\w+?)\s+
        (?P<package_name>.*?)(?P<throwaway_arch>:\w+?)?\s+
        (?P<version>.*?)\s+
        (?P<arch>\w+?)\s+
        (?P<description>.*)
        """,
            re.VERBOSE,
        )

        for line in lines:
            try:
                matches = dpkg_matcher.search(line).groupdict()
                package_status = matches["package_status"]

                # dpkg status flags end in "i" for an installed package
                # (e.g. "ii"); anything else means it is merely known to dpkg.
                if not package_status.endswith("i"):
                    logger.debug(
                        "package '%s' in dpkg output but not installed, status: '%s'",
                        package,
                        package_status,
                    )
                    break

                epoch, split_version = DebianPackage._get_epoch_from_version(matches["version"])
                pkg = DebianPackage(
                    matches["package_name"],
                    split_version,
                    epoch,
                    matches["arch"],
                    PackageState.Present,
                )
                # Accept arch "all" for any requested architecture, and treat
                # an empty `version` argument as "any version".
                if (pkg.arch == "all" or pkg.arch == arch) and (
                    version == "" or str(pkg.version) == version
                ):
                    return pkg
            except AttributeError:
                # dpkg_matcher.search() returned None for this line
                logger.warning("dpkg matcher could not parse line: %s", line)

        # If we didn't find it, fail through
        raise PackageNotFoundError("Package {}.{} is not installed!".format(package, arch))

    @classmethod
    def from_apt_cache(
        cls, package: str, version: Optional[str] = "", arch: Optional[str] = ""
    ) -> "DebianPackage":
        """Check whether the package is already installed and return an instance.

        Args:
            package: a string representing the package
            version: an optional string if a specific version is requested
            arch: an optional architecture, defaulting to `dpkg --print-architecture`.
                If an architecture is not specified, this will be used for selection.

        Raises:
            PackageError: if `apt-cache show` fails.
            PackageNotFoundError: if no cached package matches the requested
                name/version/architecture.
        """
        system_arch = check_output(
            ["dpkg", "--print-architecture"], universal_newlines=True
        ).strip()
        arch = arch if arch else system_arch

        # Regexps are a really terrible way to do this. Thanks dpkg
        keys = ("Package", "Architecture", "Version")

        try:
            output = check_output(
                ["apt-cache", "show", package], stderr=PIPE, universal_newlines=True
            )
        except CalledProcessError as e:
            raise PackageError(
                "Could not list packages in apt-cache: {}".format(e.output)
            ) from None

        # `apt-cache show` separates candidate packages with blank lines.
        pkg_groups = output.strip().split("\n\n")
        # NOTE(review): `keys` is assigned twice (see above); this second
        # assignment is redundant.
        keys = ("Package", "Architecture", "Version")

        for pkg_raw in pkg_groups:
            lines = str(pkg_raw).splitlines()
            vals = {}
            # Keep only the "Field: value" lines we care about.
            for line in lines:
                if line.startswith(keys):
                    items = line.split(":", 1)
                    vals[items[0]] = items[1].strip()
                else:
                    continue

            epoch, split_version = DebianPackage._get_epoch_from_version(vals["Version"])
            pkg = DebianPackage(
                vals["Package"],
                split_version,
                epoch,
                vals["Architecture"],
                PackageState.Available,
            )

            # Accept arch "all" for any requested architecture, and treat an
            # empty `version` argument as "any version".
            if (pkg.arch == "all" or pkg.arch == arch) and (
                version == "" or str(pkg.version) == version
            ):
                return pkg

        # If we didn't find it, fail through
        raise PackageNotFoundError("Package {}.{} is not in the apt cache!".format(package, arch))


class Version:
    """An abstraction around package versions.

    This seems like it should be strictly unnecessary, except that `apt_pkg` is not usable inside a
    venv, and wedging version comparisons into `DebianPackage` would overcomplicate it.
520 | 521 | This class implements the algorithm found here: 522 | https://www.debian.org/doc/debian-policy/ch-controlfields.html#version 523 | """ 524 | 525 | def __init__(self, version: str, epoch: str): 526 | self._version = version 527 | self._epoch = epoch or "" 528 | 529 | def __repr__(self): 530 | """A representation of the package.""" 531 | return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) 532 | 533 | def __str__(self): 534 | """A human-readable representation of the package.""" 535 | return "{}{}".format("{}:".format(self._epoch) if self._epoch else "", self._version) 536 | 537 | @property 538 | def epoch(self): 539 | """Returns the epoch for a package. May be empty.""" 540 | return self._epoch 541 | 542 | @property 543 | def number(self) -> str: 544 | """Returns the version number for a package.""" 545 | return self._version 546 | 547 | def _get_parts(self, version: str) -> Tuple[str, str]: 548 | """Separate the version into component upstream and Debian pieces.""" 549 | try: 550 | version.rindex("-") 551 | except ValueError: 552 | # No hyphens means no Debian version 553 | return version, "0" 554 | 555 | upstream, debian = version.rsplit("-", 1) 556 | return upstream, debian 557 | 558 | def _listify(self, revision: str) -> List[str]: 559 | """Split a revision string into a listself. 560 | 561 | This list is comprised of alternating between strings and numbers, 562 | padded on either end to always be "str, int, str, int..." and 563 | always be of even length. This allows us to trivially implement the 564 | comparison algorithm described. 
565 | """ 566 | result = [] 567 | while revision: 568 | rev_1, remains = self._get_alphas(revision) 569 | rev_2, remains = self._get_digits(remains) 570 | result.extend([rev_1, rev_2]) 571 | revision = remains 572 | return result 573 | 574 | def _get_alphas(self, revision: str) -> Tuple[str, str]: 575 | """Return a tuple of the first non-digit characters of a revision.""" 576 | # get the index of the first digit 577 | for i, char in enumerate(revision): 578 | if char.isdigit(): 579 | if i == 0: 580 | return "", revision 581 | return revision[0:i], revision[i:] 582 | # string is entirely alphas 583 | return revision, "" 584 | 585 | def _get_digits(self, revision: str) -> Tuple[int, str]: 586 | """Return a tuple of the first integer characters of a revision.""" 587 | # If the string is empty, return (0,'') 588 | if not revision: 589 | return 0, "" 590 | # get the index of the first non-digit 591 | for i, char in enumerate(revision): 592 | if not char.isdigit(): 593 | if i == 0: 594 | return 0, revision 595 | return int(revision[0:i]), revision[i:] 596 | # string is entirely digits 597 | return int(revision), "" 598 | 599 | def _dstringcmp(self, a, b): # noqa: C901 600 | """Debian package version string section lexical sort algorithm. 601 | 602 | The lexical comparison is a comparison of ASCII values modified so 603 | that all the letters sort earlier than all the non-letters and so that 604 | a tilde sorts before anything, even the end of a part. 
605 | """ 606 | if a == b: 607 | return 0 608 | try: 609 | for i, char in enumerate(a): 610 | if char == b[i]: 611 | continue 612 | # "a tilde sorts before anything, even the end of a part" 613 | # (emptyness) 614 | if char == "~": 615 | return -1 616 | if b[i] == "~": 617 | return 1 618 | # "all the letters sort earlier than all the non-letters" 619 | if char.isalpha() and not b[i].isalpha(): 620 | return -1 621 | if not char.isalpha() and b[i].isalpha(): 622 | return 1 623 | # otherwise lexical sort 624 | if ord(char) > ord(b[i]): 625 | return 1 626 | if ord(char) < ord(b[i]): 627 | return -1 628 | except IndexError: 629 | # a is longer than b but otherwise equal, greater unless there are tildes 630 | if char == "~": 631 | return -1 632 | return 1 633 | # if we get here, a is shorter than b but otherwise equal, so check for tildes... 634 | if b[len(a)] == "~": 635 | return 1 636 | return -1 637 | 638 | def _compare_revision_strings(self, first: str, second: str): # noqa: C901 639 | """Compare two debian revision strings.""" 640 | if first == second: 641 | return 0 642 | 643 | # listify pads results so that we will always be comparing ints to ints 644 | # and strings to strings (at least until we fall off the end of a list) 645 | first_list = self._listify(first) 646 | second_list = self._listify(second) 647 | if first_list == second_list: 648 | return 0 649 | try: 650 | for i, item in enumerate(first_list): 651 | # explicitly raise IndexError if we've fallen off the edge of list2 652 | if i >= len(second_list): 653 | raise IndexError 654 | # if the items are equal, next 655 | if item == second_list[i]: 656 | continue 657 | # numeric comparison 658 | if isinstance(item, int): 659 | if item > second_list[i]: 660 | return 1 661 | if item < second_list[i]: 662 | return -1 663 | else: 664 | # string comparison 665 | return self._dstringcmp(item, second_list[i]) 666 | except IndexError: 667 | # rev1 is longer than rev2 but otherwise equal, hence greater 668 | # 
...except for goddamn tildes 669 | if first_list[len(second_list)][0][0] == "~": 670 | return 1 671 | return 1 672 | # rev1 is shorter than rev2 but otherwise equal, hence lesser 673 | # ...except for goddamn tildes 674 | if second_list[len(first_list)][0][0] == "~": 675 | return -1 676 | return -1 677 | 678 | def _compare_version(self, other) -> int: 679 | if (self.number, self.epoch) == (other.number, other.epoch): 680 | return 0 681 | 682 | if self.epoch < other.epoch: 683 | return -1 684 | if self.epoch > other.epoch: 685 | return 1 686 | 687 | # If none of these are true, follow the algorithm 688 | upstream_version, debian_version = self._get_parts(self.number) 689 | other_upstream_version, other_debian_version = self._get_parts(other.number) 690 | 691 | upstream_cmp = self._compare_revision_strings(upstream_version, other_upstream_version) 692 | if upstream_cmp != 0: 693 | return upstream_cmp 694 | 695 | debian_cmp = self._compare_revision_strings(debian_version, other_debian_version) 696 | if debian_cmp != 0: 697 | return debian_cmp 698 | 699 | return 0 700 | 701 | def __lt__(self, other) -> bool: 702 | """Less than magic method impl.""" 703 | return self._compare_version(other) < 0 704 | 705 | def __eq__(self, other) -> bool: 706 | """Equality magic method impl.""" 707 | return self._compare_version(other) == 0 708 | 709 | def __gt__(self, other) -> bool: 710 | """Greater than magic method impl.""" 711 | return self._compare_version(other) > 0 712 | 713 | def __le__(self, other) -> bool: 714 | """Less than or equal to magic method impl.""" 715 | return self.__eq__(other) or self.__lt__(other) 716 | 717 | def __ge__(self, other) -> bool: 718 | """Greater than or equal to magic method impl.""" 719 | return self.__gt__(other) or self.__eq__(other) 720 | 721 | def __ne__(self, other) -> bool: 722 | """Not equal to magic method impl.""" 723 | return not self.__eq__(other) 724 | 725 | 726 | def add_package( 727 | package_names: Union[str, List[str]], 728 | 
    version: Optional[str] = "",
    arch: Optional[str] = "",
    update_cache: Optional[bool] = False,
) -> Union[DebianPackage, List[DebianPackage]]:
    """Add a package or list of packages to the system.

    Args:
        package_names: the name(s) of the package(s)
        version: an (Optional) version as a string. Defaults to the latest known
        arch: an optional architecture for the package
        update_cache: whether or not to run `apt-get update` prior to operating

    Returns:
        A single `DebianPackage` when exactly one package was installed,
        otherwise the list of installed `DebianPackage`s.

    Raises:
        TypeError if no package name is given, or explicit version is set for multiple packages
        PackageNotFoundError if the package is not in the cache.
        PackageError if packages fail to install
    """
    cache_refreshed = False
    if update_cache:
        update()
        cache_refreshed = True

    packages = {"success": [], "retry": [], "failed": []}

    package_names = [package_names] if type(package_names) is str else package_names
    if not package_names:
        raise TypeError("Expected at least one package name to add, received zero!")

    if len(package_names) != 1 and version:
        raise TypeError(
            "Explicit version should not be set if more than one package is being added!"
        )

    for p in package_names:
        pkg, success = _add(p, version, arch)
        if success:
            packages["success"].append(pkg)
        else:
            logger.warning("failed to locate and install/update '%s'", pkg)
            packages["retry"].append(p)

    # One retry pass after refreshing the cache — but only if the caller
    # didn't already refresh it above.
    if packages["retry"] and not cache_refreshed:
        logger.info("updating the apt-cache and retrying installation of failed packages.")
        update()

        for p in packages["retry"]:
            pkg, success = _add(p, version, arch)
            if success:
                packages["success"].append(pkg)
            else:
                packages["failed"].append(p)

    if packages["failed"]:
        raise PackageError("Failed to install packages: {}".format(", ".join(packages["failed"])))

    return packages["success"] if len(packages["success"]) > 1 else packages["success"][0]


def _add(
    name: str,
    version: Optional[str] = "",
    arch: Optional[str] = "",
) -> Tuple[Union[DebianPackage, str], bool]:
    """Adds a package.

    Args:
        name: the name(s) of the package(s)
        version: an (Optional) version as a string. Defaults to the latest known
        arch: an optional architecture for the package

    Returns: a tuple of `DebianPackage` if found, or a :str: if it is not, and
        a boolean indicating success
    """
    try:
        pkg = DebianPackage.from_system(name, version, arch)
        pkg.ensure(state=PackageState.Present)
        return pkg, True
    except PackageNotFoundError:
        # Best-effort: report failure to the caller rather than raising, so
        # add_package() can retry after a cache refresh.
        return name, False


def remove_package(
    package_names: Union[str, List[str]]
) -> Union[DebianPackage, List[DebianPackage]]:
    """Removes a package from the system.

    Args:
        package_names: the name of a package

    Returns:
        A single `DebianPackage` when exactly one package was removed,
        otherwise the (possibly empty) list of removed `DebianPackage`s.
        Packages that were not installed are skipped with a log message,
        not raised.

    Raises:
        TypeError if no package name is given.
    """
    packages = []

    package_names = [package_names] if type(package_names) is str else package_names
    if not package_names:
        # NOTE(review): message says "to add" — looks copy-pasted from
        # add_package(); should probably read "to remove".
        raise TypeError("Expected at least one package name to add, received zero!")

    for p in package_names:
        try:
            pkg = DebianPackage.from_installed_package(p)
            pkg.ensure(state=PackageState.Absent)
            packages.append(pkg)
        except PackageNotFoundError:
            logger.info("package '%s' was requested for removal, but it was not installed.", p)

    # the list of packages will be empty when no package is removed
    logger.debug("packages: '%s'", packages)
    return packages[0] if len(packages) == 1 else packages


def update() -> None:
    """Updates the apt cache via `apt-get update`.

    Raises:
        subprocess.CalledProcessError: if `apt-get update` exits non-zero.
    """
    check_call(["apt-get", "update"], stderr=PIPE, stdout=PIPE)


class InvalidSourceError(Error):
    """Exceptions for invalid source entries."""


class GPGKeyError(Error):
    """Exceptions for GPG keys."""


class DebianRepository:
    """An abstraction to represent a repository."""

    def __init__(
        self,
        enabled: bool,
        repotype: str,
        uri: str,
        release: str,
        groups: List[str],
        filename: Optional[str] = "",
        gpg_key_filename: Optional[str] = "",
        options: Optional[dict] = None,
    ):
        """Initialize a repository entry.

        Args:
            enabled: whether the source line is active (not commented out)
            repotype: "deb" or "deb-src"
            uri: the repository URI
            release: the distribution release (e.g. "focal")
            groups: the package groups/components (e.g. ["main", "universe"])
            filename: the sources.list(.d) file this entry lives in
            gpg_key_filename: path to the GPG key for this repository
            options: extra bracketed options from the source line
        """
        self._enabled = enabled
        self._repotype = repotype
        self._uri = uri
        self._release = release
        self._groups = groups
        self._filename = filename
        self._gpg_key_filename = gpg_key_filename
        self._options = options

    @property
    def enabled(self):
        """Return whether or not the repository is enabled."""
        return self._enabled

    @property
    def repotype(self):
        """Return whether it is binary or source."""
        return self._repotype

    @property
    def uri(self):
        """Return the
URI."""
        return self._uri

    @property
    def release(self):
        """Return which Debian/Ubuntu releases it is valid for."""
        return self._release

    @property
    def groups(self):
        """Return the enabled package groups."""
        return self._groups

    @property
    def filename(self):
        """Returns the filename for a repository."""
        return self._filename

    @filename.setter
    def filename(self, fname: str) -> None:
        """Sets the filename used when a repo is written back to disk.

        Args:
            fname: a filename to write the repository information to.

        Raises:
            InvalidSourceError: if the filename does not end in ".list".
        """
        if not fname.endswith(".list"):
            raise InvalidSourceError("apt source filenames should end in .list!")

        self._filename = fname

    @property
    def gpg_key(self):
        """Returns the path to the GPG key for this repository."""
        return self._gpg_key_filename

    @property
    def options(self):
        """Returns any additional repo options which are set."""
        return self._options

    def make_options_string(self) -> str:
        """Generate the complete options string for a a repository.

        Combining `gpg_key`, if set, and the rest of the options to find
        a complex repo string.
        """
        # NOTE(review): when self._options is set this adds "signed-by" into
        # that same dict in place (no copy), mutating the repository's
        # options as a side effect.
        options = self._options if self._options else {}
        if self._gpg_key_filename:
            options["signed-by"] = self._gpg_key_filename

        return (
            "[{}] ".format(" ".join(["{}={}".format(k, v) for k, v in options.items()]))
            if options
            else ""
        )

    @staticmethod
    def prefix_from_uri(uri: str) -> str:
        """Get a repo list prefix from the uri, depending on whether a path is set."""
        uridetails = urlparse(uri)
        path = (
            uridetails.path.lstrip("/").replace("/", "-") if uridetails.path else uridetails.netloc
        )
        return "/etc/apt/sources.list.d/{}".format(path)

    @staticmethod
    def from_repo_line(repo_line: str, write_file: Optional[bool] = True) -> "DebianRepository":
        """Instantiate a new `DebianRepository` a `sources.list` entry line.

        Args:
            repo_line: a string representing a repository entry
            write_file: boolean to enable writing the new repo to disk

        Raises:
            InvalidSourceError: if `repo_line` cannot be parsed (via
                `RepositoryMapping._parse`).
        """
        repo = RepositoryMapping._parse(repo_line, "UserInput")
        fname = "{}-{}.list".format(
            DebianRepository.prefix_from_uri(repo.uri), repo.release.replace("/", "-")
        )
        repo.filename = fname

        options = repo.options if repo.options else {}
        if repo.gpg_key:
            options["signed-by"] = repo.gpg_key

        # For Python 3.5 it's required to use sorted in the options dict in order to not have
        # different results in the order of the options between executions.
        options_str = (
            "[{}] ".format(" ".join(["{}={}".format(k, v) for k, v in sorted(options.items())]))
            if options
            else ""
        )

        if write_file:
            # A disabled repo is written back as a commented-out line.
            with open(fname, "wb") as f:
                f.write(
                    (
                        "{}".format("#" if not repo.enabled else "")
                        + "{} {}{} ".format(repo.repotype, options_str, repo.uri)
                        + "{} {}\n".format(repo.release, " ".join(repo.groups))
                    ).encode("utf-8")
                )

        return repo

    def disable(self) -> None:
        """Remove this repository from consideration.

        Disable it instead of removing from the repository file.
        """
        # Comment out (rather than delete) the matching source line in place.
        searcher = "{} {}{} {}".format(
            self.repotype, self.make_options_string(), self.uri, self.release
        )
        for line in fileinput.input(self._filename, inplace=True):
            if re.match(r"^{}\s".format(re.escape(searcher)), line):
                print("# {}".format(line), end="")
            else:
                print(line, end="")

    def import_key(self, key: str) -> None:
        """Import an ASCII Armor key.

        A Radix64 format keyid is also supported for backwards
        compatibility. In this case Ubuntu keyserver will be
        queried for a key via HTTPS by its keyid. This method
        is less preferable because https proxy servers may
        require traffic decryption which is equivalent to a
        man-in-the-middle attack (a proxy server impersonates
        keyserver TLS certificates and has to be explicitly
        trusted by the system).

        Args:
            key: A GPG key in ASCII armor format,
                including BEGIN and END markers or a keyid.

        Raises:
            GPGKeyError if the key could not be imported
        """
        key = key.strip()
        if "-" in key or "\n" in key:
            # Send everything not obviously a keyid to GPG to import, as
            # we trust its validation better than our own. eg. handling
            # comments before the key.
1028 | logger.debug("PGP key found (looks like ASCII Armor format)") 1029 | if ( 1030 | "-----BEGIN PGP PUBLIC KEY BLOCK-----" in key 1031 | and "-----END PGP PUBLIC KEY BLOCK-----" in key 1032 | ): 1033 | logger.debug("Writing provided PGP key in the binary format") 1034 | key_bytes = key.encode("utf-8") 1035 | key_name = self._get_keyid_by_gpg_key(key_bytes) 1036 | key_gpg = self._dearmor_gpg_key(key_bytes) 1037 | self._gpg_key_filename = "/etc/apt/trusted.gpg.d/{}.gpg".format(key_name) 1038 | self._write_apt_gpg_keyfile(key_name=self._gpg_key_filename, key_material=key_gpg) 1039 | else: 1040 | raise GPGKeyError("ASCII armor markers missing from GPG key") 1041 | else: 1042 | logger.warning( 1043 | "PGP key found (looks like Radix64 format). " 1044 | "SECURELY importing PGP key from keyserver; " 1045 | "full key not provided." 1046 | ) 1047 | # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL 1048 | # to retrieve GPG keys. `apt-key adv` command is deprecated as is 1049 | # apt-key in general as noted in its manpage. See lp:1433761 for more 1050 | # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop 1051 | # gpg 1052 | key_asc = self._get_key_by_keyid(key) 1053 | # write the key in GPG format so that apt-key list shows it 1054 | key_gpg = self._dearmor_gpg_key(key_asc.encode("utf-8")) 1055 | self._gpg_key_filename = "/etc/apt/trusted.gpg.d/{}.gpg".format(key) 1056 | self._write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) 1057 | 1058 | @staticmethod 1059 | def _get_keyid_by_gpg_key(key_material: bytes) -> str: 1060 | """Get a GPG key fingerprint by GPG key material. 1061 | 1062 | Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded 1063 | or binary GPG key material. Can be used, for example, to generate file 1064 | names for keys passed via charm options. 
1065 | """ 1066 | # Use the same gpg command for both Xenial and Bionic 1067 | cmd = ["gpg", "--with-colons", "--with-fingerprint"] 1068 | ps = subprocess.run( 1069 | cmd, 1070 | stdout=PIPE, 1071 | stderr=PIPE, 1072 | input=key_material, 1073 | ) 1074 | out, err = ps.stdout.decode(), ps.stderr.decode() 1075 | if "gpg: no valid OpenPGP data found." in err: 1076 | raise GPGKeyError("Invalid GPG key material provided") 1077 | # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) 1078 | return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) 1079 | 1080 | @staticmethod 1081 | def _get_key_by_keyid(keyid: str) -> str: 1082 | """Get a key via HTTPS from the Ubuntu keyserver. 1083 | 1084 | Different key ID formats are supported by SKS keyservers (the longer ones 1085 | are more secure, see "dead beef attack" and https://evil32.com/). Since 1086 | HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will 1087 | impersonate keyserver.ubuntu.com and generate a certificate with 1088 | keyserver.ubuntu.com in the CN field or in SubjAltName fields of a 1089 | certificate. If such proxy behavior is expected it is necessary to add the 1090 | CA certificate chain containing the intermediate CA of the SSLBump proxy to 1091 | every machine that this code runs on via ca-certs cloud-init directive (via 1092 | cloudinit-userdata model-config) or via other means (such as through a 1093 | custom charm option). Also note that DNS resolution for the hostname in a 1094 | URL is done at a proxy server - not at the client side. 
1095 | 8-digit (32 bit) key ID 1096 | https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 1097 | 16-digit (64 bit) key ID 1098 | https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 1099 | 40-digit key ID: 1100 | https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 1101 | 1102 | Args: 1103 | keyid: An 8, 16 or 40 hex digit keyid to find a key for 1104 | 1105 | Returns: 1106 | A string contining key material for the specified GPG key id 1107 | 1108 | 1109 | Raises: 1110 | subprocess.CalledProcessError 1111 | """ 1112 | # options=mr - machine-readable output (disables html wrappers) 1113 | keyserver_url = ( 1114 | "https://keyserver.ubuntu.com" "/pks/lookup?op=get&options=mr&exact=on&search=0x{}" 1115 | ) 1116 | curl_cmd = ["curl", keyserver_url.format(keyid)] 1117 | # use proxy server settings in order to retrieve the key 1118 | return check_output(curl_cmd).decode() 1119 | 1120 | @staticmethod 1121 | def _dearmor_gpg_key(key_asc: bytes) -> bytes: 1122 | """Converts a GPG key in the ASCII armor format to the binary format. 1123 | 1124 | Args: 1125 | key_asc: A GPG key in ASCII armor format. 1126 | 1127 | Returns: 1128 | A GPG key in binary format as a string 1129 | 1130 | Raises: 1131 | GPGKeyError 1132 | """ 1133 | ps = subprocess.run(["gpg", "--dearmor"], stdout=PIPE, stderr=PIPE, input=key_asc) 1134 | out, err = ps.stdout, ps.stderr.decode() 1135 | if "gpg: no valid OpenPGP data found." in err: 1136 | raise GPGKeyError( 1137 | "Invalid GPG key material. Check your network setup" 1138 | " (MTU, routing, DNS) and/or proxy server settings" 1139 | " as well as destination keyserver status." 1140 | ) 1141 | else: 1142 | return out 1143 | 1144 | @staticmethod 1145 | def _write_apt_gpg_keyfile(key_name: str, key_material: bytes) -> None: 1146 | """Writes GPG key material into a file at a provided path. 
1147 | 1148 | Args: 1149 | key_name: A key name to use for a key file (could be a fingerprint) 1150 | key_material: A GPG key material (binary) 1151 | """ 1152 | with open(key_name, "wb") as keyf: 1153 | keyf.write(key_material) 1154 | 1155 | 1156 | class RepositoryMapping(Mapping): 1157 | """An representation of known repositories. 1158 | 1159 | Instantiation of `RepositoryMapping` will iterate through the 1160 | filesystem, parse out repository files in `/etc/apt/...`, and create 1161 | `DebianRepository` objects in this list. 1162 | 1163 | Typical usage: 1164 | 1165 | repositories = apt.RepositoryMapping() 1166 | repositories.add(DebianRepository( 1167 | enabled=True, repotype="deb", uri="https://example.com", release="focal", 1168 | groups=["universe"] 1169 | )) 1170 | """ 1171 | 1172 | def __init__(self): 1173 | self._repository_map = {} 1174 | # Repositories that we're adding -- used to implement mode param 1175 | self.default_file = "/etc/apt/sources.list" 1176 | 1177 | # read sources.list if it exists 1178 | if os.path.isfile(self.default_file): 1179 | self.load(self.default_file) 1180 | 1181 | # read sources.list.d 1182 | for file in glob.iglob("/etc/apt/sources.list.d/*.list"): 1183 | self.load(file) 1184 | 1185 | def __contains__(self, key: str) -> bool: 1186 | """Magic method for checking presence of repo in mapping.""" 1187 | return key in self._repository_map 1188 | 1189 | def __len__(self) -> int: 1190 | """Return number of repositories in map.""" 1191 | return len(self._repository_map) 1192 | 1193 | def __iter__(self) -> Iterable[DebianRepository]: 1194 | """Iterator magic method for RepositoryMapping.""" 1195 | return iter(self._repository_map.values()) 1196 | 1197 | def __getitem__(self, repository_uri: str) -> DebianRepository: 1198 | """Return a given `DebianRepository`.""" 1199 | return self._repository_map[repository_uri] 1200 | 1201 | def __setitem__(self, repository_uri: str, repository: DebianRepository) -> None: 1202 | """Add a 
`DebianRepository` to the cache.""" 1203 | self._repository_map[repository_uri] = repository 1204 | 1205 | def load(self, filename: str): 1206 | """Load a repository source file into the cache. 1207 | 1208 | Args: 1209 | filename: the path to the repository file 1210 | """ 1211 | parsed = [] 1212 | skipped = [] 1213 | with open(filename, "r") as f: 1214 | for n, line in enumerate(f): 1215 | try: 1216 | repo = self._parse(line, filename) 1217 | except InvalidSourceError: 1218 | skipped.append(n) 1219 | else: 1220 | repo_identifier = "{}-{}-{}".format(repo.repotype, repo.uri, repo.release) 1221 | self._repository_map[repo_identifier] = repo 1222 | parsed.append(n) 1223 | logger.debug("parsed repo: '%s'", repo_identifier) 1224 | 1225 | if skipped: 1226 | skip_list = ", ".join(str(s) for s in skipped) 1227 | logger.debug("skipped the following lines in file '%s': %s", filename, skip_list) 1228 | 1229 | if parsed: 1230 | logger.info("parsed %d apt package repositories", len(parsed)) 1231 | else: 1232 | raise InvalidSourceError("all repository lines in '{}' were invalid!".format(filename)) 1233 | 1234 | @staticmethod 1235 | def _parse(line: str, filename: str) -> DebianRepository: 1236 | """Parse a line in a sources.list file. 1237 | 1238 | Args: 1239 | line: a single line from `load` to parse 1240 | filename: the filename being read 1241 | 1242 | Raises: 1243 | InvalidSourceError if the source type is unknown 1244 | """ 1245 | enabled = True 1246 | repotype = uri = release = gpg_key = "" 1247 | options = {} 1248 | groups = [] 1249 | 1250 | line = line.strip() 1251 | if line.startswith("#"): 1252 | enabled = False 1253 | line = line[1:] 1254 | 1255 | # Check for "#" in the line and treat a part after it as a comment then strip it off. 1256 | i = line.find("#") 1257 | if i > 0: 1258 | line = line[:i] 1259 | 1260 | # Split a source into substrings to initialize a new repo. 
1261 | source = line.strip() 1262 | if source: 1263 | # Match any repo options, and get a dict representation. 1264 | for v in re.findall(OPTIONS_MATCHER, source): 1265 | opts = dict(o.split("=") for o in v.strip("[]").split()) 1266 | # Extract the 'signed-by' option for the gpg_key 1267 | gpg_key = opts.pop("signed-by", "") 1268 | options = opts 1269 | 1270 | # Remove any options from the source string and split the string into chunks 1271 | source = re.sub(OPTIONS_MATCHER, "", source) 1272 | chunks = source.split() 1273 | 1274 | # Check we've got a valid list of chunks 1275 | if len(chunks) < 3 or chunks[0] not in VALID_SOURCE_TYPES: 1276 | raise InvalidSourceError("An invalid sources line was found in %s!", filename) 1277 | 1278 | repotype = chunks[0] 1279 | uri = chunks[1] 1280 | release = chunks[2] 1281 | groups = chunks[3:] 1282 | 1283 | return DebianRepository( 1284 | enabled, repotype, uri, release, groups, filename, gpg_key, options 1285 | ) 1286 | else: 1287 | raise InvalidSourceError("An invalid sources line was found in %s!", filename) 1288 | 1289 | def add(self, repo: DebianRepository, default_filename: Optional[bool] = False) -> None: 1290 | """Add a new repository to the system. 
1291 | 1292 | Args: 1293 | repo: a `DebianRepository` object 1294 | default_filename: an (Optional) filename if the default is not desirable 1295 | """ 1296 | new_filename = "{}-{}.list".format( 1297 | DebianRepository.prefix_from_uri(repo.uri), repo.release.replace("/", "-") 1298 | ) 1299 | 1300 | fname = repo.filename or new_filename 1301 | 1302 | options = repo.options if repo.options else {} 1303 | if repo.gpg_key: 1304 | options["signed-by"] = repo.gpg_key 1305 | 1306 | with open(fname, "wb") as f: 1307 | f.write( 1308 | ( 1309 | "{}".format("#" if not repo.enabled else "") 1310 | + "{} {}{} ".format(repo.repotype, repo.make_options_string(), repo.uri) 1311 | + "{} {}\n".format(repo.release, " ".join(repo.groups)) 1312 | ).encode("utf-8") 1313 | ) 1314 | 1315 | self._repository_map["{}-{}-{}".format(repo.repotype, repo.uri, repo.release)] = repo 1316 | 1317 | def disable(self, repo: DebianRepository) -> None: 1318 | """Remove a repository. Disable by default. 1319 | 1320 | Args: 1321 | repo: a `DebianRepository` to disable 1322 | """ 1323 | searcher = "{} {}{} {}".format( 1324 | repo.repotype, repo.make_options_string(), repo.uri, repo.release 1325 | ) 1326 | 1327 | for line in fileinput.input(repo.filename, inplace=True): 1328 | if re.match(r"^{}\s".format(re.escape(searcher)), line): 1329 | print("# {}".format(line), end="") 1330 | else: 1331 | print(line, end="") 1332 | 1333 | self._repository_map["{}-{}-{}".format(repo.repotype, repo.uri, repo.release)] = repo 1334 | --------------------------------------------------------------------------------