├── .gitignore ├── .wokeignore ├── revive.toml ├── .github ├── labeler.yml ├── renovate.json ├── workflows │ ├── triage.yml │ ├── commits.yml │ ├── codeql.yml │ └── security.yml ├── dependabot.yml └── .jira_sync_config.yaml ├── doc ├── .sphinx │ ├── _static │ │ ├── tag.png │ │ ├── favicon.png │ │ ├── header-nav.js │ │ ├── footer.js │ │ ├── github_issue_links.css │ │ ├── footer.css │ │ ├── github_issue_links.js │ │ ├── 404.svg │ │ ├── header.css │ │ └── furo_colors.css │ ├── _integration │ │ ├── tag.png │ │ ├── rtd-search.js │ │ ├── add_config.py │ │ ├── microovn.html │ │ ├── microceph.html │ │ ├── microcloud.html │ │ ├── lxd.html │ │ └── header.css │ ├── pa11y.json │ ├── _templates │ │ ├── base.html │ │ ├── sidebar │ │ │ └── search.html │ │ ├── 404.html │ │ ├── header.html │ │ ├── page.html │ │ └── footer.html │ ├── spellingcheck.yaml │ ├── get_vale_conf.py │ └── build_requirements.py ├── images │ ├── ui_instances.png │ ├── ui_security_warning.png │ ├── ui_certificate_selection.png │ ├── microcloud_logo_dark.svg │ └── microcloud_logo_light.svg ├── .wokeignore ├── .gitignore ├── explanation │ └── index.md ├── integration │ └── README.md ├── .custom_wordlist.txt ├── .wordlist.txt ├── how-to │ ├── index.md │ ├── support.md │ ├── add_service.md │ ├── add_machine.md │ ├── remove_machine.md │ ├── shutdown_machine.md │ ├── recover.md │ ├── install.md │ ├── snaps.md │ ├── ovn_underlay.md │ └── preseed.yaml ├── .readthedocs.yaml ├── index.md ├── reference │ └── index.md └── Makefile.sp ├── api ├── types │ ├── version.go │ ├── status.go │ ├── services.go │ └── session.go ├── session_stop.go ├── response.go ├── services_tokens.go ├── services.go ├── services_auth.go └── session_join.go ├── staticcheck.conf ├── multicast ├── version.go ├── info.go └── discovery_test.go ├── service ├── test_wordlist.go ├── interface.go ├── version.go ├── lxd_join.go ├── version_test.go └── lxd_test.go ├── test ├── lint │ ├── capitalize-errors.sh │ ├── no-oneline-assign-and-test.sh │ ├── 
no-short-form-imports.sh │ ├── negated-is-bool.sh │ └── newline-after-block.sh ├── includes │ └── check.sh └── suites │ └── recover.sh ├── .golangci.yml ├── cmd ├── microcloud │ ├── input.go │ ├── waitready.go │ ├── shutdown.go │ ├── remove.go │ ├── test_input.go │ ├── sql.go │ ├── tokens.go │ ├── main.go │ └── join.go └── tui │ ├── table.go │ ├── asker.go │ ├── console.go │ └── handler.go ├── SECURITY.md ├── version └── version.go ├── client ├── proxy.go └── client.go ├── Makefile ├── go.mod └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | *.snap 2 | -------------------------------------------------------------------------------- /.wokeignore: -------------------------------------------------------------------------------- 1 | doc/.wokeignore -------------------------------------------------------------------------------- /revive.toml: -------------------------------------------------------------------------------- 1 | [rule.exported] 2 | Arguments = ["checkPrivateReceivers"] 3 | -------------------------------------------------------------------------------- /.github/labeler.yml: -------------------------------------------------------------------------------- 1 | Documentation: 2 | - changed-files: 3 | - any-glob-to-any-file: 4 | - doc/**/* -------------------------------------------------------------------------------- /doc/.sphinx/_static/tag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/edlerd/microcloud/main/doc/.sphinx/_static/tag.png -------------------------------------------------------------------------------- /doc/images/ui_instances.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/edlerd/microcloud/main/doc/images/ui_instances.png -------------------------------------------------------------------------------- /api/types/version.go: 
-------------------------------------------------------------------------------- 1 | package types 2 | 3 | // APIVersion is the current API version. 4 | const APIVersion = "1.0" 5 | -------------------------------------------------------------------------------- /doc/.sphinx/_integration/tag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/edlerd/microcloud/main/doc/.sphinx/_integration/tag.png -------------------------------------------------------------------------------- /doc/.sphinx/_static/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/edlerd/microcloud/main/doc/.sphinx/_static/favicon.png -------------------------------------------------------------------------------- /doc/images/ui_security_warning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/edlerd/microcloud/main/doc/images/ui_security_warning.png -------------------------------------------------------------------------------- /doc/images/ui_certificate_selection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/edlerd/microcloud/main/doc/images/ui_certificate_selection.png -------------------------------------------------------------------------------- /staticcheck.conf: -------------------------------------------------------------------------------- 1 | # Checks being ignored: 2 | # ST1005: error strings should not be capitalized 3 | checks = ["inherit", "-ST1005"] 4 | -------------------------------------------------------------------------------- /multicast/version.go: -------------------------------------------------------------------------------- 1 | package multicast 2 | 3 | // Version is the current version of the multicast discovery format. 
4 | const Version = "2.0" 5 | -------------------------------------------------------------------------------- /doc/.sphinx/pa11y.json: -------------------------------------------------------------------------------- 1 | { 2 | "chromeLaunchConfig": { 3 | "args": [ 4 | "--no-sandbox" 5 | ] 6 | }, 7 | "reporter": "cli", 8 | "standard": "WCAG2AA" 9 | } -------------------------------------------------------------------------------- /service/test_wordlist.go: -------------------------------------------------------------------------------- 1 | //go:build test 2 | 3 | package service 4 | 5 | // Testing wordlist that will always print `a a a a`. 6 | var wordlist = `1 a 7 | 2 a 8 | 3 a 9 | 4 a` 10 | -------------------------------------------------------------------------------- /test/lint/capitalize-errors.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eu 2 | 3 | echo "Checking for error messages beginning with lower-case letters..." 4 | 5 | ! git grep --untracked -P -n 'fmt\.Errorf\("[a-z]' -- '*.go' 6 | -------------------------------------------------------------------------------- /doc/.wokeignore: -------------------------------------------------------------------------------- 1 | # the cheat sheets contain a link to a repository with a block word which we 2 | # cannot avoid for now, ie 3 | # https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html 4 | doc-cheat-sheet* 5 | -------------------------------------------------------------------------------- /test/lint/no-oneline-assign-and-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eu 2 | 3 | echo "Checking for oneline assign & test..." 4 | 5 | # Recursively grep go files for if statements that contain assignments. 6 | ! 
git grep --untracked -P -n '^\s+if.*:=.*;.*{\s*$' -- '*.go' 7 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "enabled": false, 4 | "dependencyDashboard": false, 5 | "extends": [ 6 | "config:recommended", 7 | ":disableDependencyDashboard" 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters: 2 | enable: 3 | - gofmt 4 | - misspell 5 | - godot 6 | - whitespace 7 | - gci 8 | linters-settings: 9 | gci: 10 | sections: 11 | - standard 12 | - default 13 | - prefix(github.com/canonical/microcloud/microcloud) 14 | -------------------------------------------------------------------------------- /test/lint/no-short-form-imports.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eu 2 | 3 | echo "Checking for short form imports..." 
4 | 5 | OUT=$(git grep --untracked -n -P '^\s*import\s+"' '*.go' | grep -v ':import "C"$' || true) 6 | if [ -n "${OUT}" ]; then 7 | echo "ERROR: found short form imports: ${OUT}" 8 | exit 1 9 | fi 10 | -------------------------------------------------------------------------------- /doc/.gitignore: -------------------------------------------------------------------------------- 1 | /*env*/ 2 | .sphinx/venv/ 3 | .sphinx/requirements.txt 4 | .sphinx/warnings.txt 5 | .sphinx/.wordlist.dic 6 | .sphinx/.doctrees/ 7 | .sphinx/node_modules/ 8 | package*.json 9 | _build 10 | .DS_Store 11 | __pycache__ 12 | .idea/ 13 | .vscode/ 14 | .sphinx/styles/* 15 | .sphinx/vale.ini 16 | integration/ 17 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/header-nav.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function() { 2 | $(document).on("click", function () { 3 | $(".more-links-dropdown").hide(); 4 | }); 5 | 6 | $('.nav-more-links').click(function(event) { 7 | $('.more-links-dropdown').toggle(); 8 | event.stopPropagation(); 9 | }); 10 | }) 11 | -------------------------------------------------------------------------------- /doc/.sphinx/_integration/rtd-search.js: -------------------------------------------------------------------------------- 1 | document.addEventListener("readthedocs-addons-data-ready", function(event) { 2 | document.querySelector("[role='search'] input").addEventListener("focusin", function() { 3 | const event = new CustomEvent("readthedocs-search-show"); 4 | document.dispatchEvent(event); 5 | }); 6 | }); 7 | -------------------------------------------------------------------------------- /test/lint/negated-is-bool.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eu 2 | 3 | echo "Checking usage of negated shared.Is(True|False)*() functions..." 
4 | 5 | OUT=$(git grep --untracked -P '!(shared\.)?Is(True|False).*\(' '*.go' || true) 6 | if [ -n "${OUT}" ]; then 7 | echo "ERROR: negated shared.Is(True|False)*() function in script: ${OUT}" 8 | exit 1 9 | fi 10 | -------------------------------------------------------------------------------- /doc/explanation/index.md: -------------------------------------------------------------------------------- 1 | (explanation)= 2 | # Explanation 3 | 4 | The explanatory guides in this section introduce you to the concepts used in MicroCloud and help you understand how things fit together. 5 | 6 | ```{toctree} 7 | :maxdepth: 2 8 | 9 | /explanation/microcloud 10 | Initialisation process 11 | ``` 12 | -------------------------------------------------------------------------------- /doc/integration/README.md: -------------------------------------------------------------------------------- 1 | This folder is populated with `make doc-integrate` (from the root of the repository) or `make integrate` (from the doc folder). 2 | The subfolders are clones of the respective repositories and should not be checked in or modified. 3 | 4 | Some of the files are overridden by modified variants. 5 | These modified files are stored in doc/.sphinx/_integration/. 
6 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/footer.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function() { 2 | $(document).on("click", function () { 3 | $(".all-contributors").hide(); 4 | $("#overlay").hide(); 5 | }); 6 | 7 | $('.display-contributors').click(function(event) { 8 | $('.all-contributors').toggle(); 9 | $("#overlay").toggle(); 10 | event.stopPropagation(); 11 | }); 12 | }) 13 | -------------------------------------------------------------------------------- /doc/.sphinx/_templates/base.html: -------------------------------------------------------------------------------- 1 | {% extends "furo/base.html" %} 2 | 3 | {% block theme_scripts %} 4 | 7 | {% endblock theme_scripts %} 8 | 9 | {# ru-fu: don't include the color variables from the conf.py file, but use a 10 |  separate CSS file to save space #} 11 | {% block theme_styles %} 12 | {% endblock theme_styles %} 13 | -------------------------------------------------------------------------------- /cmd/microcloud/input.go: -------------------------------------------------------------------------------- 1 | //go:build !test 2 | 3 | package main 4 | 5 | import ( 6 | "context" 7 | "os" 8 | 9 | "github.com/canonical/microcloud/microcloud/cmd/tui" 10 | ) 11 | 12 | func setupAsker(ctx context.Context) (*tui.InputHandler, error) { 13 | noColor := os.Getenv("NO_COLOR") 14 | if noColor != "" { 15 | tui.DisableColors() 16 | } 17 | 18 | return tui.NewInputHandler(os.Stdin, os.Stdout), nil 19 | } 20 | -------------------------------------------------------------------------------- /doc/.sphinx/_templates/sidebar/search.html: -------------------------------------------------------------------------------- 1 | 7 | 8 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 
To report a security issue, file a [Private Security 2 | Report](https://github.com/canonical/microcloud/security/advisories/new) with 3 | a description of the issue, the steps you took to create the issue, affected 4 | versions, and, if known, mitigations for the issue. 5 | 6 | The [Ubuntu Security disclosure and embargo 7 | policy](https://ubuntu.com/security/disclosure-policy) contains more 8 | information about what you can expect when you contact us and what we expect 9 | from you. 10 | -------------------------------------------------------------------------------- /doc/.custom_wordlist.txt: -------------------------------------------------------------------------------- 1 | ACL 2 | ACLs 3 | AGPLv 4 | backend 5 | balancers 6 | Ceph 7 | Ceph's 8 | CephFS 9 | CIDR 10 | DCO 11 | DHCP 12 | disaggregate 13 | disaggregated 14 | DNS 15 | FDE 16 | GiB 17 | HA 18 | HWE 19 | intra 20 | IOV 21 | IPs 22 | IPv 23 | LTS 24 | LXD 25 | LXD's 26 | mDNS 27 | MicroCeph 28 | MicroCloud 29 | MicroCloud's 30 | MicroOVN 31 | multicast 32 | NAT 33 | NVMe 34 | OVN 35 | pre 36 | preseed 37 | rollout 38 | roadmap 39 | snapd 40 | subnet 41 | uplink 42 | VLAN 43 | VM 44 | VMs 45 | ZFS 46 | subnets 47 | GbE 48 | QSFP 49 | OSDs 50 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | // Package version provides shared version information. 2 | package version 3 | 4 | import ( 5 | "fmt" 6 | ) 7 | 8 | // RawVersion is the current daemon version of MicroCloud. 9 | // LTS versions also include the patch number. 10 | const RawVersion = "2.1.0" 11 | 12 | // LTS should be set if the current version is an LTS (long-term support) version. 13 | const LTS = true 14 | 15 | // Version appends "LTS" to the raw version string if MicroCloud is an LTS version. 
16 | func Version() string { 17 | if LTS { 18 | return fmt.Sprintf("%s LTS", RawVersion) 19 | } 20 | 21 | return RawVersion 22 | } 23 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/github_issue_links.css: -------------------------------------------------------------------------------- 1 | .github-issue-link-container { 2 | padding-right: 0.5rem; 3 | } 4 | .github-issue-link { 5 | font-size: var(--font-size--small); 6 | font-weight: bold; 7 | background-color: #D6410D; 8 | padding: 13px 23px; 9 | text-decoration: none; 10 | } 11 | .github-issue-link:link { 12 | color: #FFFFFF; 13 | } 14 | .github-issue-link:visited { 15 | color: #FFFFFF 16 | } 17 | .muted-link.github-issue-link:hover { 18 | color: #FFFFFF; 19 | text-decoration: underline; 20 | } 21 | .github-issue-link:active { 22 | color: #FFFFFF; 23 | text-decoration: underline; 24 | } 25 | -------------------------------------------------------------------------------- /.github/workflows/triage.yml: -------------------------------------------------------------------------------- 1 | name: Triaging 2 | on: 3 | pull_request_target: 4 | issues: 5 | types: 6 | - labeled 7 | - unlabeled 8 | 9 | permissions: 10 | contents: read 11 | 12 | jobs: 13 | label: 14 | if: github.event.pull_request 15 | permissions: 16 | contents: read # for actions/labeler to determine modified files 17 | pull-requests: write # for actions/labeler to add labels to PRs 18 | name: PR labels 19 | runs-on: ubuntu-22.04 20 | steps: 21 | - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0 22 | with: 23 | repo-token: "${{ secrets.GITHUB_TOKEN }}" 24 | sync-labels: true -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | labels: [] 6 | schedule: 7 | 
interval: "weekly" 8 | target-branch: "main" 9 | 10 | - package-ecosystem: "gomod" 11 | directory: "/" 12 | labels: [] 13 | schedule: 14 | interval: "weekly" 15 | target-branch: "main" 16 | 17 | - package-ecosystem: "github-actions" 18 | directory: "/" 19 | labels: [] 20 | schedule: 21 | interval: "weekly" 22 | target-branch: "v2-edge" 23 | 24 | - package-ecosystem: "gomod" 25 | directory: "/" 26 | labels: [] 27 | schedule: 28 | interval: "weekly" 29 | target-branch: "v2-edge" 30 | -------------------------------------------------------------------------------- /doc/.sphinx/spellingcheck.yaml: -------------------------------------------------------------------------------- 1 | matrix: 2 | - name: rST files 3 | aspell: 4 | lang: en 5 | d: en_GB 6 | dictionary: 7 | wordlists: 8 | - .wordlist.txt 9 | - .custom_wordlist.txt 10 | output: .sphinx/.wordlist.dic 11 | sources: 12 | - _build/**/*.html 13 | pipeline: 14 | - pyspelling.filters.html: 15 | comments: false 16 | attributes: 17 | - title 18 | - alt 19 | ignores: 20 | - code 21 | - pre 22 | - spellexception 23 | - .spellexception 24 | - link 25 | - title 26 | - div.relatedlinks 27 | - strong.command 28 | - div.visually-hidden 29 | - img 30 | - a.p-navigation__link 31 | - a.contributor 32 | -------------------------------------------------------------------------------- /doc/.sphinx/_templates/404.html: -------------------------------------------------------------------------------- 1 | {% extends "page.html" %} 2 | 3 | {% block content -%} 4 |
5 |

Page not found

6 |
7 |
8 |
9 | {{ body }} 10 |
11 |
12 | Penguin with a question mark 13 |
14 |
15 |
16 |
17 | {%- endblock content %} 18 | -------------------------------------------------------------------------------- /doc/.wordlist.txt: -------------------------------------------------------------------------------- 1 | # This wordlist is from the Sphinx starter pack and should not be 2 | # modified. Add any custom terms to .custom_wordlist.txt instead. 3 | 4 | addons 5 | API 6 | APIs 7 | balancer 8 | Charmhub 9 | CLI 10 | Diátaxis 11 | Dqlite 12 | dropdown 13 | EBS 14 | EKS 15 | enablement 16 | favicon 17 | Furo 18 | Git 19 | GitHub 20 | Grafana 21 | IAM 22 | installable 23 | JSON 24 | Juju 25 | Kubeflow 26 | Kubernetes 27 | Launchpad 28 | linter 29 | LTS 30 | Makefile 31 | Matrix 32 | Mattermost 33 | monmap 34 | MyST 35 | namespace 36 | namespaces 37 | NodePort 38 | Numbat 39 | observability 40 | OEM 41 | OLM 42 | Permalink 43 | pre 44 | Quickstart 45 | ReadMe 46 | reST 47 | reStructuredText 48 | RTD 49 | subdirectories 50 | subfolders 51 | subtree 52 | Ubuntu 53 | UI 54 | UUID 55 | VM 56 | YAML 57 | -------------------------------------------------------------------------------- /doc/how-to/index.md: -------------------------------------------------------------------------------- 1 | (howto)= 2 | # How-to guides 3 | 4 | These how-to guides cover key operations and processes in MicroCloud. 5 | 6 | ```{toctree} 7 | :maxdepth: 1 8 | 9 | Install MicroCloud 10 | Initialise MicroCloud 11 | Update and upgrade 12 | Manage the snaps 13 | Configure Ceph networking 14 | Configure OVN underlay 15 | Add a machine 16 | Remove a machine 17 | Shut down a machine 18 | Add a service 19 | Get support 20 | Contribute to MicroCloud 21 | Work with MicroCloud 22 | Recover MicroCloud 23 | ``` 24 | -------------------------------------------------------------------------------- /doc/.sphinx/get_vale_conf.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python 2 | 3 | import requests 4 | import os 5 | 6 | DIR=os.getcwd() 7 | 8 | def main(): 9 | 10 | if os.path.exists(f"{DIR}/.sphinx/styles/Canonical"): 11 | print("Vale directory exists") 12 | else: 13 | os.makedirs(f"{DIR}/.sphinx/styles/Canonical") 14 | 15 | url = "https://api.github.com/repos/canonical/praecepta/contents/styles/Canonical" 16 | r = requests.get(url) 17 | for item in r.json(): 18 | download = requests.get(item["download_url"]) 19 | file = open(".sphinx/styles/Canonical/" + item["name"], "w") 20 | file.write(download.text) 21 | file.close() 22 | 23 | config = requests.get("https://raw.githubusercontent.com/canonical/praecepta/main/vale.ini") 24 | file = open(".sphinx/vale.ini", "w") 25 | file.write(config.text) 26 | file.close() 27 | 28 | if __name__ == "__main__": 29 | main() -------------------------------------------------------------------------------- /doc/.sphinx/_integration/add_config.py: -------------------------------------------------------------------------------- 1 | # Links to other doc sets (used in the header) 2 | # All paths are relative to the URL of one doc set 3 | html_context['microcloud_path'] = "../microcloud" 4 | html_context['microcloud_tag'] = "../microcloud/_static/tag.png" 5 | html_context['lxd_path'] = "../lxd" 6 | html_context['lxd_tag'] = "../lxd/_static/tag.png" 7 | html_context['microceph_path'] = "../microceph" 8 | html_context['microceph_tag'] = "../microceph/_static/tag.png" 9 | html_context['microovn_path'] = "../microovn" 10 | html_context['microovn_tag'] = "../microovn/_static/microovn.png" 11 | 12 | # Add the search JavaScript file 13 | custom_html_js_files.append('rtd-search.js') 14 | 15 | if project == "LXD": 16 | html_baseurl = "https://documentation.ubuntu.com/lxd/en/latest/" 17 | elif project == "MicroCeph": 18 | html_baseurl = "https://canonical-microceph.readthedocs-hosted.com/en/latest/" 19 | elif project == "MicroOVN": 20 | html_baseurl = 
"https://canonical-microovn.readthedocs-hosted.com/en/latest/" 21 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/footer.css: -------------------------------------------------------------------------------- 1 | .display-contributors { 2 | color: var(--color-sidebar-link-text); 3 | cursor: pointer; 4 | } 5 | .all-contributors { 6 | display: none; 7 | z-index: 55; 8 | list-style: none; 9 | position: fixed; 10 | top: 0; 11 | bottom: 0; 12 | left: 0; 13 | right: 0; 14 | width: 200px; 15 | height: 200px; 16 | overflow-y: scroll; 17 | margin: auto; 18 | padding: 0; 19 | background: var(--color-background-primary); 20 | scrollbar-color: var(--color-foreground-border) transparent; 21 | scrollbar-width: thin; 22 | } 23 | 24 | .all-contributors li:hover { 25 | background: var(--color-sidebar-item-background--hover); 26 | width: 100%; 27 | } 28 | 29 | .all-contributors li a{ 30 | color: var(--color-sidebar-link-text); 31 | padding: 1rem; 32 | display: inline-block; 33 | } 34 | 35 | #overlay { 36 | position: fixed; 37 | display: none; 38 | width: 100%; 39 | height: 100%; 40 | top: 0; 41 | left: 0; 42 | right: 0; 43 | bottom: 0; 44 | background-color: rgba(0,0,0,0.5); 45 | z-index: 2; 46 | cursor: pointer; 47 | } 48 | -------------------------------------------------------------------------------- /multicast/info.go: -------------------------------------------------------------------------------- 1 | package multicast 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | ) 7 | 8 | // NetworkInfo represents information about a network interface. 9 | type NetworkInfo struct { 10 | Interface net.Interface 11 | Address string 12 | Subnet *net.IPNet 13 | } 14 | 15 | // GetNetworkInfo returns a slice of NetworkInfo. 
16 | func GetNetworkInfo() ([]NetworkInfo, error) { 17 | networks := []NetworkInfo{} 18 | ifaces, err := net.Interfaces() 19 | if err != nil { 20 | return nil, fmt.Errorf("Failed to get network interfaces: %w", err) 21 | } 22 | 23 | for _, iface := range ifaces { 24 | addrs, err := iface.Addrs() 25 | if err != nil { 26 | continue 27 | } 28 | 29 | if len(addrs) == 0 { 30 | continue 31 | } 32 | 33 | for _, addr := range addrs { 34 | ipNet, ok := addr.(*net.IPNet) 35 | if !ok { 36 | continue 37 | } 38 | 39 | if !ipNet.IP.IsGlobalUnicast() { 40 | continue 41 | } 42 | 43 | networks = append(networks, NetworkInfo{Interface: iface, Address: ipNet.IP.String(), Subnet: ipNet}) 44 | } 45 | } 46 | 47 | return networks, nil 48 | } 49 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/github_issue_links.js: -------------------------------------------------------------------------------- 1 | // if we already have an onload function, save that one 2 | var prev_handler = window.onload; 3 | 4 | window.onload = function() { 5 | // call the previous onload function 6 | if (prev_handler) { 7 | prev_handler(); 8 | } 9 | 10 | const link = document.createElement("a"); 11 | link.classList.add("muted-link"); 12 | link.classList.add("github-issue-link"); 13 | link.text = "Give feedback"; 14 | link.href = ( 15 | github_url 16 | + "/issues/new?" 
17 | + "title=docs%3A+TYPE+YOUR+QUESTION+HERE" 18 | + "&body=*Please describe the question or issue you're facing with " 19 | + `"${document.title}"` 20 | + ".*" 21 | + "%0A%0A%0A%0A%0A" 22 | + "---" 23 | + "%0A" 24 | + `*Reported+from%3A+${location.href}*` 25 | ); 26 | link.target = "_blank"; 27 | 28 | const div = document.createElement("div"); 29 | div.classList.add("github-issue-link-container"); 30 | div.append(link) 31 | 32 | const container = document.querySelector(".article-container > .content-icon-container"); 33 | container.prepend(div); 34 | }; 35 | -------------------------------------------------------------------------------- /api/types/status.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | cephTypes "github.com/canonical/microceph/microceph/api/types" 5 | microTypes "github.com/canonical/microcluster/v2/rest/types" 6 | ovnTypes "github.com/canonical/microovn/microovn/api/types" 7 | ) 8 | 9 | // Status is a set of status information from a cluster member. 10 | type Status struct { 11 | // Name represents the cluster name for the member. 12 | Name string `json:"name" yaml:"name"` 13 | 14 | // Address represnts the cluster address for the member. 15 | Address string `json:"address" yaml:"address"` 16 | 17 | // Clusters is a list of cluster members for each service installed on the member. 18 | Clusters map[ServiceType][]microTypes.ClusterMember `json:"clusters" yaml:"clusters"` 19 | 20 | // OSDs is a list of all OSDs local to the member. 21 | OSDs cephTypes.Disks `json:"osds" yaml:"osds"` 22 | 23 | // CephServices is a list of all ceph services running on this member. 24 | CephServices cephTypes.Services `json:"ceph_services" yaml:"ceph_services"` 25 | 26 | // OVNServices is a list of all ovn services running on this member. 
27 | OVNServices ovnTypes.Services `json:"ovn_services" yaml:"ovn_services"` 28 | } 29 | -------------------------------------------------------------------------------- /doc/.sphinx/_templates/header.html: -------------------------------------------------------------------------------- 1 | 37 | -------------------------------------------------------------------------------- /test/lint/newline-after-block.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eu 2 | 3 | echo "Checking that functional blocks are followed by newlines..." 4 | 5 | # Check all .go files except the protobuf bindings (.pb.go) 6 | files=$(git ls-files --cached --modified --others '*.go' ':!:*.pb.go') 7 | 8 | exit_code=0 9 | for file in $files 10 | do 11 | # This oneliner has a few steps: 12 | # 1. sed: 13 | # a. Check for lines that contain a single closing brace (plus whitespace). 14 | # b. Move the pattern space window forward to the next line. 15 | # c. Match lines that start with a word character. This allows for a closing brace on subsequent lines. 16 | # d. Print the line number. 17 | # 2. xargs: Print the filename next to the line number of the matches (piped). 18 | # 3. If there were no matches, the file name without the line number is printed, use grep to filter it out. 19 | # 4. Replace the space with a colon to make a clickable link. 
20 | RESULT=$(sed -n -e '/^\s*}\s*$/{n;/^\s*\w/{;=}}' "$file" | xargs -L 1 echo "$file" | grep -v '\.go$' | sed 's/ /:/g') 21 | if [ -n "${RESULT}" ]; then 22 | echo "${RESULT}" 23 | exit_code=1 24 | fi 25 | done 26 | 27 | exit $exit_code 28 | -------------------------------------------------------------------------------- /cmd/microcloud/waitready.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | type cmdWaitready struct { 12 | common *CmdControl 13 | 14 | flagTimeout int 15 | } 16 | 17 | func (c *cmdWaitready) Command() *cobra.Command { 18 | cmd := &cobra.Command{ 19 | Use: "waitready", 20 | Short: "Wait for the daemon to be ready to process requests", 21 | RunE: c.Run, 22 | } 23 | 24 | cmd.Flags().IntVarP(&c.flagTimeout, "timeout", "t", 0, "Number of seconds to wait before giving up"+"``") 25 | 26 | return cmd 27 | } 28 | 29 | func (c *cmdWaitready) Run(cmd *cobra.Command, args []string) error { 30 | if len(args) > 0 { 31 | return cmd.Help() 32 | } 33 | 34 | options := microcluster.Args{StateDir: c.common.FlagMicroCloudDir} 35 | m, err := microcluster.App(options) 36 | if err != nil { 37 | return err 38 | } 39 | 40 | ctx := context.Background() 41 | if c.flagTimeout > 0 { 42 | var cancel context.CancelFunc 43 | ctx, cancel = context.WithTimeout(ctx, time.Second*time.Duration(c.flagTimeout)) 44 | defer cancel() 45 | } 46 | 47 | return m.Ready(ctx) 48 | } 49 | -------------------------------------------------------------------------------- /.github/workflows/commits.yml: -------------------------------------------------------------------------------- 1 | name: Commits 2 | on: 3 | pull_request: 4 | 5 | concurrency: 6 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }} 7 | cancel-in-progress: true 8 | 9 | permissions: 10 | contents: read 11 | 12 | 
jobs: 13 | commits: 14 | name: Branch target and CLA 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Check branch target 18 | env: 19 | TARGET: ${{ github.event.pull_request.base.ref }} 20 | TITLE: ${{ github.event.pull_request.title }} 21 | if: ${{ github.actor != 'dependabot[bot]' }} 22 | run: | 23 | set -eux 24 | TARGET_FROM_PR_TITLE="$(echo "${TITLE}" | sed -n 's/.*(\(v[0-9]-\(edge\|candidate\)\))$/\1/p')" 25 | if [ -z "${TARGET_FROM_PR_TITLE}" ]; then 26 | TARGET_FROM_PR_TITLE="main" 27 | else 28 | echo "Branch target overridden from PR title" 29 | fi 30 | [ "${TARGET}" = "${TARGET_FROM_PR_TITLE}" ] && exit 0 31 | 32 | echo "Invalid branch target: ${TARGET} != ${TARGET_FROM_PR_TITLE}" 33 | exit 1 34 | 35 | - name: Check if CLA signed 36 | uses: canonical/has-signed-canonical-cla@046337b42822b7868ad62970988929c79f9c1d40 # 1.2.3 37 | -------------------------------------------------------------------------------- /cmd/microcloud/shutdown.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | type cmdShutdown struct { 12 | common *CmdControl 13 | } 14 | 15 | func (c *cmdShutdown) Command() *cobra.Command { 16 | cmd := &cobra.Command{ 17 | Use: "shutdown", 18 | Short: "Shut down the MicroCloud daemon", 19 | RunE: c.Run, 20 | } 21 | 22 | return cmd 23 | } 24 | 25 | func (c *cmdShutdown) Run(cmd *cobra.Command, args []string) error { 26 | if len(args) != 0 { 27 | return cmd.Help() 28 | } 29 | 30 | options := microcluster.Args{StateDir: c.common.FlagMicroCloudDir} 31 | m, err := microcluster.App(options) 32 | if err != nil { 33 | return err 34 | } 35 | 36 | err = m.Ready(context.Background()) 37 | if err != nil { 38 | return fmt.Errorf("Failed to wait for MicroCloud to get ready: %w", err) 39 | } 40 | 41 | client, err := m.LocalClient() 42 | if err != nil { 43 | return err 
44 | } 45 | 46 | chResult := make(chan error, 1) 47 | go func() { 48 | defer close(chResult) 49 | 50 | err := client.ShutdownDaemon(context.Background()) 51 | if err != nil { 52 | chResult <- err 53 | return 54 | } 55 | }() 56 | 57 | return <-chResult 58 | } 59 | -------------------------------------------------------------------------------- /service/interface.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "crypto/x509" 6 | 7 | "github.com/canonical/microcloud/microcloud/api/types" 8 | ) 9 | 10 | // Service represents a common interface for all MicroCloud services. 11 | type Service interface { 12 | Bootstrap(ctx context.Context) error 13 | Join(ctx context.Context, config JoinConfig) error 14 | 15 | IssueToken(ctx context.Context, peer string) (string, error) 16 | DeleteToken(ctx context.Context, tokenName string, address string) error 17 | 18 | ClusterMembers(ctx context.Context) (map[string]string, error) 19 | // RemoteClusterMembers is called during the pre-init phase of microcluster. 20 | // It allows providing the certificate of the remote microcluster member for mTLS verification. 
21 | RemoteClusterMembers(ctx context.Context, cert *x509.Certificate, address string) (map[string]string, error) 22 | DeleteClusterMember(ctx context.Context, name string, force bool) error 23 | 24 | Type() types.ServiceType 25 | Name() string 26 | Address() string 27 | Port() int64 28 | SetConfig(config map[string]string) 29 | SupportsFeature(ctx context.Context, feature string) (bool, error) 30 | GetVersion(ctx context.Context) (string, error) 31 | IsInitialized(ctx context.Context) (bool, error) 32 | } 33 | -------------------------------------------------------------------------------- /cmd/microcloud/remove.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | 10 | cloudClient "github.com/canonical/microcloud/microcloud/client" 11 | ) 12 | 13 | type cmdRemove struct { 14 | common *CmdControl 15 | 16 | flagForce bool 17 | } 18 | 19 | func (c *cmdRemove) Command() *cobra.Command { 20 | cmd := &cobra.Command{ 21 | Use: "remove ", 22 | Aliases: []string{"rm"}, 23 | Short: "Remove the specified member from all MicroCloud services", 24 | RunE: c.Run, 25 | } 26 | 27 | cmd.Flags().BoolVarP(&c.flagForce, "force", "f", false, "Forcibly remove the cluster member") 28 | 29 | return cmd 30 | } 31 | 32 | func (c *cmdRemove) Run(cmd *cobra.Command, args []string) error { 33 | if len(args) != 1 { 34 | return cmd.Help() 35 | } 36 | 37 | options := microcluster.Args{StateDir: c.common.FlagMicroCloudDir} 38 | m, err := microcluster.App(options) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | err = m.Ready(context.Background()) 44 | if err != nil { 45 | return fmt.Errorf("Failed to wait for MicroCloud to get ready: %w", err) 46 | } 47 | 48 | client, err := m.LocalClient() 49 | if err != nil { 50 | return err 51 | } 52 | 53 | return cloudClient.DeleteClusterMember(context.Background(), client, 
args[0], c.flagForce) 54 | } 55 | -------------------------------------------------------------------------------- /doc/.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the version of Python and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | golang: "1.22" 13 | python: "3.11" 14 | commands: 15 | - git fetch --unshallow || true 16 | - cd doc && make integrate 17 | - cd doc/integration/lxd/ && go build -ldflags "-s -w" -o trimpath -o lxc.bin ./lxc 18 | # Pretend that woke is installed - we don't need it for building 19 | # (workaround until https://github.com/canonical/microovn/pull/168 is merged 20 | # and https://github.com/canonical/microceph/pull/400 is restored) 21 | - ln -s /bin/true doc/integration/microovn/docs/woke 22 | - ln -s /bin/true doc/integration/microceph/docs/woke 23 | - make doc-html-rtd PATH=$PATH:. 
24 | 25 | # Build documentation in the docs/ directory with Sphinx 26 | sphinx: 27 | builder: dirhtml 28 | configuration: doc/conf.py 29 | fail_on_warning: true 30 | 31 | # If using Sphinx, optionally build your docs in additional formats such as PDF 32 | #formats: 33 | # - pdf 34 | 35 | # Optionally declare the Python requirements required to build your docs 36 | python: 37 | install: 38 | - requirements: doc/.sphinx/requirements.txt 39 | -------------------------------------------------------------------------------- /.github/.jira_sync_config.yaml: -------------------------------------------------------------------------------- 1 | settings: 2 | # Jira project key to create the issue in 3 | jira_project_key: "LXD" 4 | 5 | # Dictionary mapping GitHub issue status to Jira issue status 6 | status_mapping: 7 | opened: Untriaged 8 | closed: done 9 | 10 | # (Optional) Jira project components that should be attached to the created issue 11 | # Component names are case-sensitive 12 | components: 13 | - MicroCloud 14 | 15 | # (Optional) GitHub labels. Only issues with one of those labels will be synchronized. 16 | # If not specified, all issues will be synchronized 17 | labels: 18 | - Jira 19 | 20 | # (Optional) (Default: false) Add a new comment in GitHub with a link to Jira created issue 21 | add_gh_comment: false 22 | 23 | # (Optional) (Default: true) Synchronize issue description from GitHub to Jira 24 | sync_description: true 25 | 26 | # (Optional) (Default: true) Synchronize comments from GitHub to Jira 27 | sync_comments: false 28 | 29 | # (Optional) (Default: None) Parent Epic key to link the issue to 30 | epic_key: "LXD-1251" 31 | 32 | # (Optional) Dictionary mapping GitHub issue labels to Jira issue types. 
33 | # If label on the issue is not in specified list, this issue will be created as a Bug 34 | label_mapping: 35 | improvement: Story 36 | feature: Story 37 | investigation: Spike 38 | bug: Bug 39 | -------------------------------------------------------------------------------- /doc/.sphinx/_templates/page.html: -------------------------------------------------------------------------------- 1 | {% extends "furo/page.html" %} 2 | 3 | {% block footer %} 4 | {% include "footer.html" %} 5 | {% endblock footer %} 6 | 7 | {% block body -%} 8 | {% include "header.html" %} 9 | {{ super() }} 10 | {%- endblock body %} 11 | 12 | {% if meta and ((meta.discourse and discourse_prefix) or meta.relatedlinks) %} 13 | {% set furo_hide_toc_orig = furo_hide_toc %} 14 | {% set furo_hide_toc=false %} 15 | {% endif %} 16 | 17 | {% block right_sidebar %} 18 |
19 | {% if not furo_hide_toc_orig %} 20 |
21 | 22 | {{ _("Contents") }} 23 | 24 |
25 |
26 |
27 | {{ toc }} 28 |
29 |
30 | {% endif %} 31 | {% if meta and ((meta.discourse and discourse_prefix) or meta.relatedlinks) %} 32 | 37 | 47 | {% endif %} 48 |
49 | {% endblock right_sidebar %} 50 | -------------------------------------------------------------------------------- /api/session_stop.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "net/http" 7 | 8 | "github.com/canonical/lxd/lxd/response" 9 | "github.com/canonical/lxd/shared/api" 10 | "github.com/canonical/microcluster/v2/rest" 11 | "github.com/canonical/microcluster/v2/state" 12 | 13 | "github.com/canonical/microcloud/microcloud/api/types" 14 | "github.com/canonical/microcloud/microcloud/service" 15 | ) 16 | 17 | // SessionStopCmd represents the /1.0/session/stop API on MicroCloud. 18 | var SessionStopCmd = func(sh *service.Handler) rest.Endpoint { 19 | return rest.Endpoint{ 20 | AllowedBeforeInit: true, 21 | Name: "session/stop", 22 | Path: "session/stop", 23 | 24 | Put: rest.EndpointAction{Handler: authHandlerMTLS(sh, sessionStopPut(sh))}, 25 | } 26 | } 27 | 28 | // sessionStopPut stops the current session. 
29 | func sessionStopPut(sh *service.Handler) func(state state.State, r *http.Request) response.Response { 30 | return func(state state.State, r *http.Request) response.Response { 31 | req := types.SessionStopPut{} 32 | 33 | err := json.NewDecoder(r.Body).Decode(&req) 34 | if err != nil { 35 | return response.BadRequest(err) 36 | } 37 | 38 | err = sh.SessionTransaction(true, func(session *service.Session) error { 39 | err := session.Stop(errors.New(req.Reason)) 40 | if err != nil { 41 | return api.StatusErrorf(http.StatusBadRequest, "Failed to stop session: %w", err) 42 | } 43 | 44 | return nil 45 | }) 46 | if err != nil { 47 | return response.SmartError(err) 48 | } 49 | 50 | return response.EmptySyncResponse 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /api/types/services.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "github.com/canonical/lxd/shared/api" 5 | "github.com/canonical/microceph/microceph/api/types" 6 | ) 7 | 8 | // ServiceType represents supported services. 9 | type ServiceType string 10 | 11 | const ( 12 | // MicroCloud represents a MicroCloud service. 13 | MicroCloud ServiceType = "MicroCloud" 14 | 15 | // MicroCeph represents a MicroCeph service. 16 | MicroCeph ServiceType = "MicroCeph" 17 | 18 | // MicroOVN represents a MicroOVN service. 19 | MicroOVN ServiceType = "MicroOVN" 20 | 21 | // LXD represents a LXD service. 22 | LXD ServiceType = "LXD" 23 | ) 24 | 25 | // ServicesPut represents data for updating the cluster configuration of the MicroCloud services. 
26 | type ServicesPut struct { 27 | Tokens []ServiceToken `json:"tokens" yaml:"tokens"` 28 | Address string `json:"address" yaml:"address"` 29 | 30 | LXDConfig []api.ClusterMemberConfigKey `json:"lxd_config" yaml:"lxd_config"` 31 | CephConfig []types.DisksPost `json:"ceph_config" yaml:"ceph_config"` 32 | OVNConfig map[string]string `json:"ovn_config" yaml:"ovn_config"` 33 | } 34 | 35 | // ServiceToken represents a join token for a service join request. 36 | type ServiceToken struct { 37 | Service ServiceType `json:"service" yaml:"service"` 38 | JoinToken string `json:"join_token" yaml:"join_token"` 39 | } 40 | 41 | // ServiceTokensPost represents a request to issue a join token for a MicroCloud service. 42 | type ServiceTokensPost struct { 43 | ClusterAddress string `json:"cluster_address" yaml:"cluster_address"` 44 | JoinerName string `json:"joiner_name" yaml:"joiner_name"` 45 | } 46 | -------------------------------------------------------------------------------- /doc/how-to/support.md: -------------------------------------------------------------------------------- 1 | (howto-support)= 2 | # How to get support 3 | 4 | We recommend using the following channels for the snaps required to run MicroCloud: 5 | 6 | * For MicroCloud: `2/(stable|candidate|edge)` 7 | * For LXD: `5.21/(stable|candidate|edge)` 8 | * For MicroCeph: `squid/(stable|candidate|edge)` 9 | * For MicroOVN: `24.03/(stable|candidate|edge)` 10 | 11 | ```{note} 12 | The LTS version of MicroCloud is available in the `2` track. 13 | It's recommended to use the `/stable` channels for production deployments. 14 | ``` 15 | 16 | ## Support and community 17 | 18 | The following channels are available for you to interact with the MicroCloud community: 19 | 20 | - You can file bug reports and feature requests as [GitHub issues](https://github.com/canonical/microcloud/issues/new). 21 | - To ask questions, go to the MicroCloud section of our [discussion forum](https://discourse.ubuntu.com/c/lxd/microcloud/). 
22 | 23 | ## Commercial support 24 | 25 | Commercial support for MicroCloud is available through [Ubuntu Pro](https://ubuntu.com/support) (Ubuntu Pro (Infra-only) or full Ubuntu Pro). 26 | The support will cover all LTS versions for five years starting from the day of the release. 27 | 28 | See the full [Ubuntu Pro service description](https://ubuntu.com/legal/ubuntu-pro-description) for detailed information about what support Ubuntu Pro provides. 29 | 30 | ## Documentation 31 | 32 | See the [MicroCloud documentation](https://canonical-microcloud.readthedocs-hosted.com/) for official product documentation. 33 | 34 | You can find additional resources on the [website](https://canonical.com/microcloud) and in the [discussion forum](https://discourse.ubuntu.com/c/lxd/microcloud/). 35 | -------------------------------------------------------------------------------- /doc/.sphinx/_integration/microovn.html: -------------------------------------------------------------------------------- 1 | 42 | -------------------------------------------------------------------------------- /doc/.sphinx/_integration/microceph.html: -------------------------------------------------------------------------------- 1 | 44 | -------------------------------------------------------------------------------- /doc/.sphinx/_integration/microcloud.html: -------------------------------------------------------------------------------- 1 | 44 | -------------------------------------------------------------------------------- /doc/.sphinx/_integration/lxd.html: -------------------------------------------------------------------------------- 1 | 44 | -------------------------------------------------------------------------------- /test/includes/check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Miscellaneous test checks. 
3 | 4 | check_dependencies() { 5 | # shellcheck disable=SC3043 6 | local dep missing 7 | missing="" 8 | 9 | # XXX: make sure we don't detect lxd-installer wrapper by accident 10 | [ -x /usr/sbin/lxc ] && chmod -x /usr/sbin/lxc /usr/sbin/lxd 11 | 12 | for dep in "$@"; do 13 | if ! command -v "$dep" >/dev/null 2>&1; then 14 | [ "$missing" ] && missing="$missing $dep" || missing="$dep" 15 | fi 16 | done 17 | 18 | if [ "$missing" ]; then 19 | echo "Missing dependencies: $missing" >&2 20 | exit 1 21 | fi 22 | 23 | # Instances need to be able to self-report on their state 24 | if ! lxc info | sed -ne '/^api_extensions:/,/^[^-]/ s/^- //p' | grep -qxF "instance_ready_state"; then 25 | echo "Missing LXD instance_ready_state extension" >&2 26 | exit 1 27 | fi 28 | } 29 | 30 | check_empty() { 31 | if [ "$(find "${1}" 2>/dev/null | wc -l)" -gt "1" ]; then 32 | echo "${1} is not empty, content:" 33 | find "${1}" 34 | false 35 | fi 36 | } 37 | 38 | check_snap_channels() { 39 | if [ "${LXD_SNAP_CHANNEL}" != "5.21/edge" ]; then 40 | echo "::warning::lxd channel not set to 5.21/edge, continuing anyway" 41 | fi 42 | 43 | non_edge="" 44 | if [ "${MICROCEPH_SNAP_CHANNEL}" != "latest/edge" ]; then 45 | non_edge="${non_edge} microceph" 46 | fi 47 | if [ "${MICROCLOUD_SNAP_CHANNEL}" != "latest/edge" ]; then 48 | non_edge="${non_edge} microcloud" 49 | fi 50 | if [ "${MICROOVN_SNAP_CHANNEL}" != "latest/edge" ]; then 51 | non_edge="${non_edge} microovn" 52 | fi 53 | 54 | for snap in ${non_edge}; do 55 | echo "::warning::${snap} channel not set to latest/edge, continuing anyway" 56 | done 57 | } 58 | -------------------------------------------------------------------------------- /api/response.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | "strconv" 8 | 9 | "github.com/canonical/lxd/lxd/response" 10 | "github.com/canonical/lxd/shared/api" 11 | ) 12 | 13 | // Response 
represents a response returned from a MicroCloud service. 14 | type Response struct { 15 | response *http.Response 16 | } 17 | 18 | // NewResponse wraps the given http.Response as a Response. 19 | func NewResponse(response *http.Response) response.Response { 20 | return &Response{response: response} 21 | } 22 | 23 | // Render implements response.Response for Response, enabling use with a rest.EndpointAction Handler function. 24 | func (r *Response) Render(w http.ResponseWriter, _ *http.Request) error { 25 | decoder := json.NewDecoder(r.response.Body) 26 | 27 | var responseRaw *api.ResponseRaw 28 | err := decoder.Decode(&responseRaw) 29 | if err != nil { 30 | return err 31 | } 32 | 33 | return r.render(responseRaw, w) 34 | } 35 | 36 | // String implements response.Response for the Response. 37 | func (r *Response) String() string { 38 | return fmt.Sprintf("%s - %s", r.response.Proto, r.response.Status) 39 | } 40 | 41 | // render copies the response status code and headers, and writes the transformed response body to the http.ResponseWriter. 
42 | func (r *Response) render(responseRaw *api.ResponseRaw, w http.ResponseWriter) error { 43 | responseBody, err := json.Marshal(responseRaw) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | for key, value := range r.response.Header { 49 | if key == "Content-Length" { 50 | w.Header().Set("Content-Length", strconv.Itoa(len(responseBody))) 51 | continue 52 | } 53 | 54 | for _, v := range value { 55 | w.Header().Set(key, v) 56 | } 57 | } 58 | 59 | w.WriteHeader(r.response.StatusCode) 60 | 61 | _, err = w.Write(responseBody) 62 | return err 63 | } 64 | -------------------------------------------------------------------------------- /cmd/microcloud/test_input.go: -------------------------------------------------------------------------------- 1 | //go:build test 2 | 3 | package main 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "os" 9 | 10 | "github.com/canonical/microcloud/microcloud/cmd/tui" 11 | ) 12 | 13 | func setupAsker(ctx context.Context) (*tui.InputHandler, error) { 14 | noColor := os.Getenv("NO_COLOR") 15 | if noColor != "" { 16 | tui.DisableColors() 17 | } 18 | 19 | useTestConsole := os.Getenv("TEST_CONSOLE") 20 | if useTestConsole != "1" { 21 | return tui.NewInputHandler(os.Stdin, os.Stdout), nil 22 | } 23 | 24 | fmt.Fprintf(os.Stderr, "%s\n\n", ` 25 | Detected 'TEST_CONSOLE=1', MicroCloud CLI is in testing mode. Terminal interactivity is disabled. 
26 | 27 | Interactive microcloud commands will read text instructions by line: 28 | 29 | cat << EOF | microcloud init 30 | table:select # selects an element in the table 31 | table:select-all # selects all elements in the table 32 | table:select-none # de-selects all elements in the table 33 | table:up # move up in the table 34 | table:down # move down in the table 35 | table:wait # waits before the next instruction 36 | table:expect # waits until exactly peers are available, and errors out if more are found 37 | table:filter # applies filtering text to the table output 38 | table:done # confirms the table selection and exits the table 39 | 40 | # anything not prefixed with 'table:' will be treated as a raw string. This is used for text entry for individual questions 41 | EOF`) 42 | 43 | // This fd is used for debugging what the CLI sees for each question. 44 | file := os.NewFile(uintptr(3), "fd3") 45 | if file == nil { 46 | return nil, fmt.Errorf("Failed to open file descriptor 3") 47 | } 48 | 49 | asker, err := tui.PrepareTestAsker(ctx, os.Stdin, file) 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | return asker, nil 55 | } 56 | -------------------------------------------------------------------------------- /cmd/microcloud/sql.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/canonical/microcluster/v2/microcluster" 8 | "github.com/spf13/cobra" 9 | 10 | "github.com/canonical/microcloud/microcloud/cmd/tui" 11 | ) 12 | 13 | type cmdSQL struct { 14 | common *CmdControl 15 | } 16 | 17 | func (c *cmdSQL) Command() *cobra.Command { 18 | cmd := &cobra.Command{ 19 | Use: "sql ", 20 | Short: "Run a SQL query against the daemon", 21 | RunE: c.Run, 22 | } 23 | 24 | return cmd 25 | } 26 | 27 | func (c *cmdSQL) Run(cmd *cobra.Command, args []string) error { 28 | if len(args) != 1 { 29 | err := cmd.Help() 30 | if err != nil { 31 | return 
fmt.Errorf("Unable to load help: %w", err) 32 | } 33 | 34 | if len(args) == 0 { 35 | return nil 36 | } 37 | } 38 | 39 | options := microcluster.Args{StateDir: c.common.FlagMicroCloudDir} 40 | m, err := microcluster.App(options) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | err = m.Ready(context.Background()) 46 | if err != nil { 47 | return fmt.Errorf("Failed to wait for MicroCloud to get ready: %w", err) 48 | } 49 | 50 | query := args[0] 51 | dump, batch, err := m.SQL(context.Background(), query) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | if dump != "" { 57 | fmt.Print(dump) 58 | return nil 59 | } 60 | 61 | for i, result := range batch.Results { 62 | if len(batch.Results) > 1 { 63 | fmt.Printf("=> Query %d:\n\n", i) 64 | } 65 | 66 | if result.Type == "select" { 67 | rows := make([][]string, len(result.Rows)) 68 | for i, row := range result.Rows { 69 | rowStr := make([]string, len(row)) 70 | for j, c := range row { 71 | rowStr[j] = fmt.Sprintf("%v", c) 72 | } 73 | 74 | rows[i] = rowStr 75 | } 76 | 77 | fmt.Println(tui.NewTable(result.Columns, rows)) 78 | } else { 79 | fmt.Printf("Rows affected: %d\n", result.RowsAffected) 80 | } 81 | 82 | if len(batch.Results) > 1 { 83 | fmt.Printf("\n") 84 | } 85 | } 86 | return nil 87 | } 88 | -------------------------------------------------------------------------------- /service/version.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | 7 | "golang.org/x/mod/semver" 8 | 9 | "github.com/canonical/microcloud/microcloud/api/types" 10 | ) 11 | 12 | const ( 13 | // lxdMinVersion is the minimum version of LXD that fully supports all MicroCloud features. 14 | lxdMinVersion = "5.21" 15 | 16 | // microCephMinVersion is the minimum version of MicroCeph that fully supports all MicroCloud features. 
17 | microCephMinVersion = "19.2" 18 | 19 | // microOVNMinVersion is the minimum version of MicroOVN that fully supports all MicroCloud features. 20 | microOVNMinVersion = "24.03" 21 | ) 22 | 23 | // validateVersion checks that the daemon version for the given service is at a supported version for this version of MicroCloud. 24 | func validateVersion(serviceType types.ServiceType, daemonVersion string) error { 25 | switch serviceType { 26 | case types.LXD: 27 | lxdVersion := semver.Canonical(fmt.Sprintf("v%s", daemonVersion)) 28 | expectedVersion := semver.Canonical(fmt.Sprintf("v%s", lxdMinVersion)) 29 | if semver.Compare(semver.MajorMinor(lxdVersion), semver.MajorMinor(expectedVersion)) != 0 { 30 | return fmt.Errorf("%s version %q is not supported", serviceType, daemonVersion) 31 | } 32 | 33 | case types.MicroOVN: 34 | if daemonVersion != microOVNMinVersion { 35 | return fmt.Errorf("%s version %q is not supported", serviceType, daemonVersion) 36 | } 37 | 38 | case types.MicroCeph: 39 | regex := regexp.MustCompile(`\d+\.\d+\.\d+`) 40 | match := regex.FindString(daemonVersion) 41 | if match == "" { 42 | return fmt.Errorf("%s version format not supported (%s)", serviceType, daemonVersion) 43 | } 44 | 45 | daemonVersion = semver.Canonical(fmt.Sprintf("v%s", match)) 46 | expectedVersion := semver.Canonical(fmt.Sprintf("v%s", microCephMinVersion)) 47 | if semver.Compare(semver.MajorMinor(daemonVersion), semver.MajorMinor(expectedVersion)) != 0 { 48 | return fmt.Errorf("%s version %q is not supported", serviceType, daemonVersion) 49 | } 50 | } 51 | 52 | return nil 53 | } 54 | -------------------------------------------------------------------------------- /doc/how-to/add_service.md: -------------------------------------------------------------------------------- 1 | (howto-add-service)= 2 | # How to add a service 3 | 4 | If you set up the MicroCloud without MicroOVN or MicroCeph initially, you can add those services with the {command}`microcloud service add` command: 5 
| 6 | sudo microcloud service add 7 | 8 | If MicroCloud detects a service is installed but not set up, it will ask to configure the service. 9 | 10 | To add MicroCeph: 11 | 12 | ```{note} 13 | To set up distributed storage, you need at least three additional disks on at least three different machines. 14 | The disks must not contain any partitions. 15 | ``` 16 | 17 | 1. Select `yes` to set up distributed storage. 18 | 19 | 1. Select the disks that you want to use for distributed storage. 20 | You must select at least three disks. 21 | 22 | 1. Select whether you want to wipe any of the disks. 23 | Wiping a disk will destroy all data on it. 24 | 25 | 1. You can choose to optionally encrypt the chosen disks. 26 | 27 | 1. You can choose to optionally set up a CephFS distributed file system. 28 | 29 | 1. Select either an IPv4 or IPv6 CIDR subnet for the Ceph internal traffic. You can leave it empty to use the default value, which is the MicroCloud internal network (see {ref}`howto-ceph-networking` for how to configure it). 30 | 31 | To add MicroOVN: 32 | 33 | 1. Select `yes` to set up distributed networking. 34 | 35 | 1. Select the network interfaces that you want to use (see {ref}`microcloud-networking-uplink`). 36 | You must select one network interface per machine. 37 | 38 | 1. If you want to use IPv4, specify the IPv4 gateway on the uplink network (in CIDR notation) and the first and last IPv4 address in the range that you want to use with LXD. 39 | 40 | 1. If you want to use IPv6, specify the IPv6 gateway on the uplink network (in CIDR notation). 41 | 42 | MicroCloud now starts to bootstrap the cluster for only the new services. 43 | 44 | Monitor the output to see whether all steps complete successfully. 45 | 46 | See {ref}`bootstrapping-process` for more information. 
47 | -------------------------------------------------------------------------------- /api/services_tokens.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | "net/url" 8 | 9 | "github.com/canonical/lxd/lxd/response" 10 | "github.com/canonical/microcluster/v2/rest" 11 | "github.com/canonical/microcluster/v2/state" 12 | "github.com/gorilla/mux" 13 | 14 | "github.com/canonical/microcloud/microcloud/api/types" 15 | "github.com/canonical/microcloud/microcloud/service" 16 | ) 17 | 18 | // ServiceTokensCmd represents the /1.0/services/serviceType/tokens API on MicroCloud. 19 | var ServiceTokensCmd = func(sh *service.Handler) rest.Endpoint { 20 | return rest.Endpoint{ 21 | AllowedBeforeInit: true, 22 | Name: "services/{serviceType}/tokens", 23 | Path: "services/{serviceType}/tokens", 24 | 25 | Post: rest.EndpointAction{Handler: authHandlerMTLS(sh, serviceTokensPost), ProxyTarget: true}, 26 | } 27 | } 28 | 29 | // serviceTokensPost issues a token for service using the MicroCloud proxy. 30 | // Normally a token request to a service is restricted to trusted systems, 31 | // so this endpoint makes use of the estblished mTLS and then proxies the request to the local unix socket of the remote system. 32 | func serviceTokensPost(s state.State, r *http.Request) response.Response { 33 | serviceType, err := url.PathUnescape(mux.Vars(r)["serviceType"]) 34 | if err != nil { 35 | return response.SmartError(err) 36 | } 37 | 38 | // Parse the request. 
39 | req := types.ServiceTokensPost{} 40 | 41 | err = json.NewDecoder(r.Body).Decode(&req) 42 | if err != nil { 43 | return response.BadRequest(err) 44 | } 45 | 46 | sh, err := service.NewHandler(s.Name(), req.ClusterAddress, s.FileSystem().StateDir, types.ServiceType(serviceType)) 47 | if err != nil { 48 | return response.SmartError(err) 49 | } 50 | 51 | token, err := sh.Services[types.ServiceType(serviceType)].IssueToken(r.Context(), req.JoinerName) 52 | if err != nil { 53 | return response.SmartError(fmt.Errorf("Failed to issue %s token for peer %q: %w", serviceType, req.JoinerName, err)) 54 | } 55 | 56 | return response.SyncResponse(true, token) 57 | } 58 | -------------------------------------------------------------------------------- /doc/how-to/add_machine.md: -------------------------------------------------------------------------------- 1 | (howto-add)= 2 | # How to add a machine 3 | ## Interactive configuration 4 | 5 | If you want to add a machine to the MicroCloud cluster after the initialisation, use the {command}`microcloud add` command: 6 | 7 | sudo microcloud add 8 | 9 | On the new machine use the {command}`microcloud join` command: 10 | 11 | sudo microcloud join 12 | 13 | Answer the prompts on both sides to add the machine. 14 | You can also add the `--wipe` flag to automatically wipe any disks you add to the cluster. 15 | 16 | ## Non-interactive configuration 17 | 18 | If you want to automatically add a machine, you can provide a preseed configuration in YAML format to the {command}`microcloud preseed` command: 19 | 20 | cat | microcloud preseed 21 | 22 | In the list of systems include only the new machine and set either `initiator` or `initiator_address` which can point to any machine 23 | that is already part of the MicroCloud. 24 | 25 | Make sure to distribute and run the same preseed configuration on the new and existing system configured using either `initiator` or `initiator_address`. 
26 | 27 | The preseed YAML file must use the following syntax: 28 | 29 | ```{literalinclude} preseed.yaml 30 | :language: YAML 31 | :emphasize-lines: 1-4,7-10,13-14,17-19,22,25-27,30-35,63-66,72,79-87 32 | ``` 33 | 34 | ### Minimal preseed using multicast discovery 35 | 36 | You can use the following minimal preseed file to add another machine to an existing MicroCloud. 37 | In this case `micro01` takes over the role of the initiator. 38 | Multicast discovery is used to find the existing MicroCloud on the network: 39 | 40 | The disk `/dev/sdb` will be used for the machine's local storage pool. 41 | The already existing remote storage pool will be extended with `/dev/sdc`: 42 | 43 | ```yaml 44 | lookup_subnet: 10.0.0.0/24 45 | initiator: micro01 46 | session_passphrase: foo 47 | systems: 48 | - name: micro04 49 | ovn_uplink_interface: eth1 50 | storage: 51 | local: 52 | path: /dev/sdb 53 | ceph: 54 | - path: /dev/sdc 55 | ``` 56 | 57 | Run the {command}`microcloud preseed` command on `micro01` and `micro04` to add the additional machine. 
58 | -------------------------------------------------------------------------------- /doc/images/microcloud_logo_dark.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /service/lxd_join.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "context" 5 | "encoding/pem" 6 | "fmt" 7 | 8 | "github.com/canonical/lxd/lxd/util" 9 | "github.com/canonical/lxd/shared" 10 | "github.com/canonical/lxd/shared/api" 11 | "github.com/canonical/lxd/shared/logger" 12 | "github.com/canonical/lxd/shared/version" 13 | ) 14 | 15 | func (s *LXDService) configFromToken(token string) (*api.ClusterPut, error) { 16 | joinToken, err := shared.JoinTokenDecode(token) 17 | if err != nil { 18 | return nil, fmt.Errorf("Invalid cluster join token: %w", err) 19 | } 20 | 21 | config := &api.ClusterPut{ 22 | Cluster: api.Cluster{ServerName: s.name, Enabled: true}, 23 | ServerAddress: util.CanonicalNetworkAddress(s.address, s.port), 24 | } 25 | 26 | ok, err := s.HasExtension(context.Background(), s.Name(), s.Address(), nil, "explicit_trust_token") 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | if ok { 32 | config.ClusterToken = token 33 | } else { 34 | config.ClusterPassword = token 35 | } 36 | 37 | // Attempt to find a working cluster member to use for joining by retrieving the 38 | // cluster certificate from each address in the join token until we succeed. 
39 | for _, clusterAddress := range joinToken.Addresses { 40 | // Cluster URL 41 | config.ClusterAddress = util.CanonicalNetworkAddress(clusterAddress, s.port) 42 | 43 | // Cluster certificate 44 | cert, err := shared.GetRemoteCertificate(fmt.Sprintf("https://%s", config.ClusterAddress), version.UserAgent) 45 | if err != nil { 46 | logger.Warnf("Error connecting to existing cluster member %q: %v\n", clusterAddress, err) 47 | continue 48 | } 49 | 50 | certDigest := shared.CertFingerprint(cert) 51 | if joinToken.Fingerprint != certDigest { 52 | return nil, fmt.Errorf("Certificate fingerprint mismatch between join token and cluster member %q", clusterAddress) 53 | } 54 | 55 | config.ClusterCertificate = string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})) 56 | 57 | break // We've found a working cluster member. 58 | } 59 | 60 | if config.ClusterCertificate == "" { 61 | return nil, fmt.Errorf("Unable to connect to any of the cluster members specified in join token") 62 | } 63 | 64 | return config, nil 65 | } 66 | -------------------------------------------------------------------------------- /api/types/session.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // SessionRole indicates the role when participating in a trust establishment session. 8 | type SessionRole string 9 | 10 | const ( 11 | // SessionInitiating represents the session of the initiator. 12 | SessionInitiating SessionRole = "initiating" 13 | 14 | // SessionJoining represents the session of the joiner. 15 | SessionJoining SessionRole = "joining" 16 | ) 17 | 18 | // Session represents the websocket protocol used during trust establishment between the client and server. 19 | // Empty fields are omitted to require sending only the necessary information. 
// Session represents the websocket protocol used during trust establishment between the client and server.
// Empty fields are omitted to require sending only the necessary information.
type Session struct {
	Address              string                 `json:"address,omitempty"`
	InitiatorAddress     string                 `json:"initiator_address,omitempty"`
	InitiatorName        string                 `json:"initiator_name,omitempty"`
	InitiatorFingerprint string                 `json:"initiator_fingerprint,omitempty"`
	Interface            string                 `json:"interface,omitempty"`
	Passphrase           string                 `json:"passphrase,omitempty"`
	Services             map[ServiceType]string `json:"services,omitempty"`
	// NOTE(review): encoding/json ignores omitempty on struct-typed fields, so
	// Intent is always emitted (as its zero value when unset) — confirm if
	// that is intended or if this should be *SessionJoinPost.
	Intent           SessionJoinPost   `json:"intent,omitempty"`
	ConfirmedIntents []SessionJoinPost `json:"confirmed_intents,omitempty"`
	Accepted         bool              `json:"accepted,omitempty"`
	// LookupTimeout is serialized by encoding/json as an integer nanosecond count.
	LookupTimeout time.Duration `json:"lookup_timeout,omitempty"`
	Error         string        `json:"error,omitempty"`
}

// SessionJoinPost represents a request made to join an active session.
type SessionJoinPost struct {
	Name        string                 `json:"name" yaml:"name"`
	Version     string                 `json:"version" yaml:"version"`
	Address     string                 `json:"address" yaml:"address"`
	Certificate string                 `json:"certificate" yaml:"certificate"`
	Services    map[ServiceType]string `json:"services" yaml:"services"`
}

// SessionStopPut represents a request made to stop an active session.
type SessionStopPut struct {
	Reason string `json:"reason"`
}
20 | var ServicesCmd = func(sh *service.Handler) rest.Endpoint { 21 | return rest.Endpoint{ 22 | AllowedBeforeInit: true, 23 | Name: "services", 24 | Path: "services", 25 | 26 | Put: rest.EndpointAction{Handler: authHandlerMTLS(sh, servicesPut), ProxyTarget: true}, 27 | } 28 | } 29 | 30 | // servicesPut updates the cluster status of the MicroCloud peer. 31 | func servicesPut(state state.State, r *http.Request) response.Response { 32 | // Parse the request. 33 | req := types.ServicesPut{} 34 | 35 | err := json.NewDecoder(r.Body).Decode(&req) 36 | if err != nil { 37 | return response.BadRequest(err) 38 | } 39 | 40 | joinConfigs := map[types.ServiceType]service.JoinConfig{} 41 | services := make([]types.ServiceType, len(req.Tokens)) 42 | for i, cfg := range req.Tokens { 43 | services[i] = types.ServiceType(cfg.Service) 44 | joinConfigs[cfg.Service] = service.JoinConfig{Token: cfg.JoinToken, LXDConfig: req.LXDConfig, CephConfig: req.CephConfig, OVNConfig: req.OVNConfig} 45 | } 46 | 47 | // Default to the first iface if none specified. 48 | addr := util.NetworkInterfaceAddress() 49 | if req.Address != "" { 50 | addr = req.Address 51 | } 52 | 53 | sh, err := service.NewHandler(state.Name(), addr, state.FileSystem().StateDir, services...) 54 | if err != nil { 55 | return response.SmartError(err) 56 | } 57 | 58 | err = sh.RunConcurrent(types.MicroCloud, types.LXD, func(s service.Service) error { 59 | // set a 5 minute context for completing the join request in case the system is very slow. 
60 | ctx, cancel := context.WithTimeout(r.Context(), 5*time.Minute) 61 | defer cancel() 62 | 63 | err = s.Join(ctx, joinConfigs[s.Type()]) 64 | if err != nil { 65 | return fmt.Errorf("Failed to join %q cluster: %w", s.Type(), err) 66 | } 67 | 68 | return nil 69 | }) 70 | if err != nil { 71 | return response.SmartError(err) 72 | } 73 | 74 | return response.EmptySyncResponse 75 | } 76 | -------------------------------------------------------------------------------- /doc/how-to/remove_machine.md: -------------------------------------------------------------------------------- 1 | (howto-remove)= 2 | # How to remove a machine 3 | 4 | If you want to remove a machine from the MicroCloud cluster after the initialisation, use the {command}`microcloud remove` command: 5 | 6 | sudo microcloud remove 7 | 8 | Before removing the machine, ensure that there are no LXD instances or storage volumes located on the machine, and that there are no MicroCeph OSDs located on the machine. 9 | 10 | See {ref}`how to remove instances ` in the LXD documentation. 11 | See {doc}`how to remove OSDs ` in the MicroCeph documentation. 12 | 13 | ```{note} 14 | If local storage was created, MicroCloud will have also added some default storage volumes that will need to be cleaned up: 15 | 16 | lxc config unset storage.images_volume --target 17 | lxc config unset storage.backups_volume --target 18 | lxc storage volume delete local images --target 19 | lxc storage volume delete local backups --target 20 | 21 | Any additional storage volumes belonging to this machine must also be deleted before removal without the `--force` flag. 22 | ``` 23 | 24 | If the machine is no longer reachable over the network, you can also add the `--force` flag to bypass removal restrictions and skip attempting to clean up the machine. Note that MicroCeph requires `--force` to be used if the remaining cluster size will be less than 3. 
25 | 26 | ```{caution} 27 | Removing a cluster member with `--force` will not attempt to perform any clean-up of the removed machine. All services will need to be fully re-installed before they can be re-initialised. Resources allocated to the MicroCloud like disks and network interfaces may need to be re-initialised as well. 28 | ``` 29 | 30 | ## Reducing the cluster to 1 machine 31 | 32 | When shrinking the cluster down to 1 machine, you must also clean up the Ceph monmap before proceeding, even when using the `--force` flag. 33 | 34 | sudo microceph.ceph mon remove 35 | sudo microceph cluster sql "delete from services where member_id = (select id from core_cluster_members where name='') and service='mon'" 36 | sudo microcloud remove --force 37 | 38 | If the machine is no longer reachable and Ceph is no longer responsive, see the [Ceph documentation](https://docs.ceph.com/en/squid/rados/operations/add-or-rm-mons/#removing-monitors-from-an-unhealthy-cluster) for more recovery steps. 39 | -------------------------------------------------------------------------------- /doc/how-to/shutdown_machine.md: -------------------------------------------------------------------------------- 1 | (howto-shutdown)= 2 | # How to shut down a machine 3 | 4 | ## Stop or live-migrate all instances on the cluster member 5 | 6 | To shut down a machine that is a MicroCloud cluster member, first ensure that it is not hosting any running LXD instances. 7 | 8 | You can stop all instances on a cluster member using the command: 9 | 10 | ``` 11 | lxc stop --all 12 | ``` 13 | 14 | Alternatively, for instances that can be {ref}`live-migrated `, you can move them to another cluster member without stopping them. See: {ref}`lxd:move-instances` for more information. 15 | 16 | You can also temporarily move all instances on a machine to another cluster member by using cluster evacuation, then restore them after you restart. 
This method can live-migrate eligible instances; instances that cannot be live-migrated are automatically stopped and restarted. See: {ref}`lxd:cluster-evacuate` for more information. 17 | 18 | ## Enforce services shutdown and restart order 19 | 20 | During the shutdown process of a MicroCloud cluster member, the LXD service must stop _before_ the MicroCeph and MicroOVN services. At restart, the LXD service must start _after_ MicroCeph and MicroOVN. This order ensures that LXD does not run into issues due to unavailable storage or networking services. 21 | 22 | To enforce this shutdown and restart order, create a configuration file in each cluster member's `/etc/systemd/system/snap.lxd.daemon.service.d` directory to override the behaviour of `snap.lxd.daemon.service`. To simplify creating the directory and configuration file, you can copy and paste the following commands into each cluster member: 23 | 24 | ``` 25 | # Create the directory if it doesn't exist 26 | sudo mkdir -p /etc/systemd/system/snap.lxd.daemon.service.d 27 | 28 | # Create the configuration file 29 | cat << EOF | sudo tee /etc/systemd/system/snap.lxd.daemon.service.d/lxd-shutdown.conf 30 | # Makes sure the LXD daemon stops before Ceph/OVN and restarts after Ceph/OVN 31 | [Unit] 32 | After=snap.microceph.daemon.service 33 | After=snap.microovn.daemon.service 34 | EOF 35 | 36 | # Reload systemd daemon 37 | sudo systemctl daemon-reload 38 | ``` 39 | 40 | You only need to perform this step once for each cluster member. Afterwards, the `snap.lxd.daemon.service` respects this configuration at every shutdown and restart. 41 | 42 | ### Shut down 43 | 44 | Once you have completed the steps above, you can safely shut down and restart the machine as normal. 
-------------------------------------------------------------------------------- /client/proxy.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "fmt" 7 | "net/http" 8 | "net/url" 9 | "strings" 10 | 11 | "github.com/canonical/lxd/shared" 12 | "github.com/canonical/microcluster/v2/client" 13 | 14 | "github.com/canonical/microcloud/microcloud/api/types" 15 | ) 16 | 17 | // AuthConfig is used to configure the various authentication settings during trust establishment. 18 | // In case of unverified mTLS, InsecureSkipVerify has to be set to true. 19 | // In case of partially verified mTLS, the remote servers certificate can be set using TLSServerCertificate. 20 | // Request authentication can be made by setting a valid HMAC. 21 | type AuthConfig struct { 22 | HMAC string 23 | TLSServerCertificate *x509.Certificate 24 | InsecureSkipVerify bool 25 | } 26 | 27 | // UseAuthProxy takes the given microcluster client and HMAC and proxies requests to other services through the MicroCloud API. 28 | // The HMAC will be set in the Authorization header in lieu of mTLS authentication, if present. 29 | // If no HMAC is present mTLS is assumed. 30 | func UseAuthProxy(c *client.Client, serviceType types.ServiceType, conf AuthConfig) (*client.Client, error) { 31 | tp, ok := c.Transport.(*http.Transport) 32 | if !ok { 33 | return nil, fmt.Errorf("Invalid client transport type") 34 | } 35 | 36 | // If the client is a unix client, it may not have any TLS config. 
37 | if tp.TLSClientConfig == nil { 38 | tp.TLSClientConfig = &tls.Config{} 39 | } 40 | 41 | tp.TLSClientConfig.InsecureSkipVerify = conf.InsecureSkipVerify 42 | tp.Proxy = AuthProxy(conf.HMAC, serviceType) 43 | 44 | c.Transport = tp 45 | 46 | return c, nil 47 | } 48 | 49 | // AuthProxy takes a request to a service and sends it to MicroCloud instead, 50 | // to be then forwarded to the unix socket of the corresponding service. 51 | // The HMAC is set in the request header to be used partially in lieu of mTLS authentication. 52 | func AuthProxy(hmac string, serviceType types.ServiceType) func(r *http.Request) (*url.URL, error) { 53 | return func(r *http.Request) (*url.URL, error) { 54 | if hmac != "" { 55 | r.Header.Set("Authorization", hmac) 56 | } 57 | 58 | // MicroCloud itself doesn't need to use the proxy. 59 | if serviceType != types.MicroCloud { 60 | path := fmt.Sprintf("/1.0/services/%s", strings.ToLower(string(serviceType))) 61 | if !strings.HasPrefix(r.URL.Path, path) { 62 | r.URL.Path = path + r.URL.Path 63 | } 64 | } 65 | 66 | return shared.ProxyFromEnvironment(r) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /doc/images/microcloud_logo_light.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /test/suites/recover.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | test_recover() { 4 | reset_systems 4 0 0 5 | 6 | systems=("micro01" "micro02" "micro03" "micro04") 7 | 8 | unset_interactive_vars 9 | export MULTI_NODE="yes" 10 | export LOOKUP_IFACE="enp5s0" 11 | export EXPECT_PEERS=3 12 | export OVN_WARNING="yes" 13 | 14 | join_session init micro01 micro02 micro03 micro04 15 | lxc exec micro01 -- tail -1 out | grep "MicroCloud is ready" -q 16 | lxc 
exec micro02 -- tail -2 out | head -1 | grep "Successfully joined the MicroCloud cluster and closing the session" -q 17 | lxc exec micro03 -- tail -2 out | head -1 | grep "Successfully joined the MicroCloud cluster and closing the session" -q 18 | lxc exec micro04 -- tail -2 out | head -1 | grep "Successfully joined the MicroCloud cluster and closing the session" -q 19 | 20 | for m in "${systems[@]}" ; do 21 | validate_system_lxd "${m}" 4 22 | validate_system_microceph "${m}" 23 | validate_system_microovn "${m}" 24 | done 25 | 26 | # MicroCluster takes a while to update the core_cluster_members table 27 | while lxc exec micro01 --env "TEST_CONSOLE=0" -- microcloud cluster list -f csv | grep -q PENDING; do 28 | sleep 2 29 | done 30 | 31 | for m in "${systems[@]}"; do 32 | lxc exec "${m}" -- sudo snap stop microcloud 33 | done 34 | 35 | lxc exec micro01 --env "TEST_CONSOLE=0" -- microcloud cluster list --local -f yaml 36 | 37 | lxc exec micro01 -- sh -c " 38 | TEST_CONSOLE=0 microcloud cluster list --local -f yaml | 39 | yq ' 40 | sort_by(.name) | 41 | .[0].role = \"voter\" | 42 | .[1].role = \"voter\" | 43 | .[2].role = \"spare\" | 44 | .[3].role = \"spare\"' | 45 | TEST_CONSOLE=0 microcloud cluster recover" 46 | 47 | lxc file pull micro01/var/snap/microcloud/common/state/recovery_db.tar.gz ./ 48 | lxc file push recovery_db.tar.gz micro02/var/snap/microcloud/common/state/recovery_db.tar.gz 49 | 50 | for m in micro01 micro02; do 51 | lxc exec "${m}" -- sudo snap start microcloud 52 | done 53 | 54 | # microcluster takes a long time to update the member roles in the core_cluster_members table 55 | sleep 30 56 | 57 | for m in micro01 micro02; do 58 | cluster_list=$(lxc exec "${m}" --env "TEST_CONSOLE=0" -- microcloud cluster list -f csv) 59 | 60 | # assert_member_role(member_name, role) 61 | assert_member_role() { 62 | [[ $(echo "${cluster_list}" | grep "${1}" | awk -F, '{print $3}') == "${2}" ]] 63 | } 64 | 65 | assert_member_role micro01 voter 66 | assert_member_role 
// baseTableTemplate returns a lipgloss table pre-configured with the given
// header row. When compact is true, all border styling is omitted.
func baseTableTemplate(header []string, compact bool) *table.Table {
	t := table.New()
	t = t.Headers(header...)
	if !compact {
		t = t.Border(lipgloss.NormalBorder())
		t = t.BorderStyle(lipgloss.NewStyle().Foreground(Border))
	} else {
		t = t.Border(lipgloss.Border{})
	}

	t = t.StyleFunc(func(row, col int) lipgloss.Style {
		// Every cell gets one space of horizontal padding and centered text.
		tmpl := lipgloss.NewStyle()
		tmpl = tmpl.Padding(0, 1)
		tmpl = tmpl.Align(lipgloss.Center)

		// NOTE(review): this rewrites the caller's header slice in place while
		// the table renders (row 0 cells are replaced with styled strings) —
		// confirm callers do not reuse the header slice afterwards.
		if row == 0 {
			header[col] = lipgloss.NewStyle().Foreground(Bright).Bold(true).SetString(header[col]).String()
		}

		return tmpl
	})

	return t
}
57 | func FormatData(format string, header []string, rows [][]string, raw any) (string, error) { 58 | switch format { 59 | case TableFormatCSV: 60 | var buf bytes.Buffer 61 | w := csv.NewWriter(&buf) 62 | err := w.WriteAll(rows) 63 | if err != nil { 64 | return "", err 65 | } 66 | 67 | return buf.String(), nil 68 | case TableFormatJSON: 69 | bytes, err := json.Marshal(raw) 70 | if err != nil { 71 | return "", err 72 | } 73 | 74 | return string(bytes), nil 75 | case TableFormatYAML: 76 | bytes, err := yaml.Marshal(raw) 77 | if err != nil { 78 | return "", err 79 | } 80 | 81 | return string(bytes), nil 82 | case TableFormatTable: 83 | return NewTable(header, rows), nil 84 | case TableFormatCompact: 85 | t := baseTableTemplate(header, true) 86 | t = t.Rows(rows...) 87 | 88 | return t.String(), nil 89 | } 90 | 91 | return "", fmt.Errorf("Invalid format (%s)", format) 92 | } 93 | 94 | // NewTable returns the string representation of a table with the given data. 95 | func NewTable(header []string, rows [][]string) string { 96 | t := baseTableTemplate(header, false) 97 | t = t.Rows(rows...) 98 | 99 | return t.String() 100 | } 101 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GOMIN=1.22.7 2 | GOCOVERDIR ?= $(shell go env GOCOVERDIR) 3 | GOPATH ?= $(shell go env GOPATH) 4 | DQLITE_PATH=$(GOPATH)/deps/dqlite 5 | DQLITE_BRANCH=master 6 | 7 | .PHONY: default 8 | default: build 9 | 10 | # Build dependencies 11 | .PHONY: deps 12 | deps: 13 | # dqlite (+raft) 14 | @if [ ! 
-e "$(DQLITE_PATH)" ]; then \ 15 | echo "Retrieving dqlite from ${DQLITE_BRANCH} branch"; \ 16 | git clone --depth=1 --branch "${DQLITE_BRANCH}" "https://github.com/canonical/dqlite" "$(DQLITE_PATH)"; \ 17 | elif [ -e "$(DQLITE_PATH)/.git" ]; then \ 18 | echo "Updating existing dqlite branch"; \ 19 | cd "$(DQLITE_PATH)"; git pull; \ 20 | fi 21 | 22 | cd "$(DQLITE_PATH)" && \ 23 | autoreconf -i && \ 24 | ./configure --enable-build-raft && \ 25 | make 26 | 27 | # Build targets. 28 | .PHONY: build 29 | build: 30 | ifeq "$(GOCOVERDIR)" "" 31 | go install -tags=agent -v ./cmd/microcloud 32 | go install -tags=agent -v ./cmd/microcloudd 33 | else 34 | go install -tags=agent -v -cover ./cmd/microcloud 35 | go install -tags=agent -v -cover ./cmd/microcloudd 36 | endif 37 | 38 | # Build MicroCloud for testing. Replaces EFF word-list, 39 | # and enables feeding input to questions from a file with TEST_CONSOLE=1. 40 | .PHONY: build-test 41 | build-test: 42 | ifeq "$(GOCOVERDIR)" "" 43 | go install -tags=test -v ./cmd/microcloud 44 | go install -tags=test -v ./cmd/microcloudd 45 | else 46 | go install -tags=test -v -cover ./cmd/microcloud 47 | go install -tags=test -v -cover ./cmd/microcloudd 48 | endif 49 | 50 | # Testing targets. 51 | .PHONY: check 52 | check: check-static check-unit check-system 53 | 54 | .PHONY: check-unit 55 | check-unit: 56 | ifeq "$(GOCOVERDIR)" "" 57 | go test ./... 58 | else 59 | go test ./... -cover -test.gocoverdir="${GOCOVERDIR}" 60 | endif 61 | 62 | .PHONY: check-system 63 | check-system: 64 | cd test && ./main.sh 65 | 66 | .PHONY: check-static 67 | check-static: 68 | ifeq ($(shell command -v golangci-lint 2> /dev/null),) 69 | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $$HOME/go/bin 70 | endif 71 | ifeq ($(shell command -v revive 2> /dev/null),) 72 | go install github.com/mgechev/revive@latest 73 | endif 74 | golangci-lint run --timeout 5m 75 | revive -config revive.toml -exclude ./cmd/... 
-set_exit_status ./... 76 | run-parts --exit-on-error --regex '.sh' test/lint 77 | 78 | # Update targets. 79 | .PHONY: update-gomod 80 | update-gomod: 81 | go get -t -v -u ./... 82 | 83 | # Static pins 84 | go get github.com/canonical/lxd@stable-5.21 # Stay on v2 dqlite and LXD LTS client 85 | 86 | go mod tidy -go=$(GOMIN) 87 | 88 | # Update lxd-generate generated database helpers. 89 | .PHONY: update-schema 90 | update-schema: 91 | go generate ./... 92 | gofmt -s -w ./database/ 93 | goimports -w ./database/ 94 | @echo "Code generation completed" 95 | 96 | doc-%: 97 | cd doc && $(MAKE) -f Makefile $* ALLFILES='*.md **/*.md' 98 | 99 | doc: doc-clean-doc doc-html 100 | -------------------------------------------------------------------------------- /doc/how-to/recover.md: -------------------------------------------------------------------------------- 1 | (howto-recover)= 2 | # How to recover a MicroCloud cluster 3 | 4 | ```{note} 5 | Each MicroCloud service uses the [Dqlite](https://dqlite.io/) distributed 6 | database for highly-available storage. While the cluster recovery process is 7 | similar for each service, this document only covers cluster recovery for the 8 | `microcloudd` daemon. For cluster recovery procedures for LXD, MicroCeph and 9 | MicroOVN, see: 10 | 11 | - [LXD Cluster Recovery](https://documentation.ubuntu.com/lxd/en/latest/howto/cluster_recover/) 12 | - [MicroOVN Launchpad Bug](https://bugs.launchpad.net/microovn/+bug/2072377) 13 | - [MicroCeph Issue](https://github.com/canonical/microceph/issues/380) 14 | ``` 15 | 16 | MicroCloud requires a majority of the database voters (a quorum) to be 17 | accessible in order to perform database operations. If a cluster has less than 18 | a quorum of voters up and accessible, then database operations will no longer 19 | be possible on the entire cluster. 20 | 21 | If the loss of quorum is temporary (e.g. 
some members temporarily lose power), 22 | database operations will be restored when the offline members come back online. 23 | 24 | This document describes how to recover database access if the offline members 25 | have been lost without the possibility of recovery (e.g. disk failure). 26 | 27 | ## Recovery procedure 28 | 29 | 1. Shut down all cluster members before performing cluster recovery: 30 | ``` 31 | sudo snap stop microcloud 32 | ``` 33 | 34 | 1. Once all cluster members are shut down, determine which Dqlite database is 35 | most up to date. Look for files in `/var/snap/microcloud/common/state/database` 36 | whose filenames are two numbers separated by a dash (i.e. 37 | `0000000000056436-0000000000056501`). The largest second number in that directory 38 | is the end index of the most recently closed segment (56501 in the example). 39 | The cluster member with the highest end index is the most up to date. 40 | 41 | 1. On the most up-to-date cluster member, use the following command to 42 | reconfigure the Dqlite roles for each member: 43 | ``` 44 | sudo microcloud cluster recover 45 | ``` 46 | 47 | 1. As indicated by the output of the above command, copy 48 | `/var/snap/microcloud/common/state/recovery_db.tar.gz` to the same path on 49 | each cluster member. 50 | 51 | 1. Restart MicroCloud. The recovered database tarball will be loaded on daemon 52 | startup. Once a quorum of voters have been started, the MicroCloud database 53 | will become available. 54 | ``` 55 | sudo snap start microcloud 56 | ``` 57 | 58 | ## Backups 59 | 60 | MicroCloud creates a backup of the database directory before performing the 61 | recovery operation to ensure that no data is lost. The backup tarball is created 62 | in `/var/snap/microcloud/common/state/`. 
// endpointHandler is just a convenience for writing clean return types.
type endpointHandler func(state.State, *http.Request) response.Response

// authHandlerMTLS ensures a request has been authenticated using mTLS.
// Requests over the local unix socket are allowed through unauthenticated.
func authHandlerMTLS(sh *service.Handler, f endpointHandler) endpointHandler {
	return func(s state.State, r *http.Request) response.Response {
		if r.RemoteAddr == "@" {
			logger.Debug("Allowing unauthenticated request through unix socket")

			return f(s, r)
		}

		// Use certificate based authentication between cluster members.
		if r.TLS != nil {
			trustedCerts := s.Remotes().CertificatesNative()
			for _, cert := range r.TLS.PeerCertificates {
				// First evaluate the permanent trust store.
				trusted, _ := util.CheckMutualTLS(*cert, trustedCerts)
				if trusted {
					return f(s, r)
				}

				// Second evaluate the temporary trust store.
				// This is the fallback during the forming of the cluster.
				trusted, _ = util.CheckMutualTLS(*cert, sh.TemporaryTrustStore())
				if trusted {
					return f(s, r)
				}
			}
		}

		return response.Forbidden(fmt.Errorf("Failed to authenticate using mTLS"))
	}
}

// authHandlerHMAC ensures a request has been authenticated using the HMAC in the Authorization header.
// A failed verification registers a failed attempt against the session; once
// too many attempts accumulate, the session is stopped to block further joins.
func authHandlerHMAC(sh *service.Handler, f endpointHandler) endpointHandler {
	return func(s state.State, r *http.Request) response.Response {
		sessionFunc := func(session *service.Session) error {
			// Derive the expected HMAC from the session passphrase.
			h, err := trust.NewHMACArgon2([]byte(session.Passphrase()), nil, trust.NewDefaultHMACConf(HMACMicroCloud10))
			if err != nil {
				return err
			}

			err = trust.HMACEqual(h, r)
			if err != nil {
				attemptErr := session.RegisterFailedAttempt()
				if attemptErr != nil {
					errorCause := errors.New("Stopping session after too many failed attempts")

					// Immediately stop the session to not allow further join attempts.
					stopErr := session.Stop(errorCause)
					if stopErr != nil {
						return fmt.Errorf("Cannot stop session after too many failed attempts: %w", stopErr)
					}

					// Log the error and return it to the caller
					logger.Warn(errorCause.Error())
					return errorCause
				}

				return err
			}

			return nil
		}

		// Run a r/w transaction against the session as we might stop it due to too many failed attempts.
		err := sh.SessionTransaction(false, sessionFunc)
		if err != nil {
			return response.SmartError(err)
		}

		return f(s, r)
	}
}
fmt.Errorf("Failed to wait for MicroCloud to get ready: %w", err) 70 | } 71 | 72 | records, err := m.ListJoinTokens(context.Background()) 73 | if err != nil { 74 | return err 75 | } 76 | 77 | data := make([][]string, len(records)) 78 | for i, record := range records { 79 | data[i] = []string{record.Name, record.Token} 80 | } 81 | 82 | header := []string{"NAME", "TOKENS"} 83 | sort.Sort(cli.SortColumnsNaturally(data)) 84 | 85 | table, err := tui.FormatData(c.flagFormat, header, data, records) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | fmt.Println(table) 91 | 92 | return nil 93 | } 94 | 95 | type cmdTokensRevoke struct { 96 | common *CmdControl 97 | } 98 | 99 | func (c *cmdTokensRevoke) Command() *cobra.Command { 100 | cmd := &cobra.Command{ 101 | Use: "revoke ", 102 | Short: "Revoke the specified join token", 103 | RunE: c.Run, 104 | } 105 | 106 | return cmd 107 | } 108 | 109 | func (c *cmdTokensRevoke) Run(cmd *cobra.Command, args []string) error { 110 | if len(args) != 1 { 111 | return cmd.Help() 112 | } 113 | 114 | options := microcluster.Args{StateDir: c.common.FlagMicroCloudDir} 115 | m, err := microcluster.App(options) 116 | if err != nil { 117 | return err 118 | } 119 | 120 | err = m.Ready(context.Background()) 121 | if err != nil { 122 | return fmt.Errorf("Failed to wait for MicroCloud to get ready: %w", err) 123 | } 124 | 125 | err = m.RevokeJoinToken(context.Background(), args[0]) 126 | if err != nil { 127 | return err 128 | } 129 | 130 | return nil 131 | } 132 | -------------------------------------------------------------------------------- /cmd/microcloud/main.go: -------------------------------------------------------------------------------- 1 | // Package microcloud provides the main client tool. 
// CmdControl holds the flags and shared state common to all microcloud
// command line subcommands.
type CmdControl struct {
	cmd *cobra.Command //nolint:structcheck,unused // FIXME: Remove the nolint flag when this is in use.

	FlagHelp          bool
	FlagVersion       bool
	FlagMicroCloudDir string
	FlagNoColor       bool

	// asker handles interactive input prompts.
	asker *tui.InputHandler
}

// main is the CLI entry point: it enforces root, sets up the interactive
// asker, registers all subcommands on the root cobra command and executes it.
func main() {
	// Only root should run this
	if os.Geteuid() != 0 {
		fmt.Fprintln(os.Stderr, "This must be run as root")
		os.Exit(1)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	asker, err := setupAsker(ctx)
	if err != nil {
		fmt.Println(err.Error())
		os.Exit(1)
	}

	commonCmd := CmdControl{asker: asker}
	app := &cobra.Command{
		Use:               "microcloud",
		Short:             "Command for managing the MicroCloud daemon",
		Version:           version.Version(),
		SilenceUsage:      true,
		CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			// Honour --no-color before any subcommand produces output.
			if commonCmd.FlagNoColor {
				tui.DisableColors()
			}
		},
	}

	app.PersistentFlags().StringVar(&commonCmd.FlagMicroCloudDir, "state-dir", "", "Path to store MicroCloud state information"+"``")
	app.PersistentFlags().BoolVarP(&commonCmd.FlagHelp, "help", "h", false, "Print help")
	app.PersistentFlags().BoolVar(&commonCmd.FlagVersion, "version", false, "Print version number")
	app.PersistentFlags().BoolVar(&commonCmd.FlagNoColor, "no-color", false, "Disable colorization of the CLI")

	app.SetVersionTemplate("{{.Version}}\n")

	// Register each subcommand, all sharing the same CmdControl.
	var cmdInit = cmdInit{common: &commonCmd}
	app.AddCommand(cmdInit.Command())

	var cmdAdd = cmdAdd{common: &commonCmd}
	app.AddCommand(cmdAdd.Command())

	var cmdJoin = cmdJoin{common: &commonCmd}
	app.AddCommand(cmdJoin.Command())

	var cmdPreseed = cmdPreseed{common: &commonCmd}
	app.AddCommand(cmdPreseed.Command())

	var cmdRemove = cmdRemove{common: &commonCmd}
	app.AddCommand(cmdRemove.Command())

	var cmdService = cmdServices{common: &commonCmd}
	app.AddCommand(cmdService.Command())

	var cmdStatus = cmdStatus{common: &commonCmd}
	app.AddCommand(cmdStatus.Command())

	var cmdPeers = cmdClusterMembers{common: &commonCmd}
	app.AddCommand(cmdPeers.Command())

	var cmdShutdown = cmdShutdown{common: &commonCmd}
	app.AddCommand(cmdShutdown.Command())

	var cmdSQL = cmdSQL{common: &commonCmd}
	app.AddCommand(cmdSQL.Command())

	var cmdSecrets = cmdSecrets{common: &commonCmd}
	app.AddCommand(cmdSecrets.Command())

	var cmdWaitready = cmdWaitready{common: &commonCmd}
	app.AddCommand(cmdWaitready.Command())

	app.InitDefaultHelpCmd()

	app.SetErr(&tui.ColorErr{})

	err = app.Execute()
	if err != nil {
		// Cobra already printed the error; just signal failure.
		os.Exit(1)
	}
}
github.com/spf13/cobra v1.8.1 16 | github.com/stretchr/testify v1.10.0 17 | golang.org/x/mod v0.22.0 18 | golang.org/x/net v0.34.0 19 | golang.org/x/sync v0.10.0 20 | golang.org/x/sys v0.29.0 21 | gopkg.in/yaml.v2 v2.4.0 22 | gopkg.in/yaml.v3 v3.0.1 23 | ) 24 | 25 | require ( 26 | github.com/Rican7/retry v0.3.1 // indirect 27 | github.com/armon/go-proxyproto v0.1.0 // indirect 28 | github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect 29 | github.com/canonical/go-dqlite/v2 v2.0.0 // indirect 30 | github.com/charmbracelet/x/term v0.2.1 // indirect 31 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 32 | github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect 33 | github.com/flosch/pongo2 v0.0.0-20200913210552-0d938eb266f3 // indirect 34 | github.com/fsnotify/fsnotify v1.8.0 // indirect 35 | github.com/fvbommel/sortorder v1.1.0 // indirect 36 | github.com/go-jose/go-jose/v4 v4.0.4 // indirect 37 | github.com/go-logr/logr v1.4.2 // indirect 38 | github.com/go-logr/stdr v1.2.2 // indirect 39 | github.com/google/renameio v1.0.1 // indirect 40 | github.com/google/uuid v1.6.0 // indirect 41 | github.com/gorilla/securecookie v1.1.2 // indirect 42 | github.com/gosexy/gettext v0.0.0-20160830220431-74466a0a0c4a // indirect 43 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 44 | github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect 45 | github.com/kr/fs v0.1.0 // indirect 46 | github.com/lucasb-eyer/go-colorful v1.2.0 // indirect 47 | github.com/mattn/go-isatty v0.0.20 // indirect 48 | github.com/mattn/go-localereader v0.0.1 // indirect 49 | github.com/mattn/go-runewidth v0.0.16 // indirect 50 | github.com/mattn/go-sqlite3 v1.14.24 // indirect 51 | github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect 52 | github.com/muesli/cancelreader v0.2.2 // indirect 53 | github.com/muesli/termenv v0.15.2 // indirect 54 | github.com/muhlemmer/gu v0.3.1 // indirect 55 | 
github.com/olekukonko/tablewriter v0.0.5 // indirect 56 | github.com/pkg/errors v0.9.1 // indirect 57 | github.com/pkg/sftp v1.13.7 // indirect 58 | github.com/pkg/xattr v0.4.10 // indirect 59 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 60 | github.com/rivo/uniseg v0.4.7 // indirect 61 | github.com/robfig/cron/v3 v3.0.1 // indirect 62 | github.com/sirupsen/logrus v1.9.3 // indirect 63 | github.com/spf13/pflag v1.0.5 // indirect 64 | github.com/zitadel/logging v0.6.1 // indirect 65 | github.com/zitadel/oidc/v3 v3.32.1 // indirect 66 | github.com/zitadel/schema v1.3.0 // indirect 67 | go.opentelemetry.io/otel v1.31.0 // indirect 68 | go.opentelemetry.io/otel/metric v1.31.0 // indirect 69 | go.opentelemetry.io/otel/trace v1.31.0 // indirect 70 | golang.org/x/crypto v0.32.0 // indirect 71 | golang.org/x/oauth2 v0.23.0 // indirect 72 | golang.org/x/term v0.28.0 // indirect 73 | golang.org/x/text v0.21.0 // indirect 74 | ) 75 | -------------------------------------------------------------------------------- /cmd/microcloud/join.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "time" 8 | 9 | "github.com/spf13/cobra" 10 | 11 | "github.com/canonical/microcloud/microcloud/api" 12 | "github.com/canonical/microcloud/microcloud/api/types" 13 | cloudClient "github.com/canonical/microcloud/microcloud/client" 14 | "github.com/canonical/microcloud/microcloud/service" 15 | ) 16 | 17 | type cmdJoin struct { 18 | common *CmdControl 19 | 20 | flagLookupTimeout int64 21 | flagSessionTimeout int64 22 | flagInitiatorAddress string 23 | } 24 | 25 | func (c *cmdJoin) Command() *cobra.Command { 26 | cmd := &cobra.Command{ 27 | Use: "join", 28 | Short: "Join an existing MicroCloud cluster", 29 | RunE: c.Run, 30 | } 31 | 32 | cmd.Flags().Int64Var(&c.flagLookupTimeout, "lookup-timeout", 0, "Amount of seconds to wait when finding systems on the network. 
Defaults: 60s") 33 | cmd.Flags().Int64Var(&c.flagSessionTimeout, "session-timeout", 0, "Amount of seconds to wait for the trust establishment session. Defaults: 10m") 34 | cmd.Flags().StringVar(&c.flagInitiatorAddress, "initiator-address", "", "Address of the trust establishment session's initiator") 35 | 36 | return cmd 37 | } 38 | 39 | func (c *cmdJoin) Run(cmd *cobra.Command, args []string) error { 40 | if len(args) != 0 { 41 | return cmd.Help() 42 | } 43 | 44 | fmt.Println("Waiting for services to start ...") 45 | err := checkInitialized(c.common.FlagMicroCloudDir, false, false) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | cfg := initConfig{ 51 | bootstrap: false, 52 | common: c.common, 53 | asker: c.common.asker, 54 | systems: map[string]InitSystem{}, 55 | state: map[string]service.SystemInformation{}, 56 | } 57 | 58 | cfg.lookupTimeout = DefaultLookupTimeout 59 | if c.flagLookupTimeout > 0 { 60 | cfg.lookupTimeout = time.Duration(c.flagLookupTimeout) * time.Second 61 | } 62 | 63 | cfg.sessionTimeout = DefaultSessionTimeout 64 | if c.flagSessionTimeout > 0 { 65 | cfg.sessionTimeout = time.Duration(c.flagSessionTimeout) * time.Second 66 | } 67 | 68 | err = cfg.askAddress(c.flagInitiatorAddress) 69 | if err != nil { 70 | return err 71 | } 72 | 73 | cfg.name, err = os.Hostname() 74 | if err != nil { 75 | return fmt.Errorf("Failed to retrieve system hostname: %w", err) 76 | } 77 | 78 | installedServices := []types.ServiceType{types.MicroCloud, types.LXD} 79 | optionalServices := map[types.ServiceType]string{ 80 | types.MicroCeph: api.MicroCephDir, 81 | types.MicroOVN: api.MicroOVNDir, 82 | } 83 | 84 | // Enable auto setup to skip service related questions. 85 | cfg.autoSetup = true 86 | installedServices, err = cfg.askMissingServices(installedServices, optionalServices) 87 | if err != nil { 88 | return err 89 | } 90 | 91 | cfg.autoSetup = false 92 | 93 | s, err := service.NewHandler(cfg.name, cfg.address, c.common.FlagMicroCloudDir, installedServices...) 
94 | if err != nil { 95 | return err 96 | } 97 | 98 | services := make(map[types.ServiceType]string, len(installedServices)) 99 | for _, s := range s.Services { 100 | version, err := s.GetVersion(context.Background()) 101 | if err != nil { 102 | return err 103 | } 104 | 105 | services[s.Type()] = version 106 | } 107 | 108 | passphrase, err := cfg.askPassphrase(s) 109 | if err != nil { 110 | return err 111 | } 112 | 113 | return cfg.runSession(context.Background(), s, types.SessionJoining, cfg.sessionTimeout, func(gw *cloudClient.WebsocketGateway) error { 114 | return cfg.joiningSession(gw, s, services, c.flagInitiatorAddress, passphrase) 115 | }) 116 | } 117 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/404.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /cmd/tui/asker.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | tea "github.com/charmbracelet/bubbletea" 9 | "github.com/charmbracelet/x/ansi" 10 | ) 11 | 12 | // asker represents a text input question asker. 13 | type asker struct { 14 | // Asker needs to embed the type for os.Stdout so that we can change how the bubbletea Renderer places the cursor. 15 | *os.File 16 | windowWidth int 17 | 18 | // cancelled sets whether the asker has been cancelled by an invalid input. 19 | cancelled bool 20 | 21 | // answer stores the user's input to the asker. 22 | answer string 23 | 24 | // question is the question string supplied to the asker. 25 | question string 26 | 27 | // defaultAnswer is the default input used if no user input is given. 28 | defaultAnswer string 29 | 30 | // acceptedAnswers is a list of acceptable user inputs, if defined. 
31 | acceptedAnswers []string 32 | } 33 | 34 | // Update implements the tea.Model Update method, to update the asker on key-presses. 35 | func (a *asker) Update(msg tea.Msg) (tea.Model, tea.Cmd) { 36 | // Grab the dimensions of the terminal window to properly draw the cursor. 37 | windowMsg, ok := msg.(tea.WindowSizeMsg) 38 | if ok { 39 | a.windowWidth = windowMsg.Width 40 | } 41 | 42 | key, ok := msg.(tea.KeyMsg) 43 | if !ok { 44 | return a, nil 45 | } 46 | 47 | if key.Type == tea.KeyEnter { 48 | return a, tea.Quit 49 | } 50 | 51 | if key.Type == tea.KeyBackspace { 52 | if len(a.answer) > 0 { 53 | a.answer = a.answer[:len(a.answer)-1] 54 | } 55 | 56 | return a, nil 57 | } 58 | 59 | if key.Type == tea.KeyDelete { 60 | if len(a.answer) > 0 { 61 | a.answer = a.answer[1:len(a.answer)] 62 | } 63 | 64 | return a, nil 65 | } 66 | 67 | if key.Type == tea.KeyCtrlJ { 68 | return a, tea.Quit 69 | } 70 | 71 | // Ignore key events 72 | if key.Type == tea.KeyLeft || key.Type == tea.KeyRight || key.Type == tea.KeyUp || key.Type == tea.KeyDown { 73 | return a, nil 74 | } 75 | 76 | if key.Type == tea.KeySpace { 77 | a.answer += " " 78 | return a, nil 79 | } 80 | 81 | if key.Type != tea.KeyRunes { 82 | a.cancelled = true 83 | a.answer = "" 84 | return a, tea.Quit 85 | } 86 | 87 | for _, rune := range key.Runes { 88 | a.answer += string(rune) 89 | } 90 | 91 | return a, nil 92 | } 93 | 94 | // View implements the tea.Model View method, to render the asker. 
95 | func (a *asker) View() string { 96 | var acceptedAnswers string 97 | if len(a.acceptedAnswers) > 0 { 98 | acceptedAnswers = Printf(Fmt{Arg: " (%s)"}, Fmt{Arg: strings.Join(a.acceptedAnswers, "/"), Bold: true}) 99 | } 100 | 101 | var defaultAnswer string 102 | if a.defaultAnswer != "" { 103 | defaultAnswer = fmt.Sprintf("default=%s", a.defaultAnswer) 104 | defaultAnswer = Printf(Fmt{Arg: " [%s]"}, Fmt{Arg: defaultAnswer, Bold: true}) 105 | } 106 | 107 | answer := WarningColor(strings.TrimSpace(a.answer), true) 108 | 109 | return fmt.Sprintf("%s%s%s: %s", a.question, acceptedAnswers, defaultAnswer, answer) 110 | } 111 | 112 | // Init implements the tea.Model Init method, to initialize the asker. 113 | func (a *asker) Init() tea.Cmd { 114 | a.cancelled = false 115 | a.answer = "" 116 | 117 | return tea.ShowCursor 118 | } 119 | 120 | // Write changes the cursor position of the line so that it appears in the proper spot at the end of the line. 121 | // The sequence is set at the end of the string by default, causing the string to render the cursor in the first cell. 122 | // Instead, by appending it to the front of the string, the cursor will reset the previously rendered line only. 
123 | func (a *asker) Write(b []byte) (int, error) { 124 | str := string(b) 125 | str, ok := strings.CutSuffix(str, ansi.CursorBackward(a.windowWidth)) 126 | if ok { 127 | str = ansi.CursorBackward(a.windowWidth) + str 128 | } 129 | 130 | return a.File.Write([]byte(str)) 131 | } 132 | -------------------------------------------------------------------------------- /multicast/discovery_test.go: -------------------------------------------------------------------------------- 1 | package multicast 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/suite" 11 | ) 12 | 13 | type multicastSuite struct { 14 | suite.Suite 15 | } 16 | 17 | func TestMulticastSuite(t *testing.T) { 18 | suite.Run(t, new(multicastSuite)) 19 | } 20 | 21 | func (m *multicastSuite) Test_Lookup() { 22 | cases := []struct { 23 | desc string 24 | lookupVersion string 25 | lookupIface string 26 | lookupPort int64 27 | responseInfo ServerInfo 28 | lookupErr error 29 | lookupTimeout time.Duration 30 | modifier func(server *Discovery) 31 | }{ 32 | { 33 | desc: "System with matching version can be looked up", 34 | lookupVersion: "2.0", 35 | lookupIface: "lo", 36 | lookupPort: 9444, 37 | responseInfo: ServerInfo{ 38 | Version: "2.0", 39 | Name: "foo", 40 | Address: "1.2.3.4", 41 | }, 42 | }, 43 | { 44 | desc: "System with maximum allowed server name length, IPv6 address and high version number can be looked up", 45 | lookupVersion: "142.0", 46 | lookupIface: "lo", 47 | lookupPort: 9444, 48 | responseInfo: ServerInfo{ 49 | Version: "142.0", 50 | Name: strings.Repeat("a", 255), 51 | Address: "fd42:c4cc:2e1d:132d:a216:3eff:fecd:9d15", 52 | }, 53 | }, 54 | { 55 | desc: "Cannot lookup system if invalid interface is given", 56 | lookupIface: "invalid-interface", 57 | lookupErr: fmt.Errorf(`Failed to resolve lookup interface "invalid-interface": route ip+net: no such network interface`), 58 | }, 59 | { 60 | desc: "Cannot lookup system if the 
responder is offline", 61 | lookupVersion: "2.0", 62 | lookupIface: "lo", 63 | lookupPort: 9444, 64 | responseInfo: ServerInfo{ 65 | Version: "2.0", 66 | Name: "foo", 67 | Address: "1.2.3.4", 68 | }, 69 | lookupTimeout: 500 * time.Microsecond, 70 | modifier: func(server *Discovery) { 71 | _ = server.StopResponder() 72 | }, 73 | lookupErr: fmt.Errorf("Failed to read from multicast network endpoint: Timeout exceeded"), 74 | }, 75 | { 76 | desc: "Cannot lookup system if the responder uses a different version", 77 | lookupVersion: "3.0", 78 | lookupIface: "lo", 79 | lookupPort: 9444, 80 | responseInfo: ServerInfo{ 81 | Version: "2.0", 82 | Name: "foo", 83 | Address: "1.2.3.4", 84 | }, 85 | lookupTimeout: 500 * time.Microsecond, 86 | lookupErr: fmt.Errorf("Failed to read from multicast network endpoint: Timeout exceeded"), 87 | }, 88 | } 89 | 90 | for _, c := range cases { 91 | m.T().Log(c.desc) 92 | 93 | // Use the loopback interface as it should always be there on any test system. 94 | discovery := NewDiscovery("lo", 9444) 95 | 96 | err := discovery.Respond(context.Background(), c.responseInfo) 97 | m.Require().NoError(err) 98 | 99 | if c.modifier != nil { 100 | c.modifier(discovery) 101 | } 102 | 103 | testDiscovery := NewDiscovery(c.lookupIface, c.lookupPort) 104 | 105 | ctx := context.Background() 106 | var cancel context.CancelFunc 107 | if c.lookupTimeout > 0 { 108 | ctx, cancel = context.WithTimeoutCause(ctx, c.lookupTimeout, fmt.Errorf("Timeout exceeded")) 109 | } 110 | 111 | receivedInfo, err := testDiscovery.Lookup(ctx, c.lookupVersion) 112 | if c.lookupErr == nil { 113 | m.Require().NoError(err) 114 | m.Require().Equal(&c.responseInfo, receivedInfo) 115 | } else { 116 | m.Require().Error(err) 117 | m.Require().Equal(c.lookupErr.Error(), err.Error()) 118 | } 119 | 120 | // Cancel the timeout to avoid leaking the context. 121 | if cancel != nil { 122 | cancel() 123 | } 124 | 125 | // Stop the responder. 
126 | err = discovery.StopResponder() 127 | m.Require().NoError(err) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | MicroCloud logo 3 | MicroCloud logo 4 |


# **MicroCloud**

**MicroCloud** allows you to deploy your own fully functional cloud in minutes.

It’s a snap package that can automatically configure LXD, Ceph, and OVN across a set of servers.
It can leverage multicast to automatically detect other servers on the network, making it possible to set up a complete cluster by running a single command on each of the machines.

MicroCloud creates a small footprint cluster of compute nodes with distributed storage and secure networking, optimized for repeatable, reliable remote deployments. MicroCloud is aimed at edge computing, and anyone in need of a small-scale private cloud.

## **Requirements?**

MicroCloud requires a minimum of three machines.
It supports up to 50 machines.

To use local storage, each machine requires a local disk.
To use distributed storage, at least three additional disks (not only partitions) for use by Ceph are required, and these disks must be on at least three different machines.

Once the simple initialisation is complete, users can launch, run and manage their workloads using system containers or VMs, and otherwise utilise regular LXD functionality.

## **How to get started**

To get started, install the LXD, MicroCeph, MicroOVN and MicroCloud snaps. You can install them all at once with the following command:

```sh
snap install lxd microceph microovn microcloud
```

Then start the bootstrapping process with the following command:

```sh
microcloud init
```

In case you want to set up a multi-machine MicroCloud, run the following command on all the other machines:

```sh
microcloud join
```

Following the simple CLI prompts, a working MicroCloud will be ready within minutes.
46 | 47 | 48 | 49 | The MicroCloud snap drives three other snaps ([LXD](https://canonical-microcloud.readthedocs-hosted.com/en/latest/lxd/), [MicroCeph](https://canonical-microcloud.readthedocs-hosted.com/en/latest/microceph/), and [MicroOVN](https://canonical-microcloud.readthedocs-hosted.com/en/latest/microovn/)), enabling automated deployment of a highly available LXD cluster for compute, with Ceph as the storage driver and OVN as the managed network. 50 | 51 | During initialisation, MicroCloud scrapes the other servers for details and then prompts you to add disks to Ceph and configure the networking setup. 52 | 53 | At the end of this, you’ll have an OVN cluster, a Ceph cluster, and a LXD cluster. LXD itself will have been configured with both networking and storage suitable for use in a cluster. 54 | 55 | 56 | 57 | ## **What about networking?** 58 | 59 | By default, MicroCloud uses MicroOVN for networking, which is a minimal wrapper around OVN (Open Virtual Network). 60 | If you decide to not use MicroOVN, MicroCloud falls back on the Ubuntu fan for basic networking. 61 | 62 | You can optionally add the following dedicated networks: 63 | - a network for Ceph management traffic (also called public traffic) 64 | - a network for internal traffic (also called cluster traffic) 65 | - a network for OVN underlay traffic 66 | 67 | ## **What's next?** 68 | 69 | This is just the beginning of MicroCloud. We’re very excited about what’s coming up next! 
70 | 71 | ### **RESOURCES:** 72 | 73 | - Documentation: https://canonical-microcloud.readthedocs-hosted.com/ 74 | - Find the package at the Snap Store: 75 | 76 | [![Snapcraft logo](https://dashboard.snapcraft.io/site_media/appmedia/2018/04/Snapcraft-logo-bird.png)](https://snapcraft.io/microcloud) 77 | 78 | - Snap package sources: [microcloud-pkg-snap](https://github.com/canonical/microcloud-pkg-snap) 79 | -------------------------------------------------------------------------------- /service/version_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/suite" 9 | 10 | "github.com/canonical/microcloud/microcloud/api/types" 11 | "github.com/canonical/microcloud/microcloud/version" 12 | ) 13 | 14 | type versionSuite struct { 15 | suite.Suite 16 | } 17 | 18 | func TestVersionSuite(t *testing.T) { 19 | suite.Run(t, new(versionSuite)) 20 | } 21 | 22 | func (s *versionSuite) Test_validateVersions() { 23 | cases := []struct { 24 | desc string 25 | version string 26 | service types.ServiceType 27 | expectErr bool 28 | }{ 29 | { 30 | desc: "Valid MicroCeph", 31 | version: fmt.Sprintf("ceph-version: %s.0~git", microCephMinVersion), 32 | service: types.MicroCeph, 33 | expectErr: false, 34 | }, 35 | { 36 | desc: "Valid MicroOVN", 37 | version: microOVNMinVersion, 38 | service: types.MicroOVN, 39 | expectErr: false, 40 | }, 41 | { 42 | desc: "Valid MicroCloud", 43 | version: version.RawVersion, 44 | service: types.MicroCloud, 45 | expectErr: false, 46 | }, 47 | { 48 | desc: "Valid LXD", 49 | version: lxdMinVersion, 50 | service: types.LXD, 51 | expectErr: false, 52 | }, 53 | { 54 | desc: "Invalid MicroCeph", 55 | version: microCephMinVersion, 56 | service: types.MicroCeph, 57 | expectErr: true, 58 | }, 59 | { 60 | desc: "Valid LXD with different patch version", 61 | version: fmt.Sprintf("%s.999", lxdMinVersion), 62 | service: 
types.LXD, 63 | expectErr: false, 64 | }, 65 | { 66 | desc: "Valid MicroCeph with different patch version", 67 | version: fmt.Sprintf("ceph-version: %s.999~git", microCephMinVersion), 68 | service: types.MicroCeph, 69 | expectErr: false, 70 | }, 71 | { 72 | desc: "MicroCloud is always valid because it's local", 73 | version: "", 74 | service: types.MicroCloud, 75 | expectErr: false, 76 | }, 77 | { 78 | desc: "Unsupported LXD with different minor version", 79 | version: fmt.Sprintf("%s.999", strings.Split(lxdMinVersion, ".")[0]), 80 | service: types.LXD, 81 | expectErr: true, 82 | }, 83 | { 84 | desc: "Unsupported MicroCeph with larger minor version", 85 | version: fmt.Sprintf("ceph-version: %s.999~git", strings.Split(microCephMinVersion, ".")[0]), 86 | service: types.MicroCeph, 87 | expectErr: true, 88 | }, 89 | { 90 | desc: "Unsupported MicroCeph with smaller minor version", 91 | version: fmt.Sprintf("ceph-version: %s.0~git", strings.Split(microCephMinVersion, ".")[0]), 92 | service: types.MicroCeph, 93 | expectErr: true, 94 | }, 95 | { 96 | desc: "Unsupported LXD with larger major version", 97 | version: "999.0", 98 | service: types.LXD, 99 | expectErr: true, 100 | }, 101 | { 102 | desc: "Unsupported LXD with smaller major version", 103 | version: "1.0", 104 | service: types.LXD, 105 | expectErr: true, 106 | }, 107 | { 108 | desc: "Unsupported MicroCeph with larger major version", 109 | version: "ceph-version: 999.0.0~git", 110 | service: types.MicroCeph, 111 | expectErr: true, 112 | }, 113 | { 114 | desc: "Unsupported MicroCeph with smaller major version", 115 | version: "ceph-version: 1.0.0~git", 116 | service: types.MicroCeph, 117 | expectErr: true, 118 | }, 119 | 120 | { 121 | desc: "Unsupported MicroOVN (direct string comparison)", 122 | version: microOVNMinVersion + ".0", 123 | service: types.MicroOVN, 124 | expectErr: true, 125 | }, 126 | } 127 | 128 | for i, c := range cases { 129 | s.T().Logf("%d: %s", i, c.desc) 130 | 131 | err := 
validateVersion(c.service, c.version) 132 | if c.expectErr { 133 | s.Error(err) 134 | } else { 135 | s.NoError(err) 136 | } 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ "main", "*" ] 17 | pull_request: 18 | branches: [ "main", "*" ] 19 | schedule: 20 | - cron: '19 20 * * 5' 21 | 22 | jobs: 23 | analyze: 24 | name: Analyze 25 | # Runner size impacts CodeQL analysis time. To learn more, please see: 26 | # - https://gh.io/recommended-hardware-resources-for-running-codeql 27 | # - https://gh.io/supported-runners-and-hardware-resources 28 | # - https://gh.io/using-larger-runners 29 | # Consider using larger runners for possible analysis time improvements. 
30 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} 31 | timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }} 32 | permissions: 33 | # required for all workflows 34 | security-events: write 35 | 36 | # only required for workflows in private repositories 37 | actions: read 38 | contents: read 39 | 40 | strategy: 41 | fail-fast: false 42 | matrix: 43 | language: [ 'go' ] 44 | # CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ] 45 | # Use only 'java-kotlin' to analyze code written in Java, Kotlin or both 46 | # Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both 47 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 48 | 49 | steps: 50 | - name: Checkout repository 51 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 52 | 53 | # Initializes the CodeQL tools for scanning. 54 | - name: Initialize CodeQL 55 | uses: github/codeql-action/init@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 56 | with: 57 | languages: ${{ matrix.language }} 58 | # If you wish to specify custom queries, you can do so here or in a config file. 59 | # By default, queries listed here will override any specified in a config file. 60 | # Prefix the list here with "+" to use these queries and those in the config file. 61 | 62 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 63 | # queries: security-extended,security-and-quality 64 | 65 | 66 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). 
67 | # If this step fails, then you should remove it and run the build manually (see below) 68 | - name: Autobuild 69 | uses: github/codeql-action/autobuild@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 70 | 71 | # ℹ️ Command-line programs to run using the OS shell. 72 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 73 | 74 | # If the Autobuild fails above, remove it and uncomment the following three lines. 75 | # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 76 | 77 | # - run: | 78 | # echo "Run, Build Application using script" 79 | # ./location_of_script_within_repo/buildscript.sh 80 | 81 | - name: Perform CodeQL Analysis 82 | uses: github/codeql-action/analyze@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 83 | with: 84 | category: "/language:${{matrix.language}}" 85 | -------------------------------------------------------------------------------- /service/lxd_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/canonical/lxd/shared/api" 7 | "github.com/stretchr/testify/suite" 8 | ) 9 | 10 | type networkInterfaceSuite struct { 11 | suite.Suite 12 | } 13 | 14 | func TestNetworkInterfaceSuite(t *testing.T) { 15 | suite.Run(t, new(networkInterfaceSuite)) 16 | } 17 | 18 | func (s *versionSuite) Test_defaultNetworkInterfacesFilter() { 19 | cases := []struct { 20 | desc string 21 | network api.Network 22 | state *api.NetworkState 23 | filtered bool 24 | }{ 25 | { 26 | desc: "Valid interface", 27 | network: api.Network{ 28 | Name: "eth0", 29 | Type: "physical", 30 | }, 31 | state: &api.NetworkState{ 32 | State: "up", 33 | }, 34 | filtered: true, 35 | }, 36 | { 37 | desc: "Valid bridge", 38 | network: api.Network{ 39 | Name: "br-valid", 40 | Type: "bridge", 41 | }, 42 | state: &api.NetworkState{ 43 | State: "up", 44 | }, 45 | 
filtered: true, 46 | }, 47 | { 48 | desc: "Invalid managed interface", 49 | network: api.Network{ 50 | Name: "eth0", 51 | Type: "physical", 52 | Managed: true, 53 | }, 54 | filtered: false, 55 | }, 56 | { 57 | desc: "Invalid down interface", 58 | network: api.Network{ 59 | Name: "eth0", 60 | Type: "physical", 61 | }, 62 | state: &api.NetworkState{ 63 | State: "down", 64 | }, 65 | filtered: false, 66 | }, 67 | { 68 | desc: "Invalid managed bridge", 69 | network: api.Network{ 70 | Name: "br-valid", 71 | Type: "bridge", 72 | Managed: true, 73 | }, 74 | filtered: false, 75 | }, 76 | { 77 | desc: "Invalid down bridge", 78 | network: api.Network{ 79 | Name: "br-valid", 80 | Type: "bridge", 81 | }, 82 | state: &api.NetworkState{ 83 | State: "down", 84 | }, 85 | filtered: false, 86 | }, 87 | } 88 | 89 | for i, c := range cases { 90 | s.T().Logf("%d: %s", i, c.desc) 91 | 92 | filtered := defaultNetworkInterfacesFilter(c.network, c.state) 93 | s.Equal(c.filtered, filtered) 94 | } 95 | } 96 | 97 | func (s *versionSuite) Test_ovnNetworkInterfacesFilter() { 98 | cases := []struct { 99 | desc string 100 | network api.Network 101 | state *api.NetworkState 102 | filtered bool 103 | }{ 104 | { 105 | desc: "Valid physical interface", 106 | network: api.Network{ 107 | Name: "eth0", 108 | Type: "physical", 109 | }, 110 | state: &api.NetworkState{ 111 | Type: "broadcast", 112 | }, 113 | filtered: true, 114 | }, 115 | { 116 | desc: "Valid bridge interface", 117 | network: api.Network{ 118 | Name: "br-valid", 119 | Type: "bridge", 120 | }, 121 | state: &api.NetworkState{ 122 | Type: "broadcast", 123 | }, 124 | filtered: true, 125 | }, 126 | { 127 | desc: "Valid bond interface", 128 | network: api.Network{ 129 | Name: "bond0", 130 | Type: "bond", 131 | }, 132 | state: &api.NetworkState{ 133 | Type: "broadcast", 134 | }, 135 | filtered: true, 136 | }, 137 | { 138 | desc: "Valid VLAN interface", 139 | network: api.Network{ 140 | Name: "vlan0", 141 | Type: "vlan", 142 | }, 143 | state: 
&api.NetworkState{ 144 | Type: "broadcast", 145 | }, 146 | filtered: true, 147 | }, 148 | { 149 | desc: "Invalid interface type", 150 | network: api.Network{ 151 | Name: "invalid0", 152 | Type: "invalid", 153 | }, 154 | filtered: false, 155 | }, 156 | { 157 | desc: "Invalid physical interface type", 158 | network: api.Network{ 159 | Name: "lo", 160 | Type: "physical", 161 | }, 162 | state: &api.NetworkState{ 163 | Type: "loopback", 164 | }, 165 | filtered: false, 166 | }, 167 | } 168 | 169 | for i, c := range cases { 170 | s.T().Logf("%d: %s", i, c.desc) 171 | 172 | filtered := ovnNetworkInterfacesFilter(c.network, c.state) 173 | s.Equal(c.filtered, filtered) 174 | } 175 | } 176 | -------------------------------------------------------------------------------- /doc/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | discourse: lxc:15871 3 | relatedlinks: https://snapcraft.io/microcloud 4 | --- 5 | 6 | (home)= 7 | # MicroCloud 8 | 9 | MicroCloud is a collection of services that allows you to deploy your own fully functional cloud in minutes. 10 | The MicroCloud snap automatically configures the different components across a set of servers, making it possible to set up a complete cluster by running a single command on one of the machines. 11 | 12 | Once installed, MicroCloud uses LXD for virtualisation, Ceph for distributed storage, and OVN for networking. 13 | 14 | This way, MicroCloud creates a small footprint cluster of compute nodes with distributed storage and secure networking, optimised for repeatable, reliable remote deployments. 15 | 16 | MicroCloud is aimed at edge computing, and anyone in need of a small-scale private cloud. 17 | 18 | --- 19 | 20 | ## How to use this documentation 21 | 22 | Since MicroCloud is a collection of services, this documentation consists of four different documentation sets. 
23 | ````{only} integrated 24 | You can navigate between these documentation sets by using the links in the top bar. 25 | ```` 26 | 27 | {doc}`index` 28 | : The MicroCloud documentation contains information for getting started with MicroCloud, in addition to conceptual and architectural documentation. 29 | This documentation describes how the different components are used within a MicroCloud setup. 30 | 31 | {doc}`lxd:index` 32 | : LXD is the system container and virtual machine manager used for virtualisation in MicroCloud. 33 | This means that after you install MicroCloud, you will manage your instances through LXD and the LXD UI. 34 | 35 | {doc}`microceph:index` 36 | : MicroCeph provides a lightweight way of deploying and managing a [Ceph](https://ceph.io/en/) cluster. 37 | MicroCloud uses MicroCeph to set up distributed Ceph storage. 38 | 39 | {doc}`microovn:index` 40 | : MicroOVN is a snap-based distribution of [OVN](https://www.ovn.org/). 41 | MicroCloud uses MicroOVN to set up OVN networking. 42 | 43 | ```{note} 44 | The MicroCloud documentation set is targeted specifically at users of MicroCloud. 45 | 46 | The other three documentation set describe the full functionality of each component. 47 | This functionality is available as part of your MicroCloud setup, but not all of it is relevant. 48 | For example, all documentation sets contain installation information, but the components are already installed as part of MicroCloud. 49 | Also, while each component documents how to remove cluster members, you should not remove machines from only one component. 50 | Use MicroCloud to remove cluster members (see {ref}`howto-remove`). 
51 | ``` 52 | 53 | --- 54 | 55 | ## In the MicroCloud documentation 56 | 57 | ````{grid} 1 1 2 2 58 | 59 | ```{grid-item} [Tutorial](/tutorial/get_started) 60 | 61 | **Start here**: a hands-on introduction to MicroCloud for new users 62 | ``` 63 | 64 | ```{grid-item} [How-to guides](/how-to/index) 65 | 66 | **Step-by-step guides** covering key operations and common tasks 67 | ``` 68 | 69 | ```` 70 | 71 | ````{grid} 1 1 2 2 72 | :reverse: 73 | 74 | ```{grid-item} [Reference](/reference/index) 75 | 76 | **Technical information** - specifications, APIs, architecture 77 | ``` 78 | 79 | ```{grid-item} [Explanation](/explanation/index) 80 | 81 | **Discussion and clarification** of key topics 82 | ``` 83 | 84 | ```` 85 | 86 | --- 87 | 88 | ## Project and community 89 | 90 | MicroCloud is a member of the Ubuntu family. It’s an open source project that warmly welcomes community projects, contributions, suggestions, fixes and constructive feedback. 91 | 92 | - [MicroCloud snap](https://snapcraft.io/microcloud) 93 | - [Contribute](https://github.com/canonical/microcloud) 94 | - [Get support](https://discourse.ubuntu.com/c/lxd/microcloud/) 95 | - [Thinking about using MicroCloud for your next project? Get in touch!](https://canonical.com/microcloud) 96 | 97 | 98 | ```{toctree} 99 | :hidden: 100 | :maxdepth: 2 101 | 102 | self 103 | Tutorial 104 | /how-to/index 105 | /reference/index 106 | /explanation/index 107 | -------------------------------------------------------------------------------- /doc/how-to/install.md: -------------------------------------------------------------------------------- 1 | (howto-install)= 2 | # How to install MicroCloud 3 | 4 | (pre-deployment-requirements)= 5 | ## Pre-deployment requirements 6 | 7 | `````{tabs} 8 | ````{group-tab} General 9 | 10 | The requirements in this section apply to all MicroCloud deployments. 
11 | 12 | A physical or virtual machine intended for use as a MicroCloud cluster member must meet the following prerequisites: 13 | 14 | - Software: 15 | - Ubuntu 22.04 or newer (LTS version recommended) 16 | - If you intend to use ZFS storage, use a non-HWE (Hardware Enabled) variant of Ubuntu 22.04 17 | - snapd 2.59 or newer 18 | 19 | - Networking: 20 | - Fixed IP addresses (DHCP not supported) 21 | - At least two network interfaces per cluster member: one for intra-cluster communication and one for external connectivity to the uplink network 22 | - Partially or fully disaggregated networking setups require more interfaces; see: {ref}`howto-ceph-networking` 23 | - To use a {ref}`dedicated underlay network for OVN traffic `, an additional interface per cluster member is required 24 | - Uplink network must support both broadcast and multicast 25 | - Intra-cluster interface must have IPs assigned; external connectivity interface (to uplink) must not have any IPs assigned 26 | 27 | - Storage: 28 | - Disks should be free of existing partitions or file systems 29 | - For local storage, each cluster member must have at least one local disk 30 | - If you intend to use full disk encryption on a cluster member, it must have `snapd` version `2.59.1` or newer installed and the `dm-crypt` kernel module available 31 | - To check if the module exists, run: 32 | 33 | ``` 34 | sudo modinfo dm-crypt 35 | ``` 36 | ```` 37 | 38 | ````{group-tab} Testing or development environments 39 | 40 | ```{important} 41 | These requirements are in addition to those listed in the General tab. 
42 | ``` 43 | 44 | - Physical or virtual machines can be used 45 | - Minimum cluster size: 46 | - 1 member 47 | - Memory: 48 | - Minimum 8 GiB RAM per cluster member 49 | - Storage: 50 | - If high availability is required, use distributed storage with: 51 | - a minimum of 3 cluster members 52 | - a minimum of 3 separate disks located across 3 different members 53 | - Otherwise, local storage is sufficient 54 | ```` 55 | 56 | ````{group-tab} Production environments 57 | 58 | ```{important} 59 | These requirements are in addition to those listed in the General tab. 60 | ``` 61 | 62 | - Physical machines only (no VMs) 63 | - Minimum cluster size: 64 | - 3 members 65 | - For critical deployments, we recommend a minimum of 4 members 66 | - Memory: 67 | - Minimum 32 GiB RAM per cluster member 68 | - Software: 69 | - For production deployments subscribed to Ubuntu Pro, each cluster member must use a LTS version of Ubuntu 70 | - Networking: 71 | - For each cluster member, we recommend dual-port network cards with a minimum 10 GiB capacity, or higher if low latency is essential 72 | - Storage: 73 | - For each cluster member, we recommend at least 3 NVMe disks: 74 | - 1 for OS 75 | - 1 for local storage 76 | - 1 for distributed storage 77 | ```` 78 | ````` 79 | 80 | For detailed information, see: {ref}`reference-requirements`. 81 | 82 | ## Installation 83 | 84 | ```{youtube} https://www.youtube.com/watch?v=M0y0hQ16YuE 85 | ``` 86 | 87 | To install MicroCloud, install all required {ref}`snaps` on all machines that you want to include in your cluster. 88 | 89 | To do so, enter the following commands on all machines: 90 | 91 | sudo snap install lxd --channel=5.21/stable --cohort="+" 92 | sudo snap install microceph --channel=squid/stable --cohort="+" 93 | sudo snap install microovn --channel=24.03/stable --cohort="+" 94 | sudo snap install microcloud --channel=2/stable --cohort="+" 95 | 96 | ```{note} 97 | Make sure to install the same version of the snaps on all machines. 
98 | See {ref}`howto-snap` for more information. 99 | 100 | If you don't want to use MicroCloud's full functionality, you can install only some of the snaps. 101 | However, this is not recommended. 102 | ``` 103 | 104 | After installing the snaps make sure to hold any automatic updates to keep the used snap versions across MicroCloud in sync. 105 | See {ref}`howto-snap-hold-updates` for more information. 106 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/header.css: -------------------------------------------------------------------------------- 1 | .p-navigation { 2 | border-bottom: 1px solid var(--color-sidebar-background-border); 3 | } 4 | 5 | .p-navigation__nav { 6 | background: #333333; 7 | display: flex; 8 | } 9 | 10 | .p-logo { 11 | display: flex !important; 12 | padding-top: 0 !important; 13 | text-decoration: none; 14 | } 15 | 16 | .p-logo-image { 17 | height: 44px; 18 | padding-right: 10px; 19 | } 20 | 21 | .p-logo-text { 22 | margin-top: 18px; 23 | color: white; 24 | text-decoration: none; 25 | } 26 | 27 | ul.p-navigation__links { 28 | display: flex; 29 | list-style: none; 30 | margin-left: 0; 31 | margin-top: auto; 32 | margin-bottom: auto; 33 | max-width: 800px; 34 | width: 100%; 35 | } 36 | 37 | ul.p-navigation__links li { 38 | margin: 0 auto; 39 | text-align: center; 40 | width: 100%; 41 | } 42 | 43 | ul.p-navigation__links li a { 44 | background-color: rgba(0, 0, 0, 0); 45 | border: none; 46 | border-radius: 0; 47 | color: var(--color-sidebar-link-text); 48 | display: block; 49 | font-weight: 400; 50 | line-height: 1.5rem; 51 | margin: 0; 52 | overflow: hidden; 53 | padding: 1rem 0; 54 | position: relative; 55 | text-align: left; 56 | text-overflow: ellipsis; 57 | transition-duration: .1s; 58 | transition-property: background-color, color, opacity; 59 | transition-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); 60 | white-space: nowrap; 61 | width: 100%; 62 | } 63 | 64 | 
ul.p-navigation__links .p-navigation__link { 65 | color: #ffffff; 66 | font-weight: 300; 67 | text-align: center; 68 | text-decoration: none; 69 | } 70 | 71 | ul.p-navigation__links .p-navigation__link:hover { 72 | background-color: #2b2b2b; 73 | } 74 | 75 | ul.p-navigation__links .p-dropdown__link:hover { 76 | background-color: var(--color-sidebar-item-background--hover); 77 | } 78 | 79 | ul.p-navigation__links .p-navigation__sub-link { 80 | background: var(--color-background-primary); 81 | padding: .5rem 0 .5rem .5rem; 82 | font-weight: 300; 83 | } 84 | 85 | ul.p-navigation__links .more-links-dropdown li a { 86 | border-left: 1px solid var(--color-sidebar-background-border); 87 | border-right: 1px solid var(--color-sidebar-background-border); 88 | } 89 | 90 | ul.p-navigation__links .more-links-dropdown li:first-child a { 91 | border-top: 1px solid var(--color-sidebar-background-border); 92 | } 93 | 94 | ul.p-navigation__links .more-links-dropdown li:last-child a { 95 | border-bottom: 1px solid var(--color-sidebar-background-border); 96 | } 97 | 98 | ul.p-navigation__links .p-navigation__logo { 99 | padding: 0.5rem; 100 | } 101 | 102 | ul.p-navigation__links .p-navigation__logo img { 103 | width: 40px; 104 | } 105 | 106 | ul.more-links-dropdown { 107 | display: none; 108 | overflow-x: visible; 109 | height: 0; 110 | z-index: 55; 111 | padding: 0; 112 | position: relative; 113 | list-style: none; 114 | margin-bottom: 0; 115 | margin-top: 0; 116 | } 117 | 118 | .nav-more-links::after { 119 | background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='16' height='16'%3E%3Cpath fill='%23111' d='M8.187 11.748l6.187-6.187-1.06-1.061-5.127 5.127L3.061 4.5 2 5.561z'/%3E%3C/svg%3E"); 120 | background-position: center; 121 | background-repeat: no-repeat; 122 | background-size: contain; 123 | content: ""; 124 | display: block; 125 | filter: invert(100%); 126 | height: 1rem; 127 | pointer-events: none; 128 | position: absolute; 129 | right: 
1rem; 130 | text-indent: calc(100% + 10rem); 131 | top: calc(1rem + 0.25rem); 132 | width: 1rem; 133 | } 134 | 135 | .nav-ubuntu-com { 136 | display: none; 137 | } 138 | 139 | @media only screen and (min-width: 480px) { 140 | ul.p-navigation__links li { 141 | width: 100%; 142 | } 143 | 144 | .nav-ubuntu-com { 145 | display: inherit; 146 | } 147 | } 148 | 149 | @media only screen and (max-width: 800px) { 150 | .nav-more-links { 151 | margin-left: auto !important; 152 | padding-right: 2rem !important; 153 | width: 8rem !important; 154 | } 155 | } 156 | 157 | @media only screen and (min-width: 800px) { 158 | ul.p-navigation__links li { 159 | width: 100% !important; 160 | } 161 | } 162 | 163 | @media only screen and (min-width: 1310px) { 164 | ul.p-navigation__links { 165 | margin-left: calc(50% - 41em); 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /doc/.sphinx/build_requirements.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | sys.path.append('./') 4 | from custom_conf import * 5 | 6 | # The file contains helper functions and the mechanism to build the 7 | # .sphinx/requirements.txt file that is needed to set up the virtual 8 | # environment. 9 | 10 | # You should not do any modifications to this file. Put your custom 11 | # requirements into the custom_required_modules array in the custom_conf.py 12 | # file. If you need to change this file, contribute the changes upstream. 
13 | 14 | legacyCanonicalSphinxExtensionNames = [ 15 | "youtube-links", 16 | "related-links", 17 | "custom-rst-roles", 18 | "terminal-output" 19 | ] 20 | 21 | def IsAnyCanonicalSphinxExtensionUsed(): 22 | for extension in custom_extensions: 23 | if (extension.startswith("canonical.") or 24 | extension in legacyCanonicalSphinxExtensionNames): 25 | return True 26 | 27 | return False 28 | 29 | def IsNotFoundExtensionUsed(): 30 | return "notfound.extension" in custom_extensions 31 | 32 | def IsSphinxTabsUsed(): 33 | for extension in custom_extensions: 34 | if extension.startswith("sphinx_tabs."): 35 | return True 36 | 37 | return False 38 | 39 | def AreRedirectsDefined(): 40 | return ("sphinx_reredirects" in custom_extensions) or ( 41 | ("redirects" in globals()) and \ 42 | (redirects is not None) and \ 43 | (len(redirects) > 0)) 44 | 45 | def IsOpenGraphConfigured(): 46 | if "sphinxext.opengraph" in custom_extensions: 47 | return True 48 | 49 | for global_variable_name in list(globals()): 50 | if global_variable_name.startswith("ogp_"): 51 | return True 52 | 53 | return False 54 | 55 | def IsMyStParserUsed(): 56 | return ("myst_parser" in custom_extensions) or \ 57 | ("custom_myst_extensions" in globals()) 58 | 59 | def DeduplicateExtensions(extensionNames: [str]): 60 | extensionNames = dict.fromkeys(extensionNames) 61 | resultList = [] 62 | encounteredCanonicalExtensions = [] 63 | 64 | for extensionName in extensionNames: 65 | if extensionName in legacyCanonicalSphinxExtensionNames: 66 | extensionName = "canonical." 
+ extensionName 67 | 68 | if extensionName.startswith("canonical."): 69 | if extensionName not in encounteredCanonicalExtensions: 70 | encounteredCanonicalExtensions.append(extensionName) 71 | resultList.append(extensionName) 72 | else: 73 | resultList.append(extensionName) 74 | 75 | return resultList 76 | 77 | if __name__ == "__main__": 78 | requirements = [ 79 | "furo", 80 | "pyspelling", 81 | "sphinx", 82 | "sphinx-autobuild", 83 | "sphinx-copybutton", 84 | "sphinx-design", 85 | "sphinxcontrib-jquery", 86 | "watchfiles", 87 | "GitPython" 88 | 89 | ] 90 | 91 | requirements.extend(custom_required_modules) 92 | 93 | if IsAnyCanonicalSphinxExtensionUsed(): 94 | requirements.append("canonical-sphinx-extensions") 95 | 96 | if IsNotFoundExtensionUsed(): 97 | requirements.append("sphinx-notfound-page") 98 | 99 | if IsSphinxTabsUsed(): 100 | requirements.append("sphinx-tabs") 101 | 102 | if AreRedirectsDefined(): 103 | requirements.append("sphinx-reredirects") 104 | 105 | if IsOpenGraphConfigured(): 106 | requirements.append("sphinxext-opengraph") 107 | 108 | if IsMyStParserUsed(): 109 | requirements.append("myst-parser") 110 | requirements.append("linkify-it-py") 111 | 112 | # removes duplicate entries 113 | requirements = list(dict.fromkeys(requirements)) 114 | requirements.sort() 115 | 116 | with open(".sphinx/requirements.txt", 'w') as requirements_file: 117 | requirements_file.write( 118 | "# DO NOT MODIFY THIS FILE DIRECTLY!\n" 119 | "#\n" 120 | "# This file is generated automatically.\n" 121 | "# Add custom requirements to the custom_required_modules\n" 122 | "# array in the custom_conf.py file and run:\n" 123 | "# make clean && make install\n") 124 | 125 | for requirement in requirements: 126 | requirements_file.write(requirement) 127 | requirements_file.write('\n') 128 | -------------------------------------------------------------------------------- /api/session_join.go: -------------------------------------------------------------------------------- 1 | 
package api 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "net/http" 9 | "time" 10 | 11 | "github.com/canonical/lxd/lxd/response" 12 | "github.com/canonical/lxd/shared" 13 | "github.com/canonical/lxd/shared/api" 14 | "github.com/canonical/microcluster/v2/rest" 15 | "github.com/canonical/microcluster/v2/state" 16 | 17 | "github.com/canonical/microcloud/microcloud/api/types" 18 | "github.com/canonical/microcloud/microcloud/service" 19 | ) 20 | 21 | // SessionJoinCmd represents the /1.0/session/join API on MicroCloud. 22 | var SessionJoinCmd = func(sh *service.Handler) rest.Endpoint { 23 | return rest.Endpoint{ 24 | AllowedBeforeInit: true, 25 | Name: "session/join", 26 | Path: "session/join", 27 | 28 | Post: rest.EndpointAction{Handler: authHandlerHMAC(sh, sessionJoinPost(sh)), AllowUntrusted: true}, 29 | } 30 | } 31 | 32 | // sessionJoinPost receives join intent requests from new potential members. 33 | func sessionJoinPost(sh *service.Handler) func(state state.State, r *http.Request) response.Response { 34 | return func(state state.State, r *http.Request) response.Response { 35 | // Apply delay right at the beginning before doing any validation. 36 | // This limits the number of join attempts that can be made by an attacker. 37 | select { 38 | case <-time.After(100 * time.Millisecond): 39 | case <-r.Context().Done(): 40 | return response.InternalError(errors.New("Request cancelled")) 41 | } 42 | 43 | // Parse the request. 44 | req := types.SessionJoinPost{} 45 | 46 | err := json.NewDecoder(r.Body).Decode(&req) 47 | if err != nil { 48 | return response.BadRequest(err) 49 | } 50 | 51 | err = sh.SessionTransaction(true, func(session *service.Session) error { 52 | // Only validate the intent (services) on the initiator. 53 | // The joiner has to accept the services from the initiator. 
54 | if session.Role() == types.SessionInitiating { 55 | err = validateIntent(r.Context(), sh, req) 56 | if err != nil { 57 | return api.NewStatusError(http.StatusBadRequest, err.Error()) 58 | } 59 | } 60 | 61 | fingerprint, err := shared.CertFingerprintStr(req.Certificate) 62 | if err != nil { 63 | return api.StatusErrorf(http.StatusBadRequest, "Failed to get fingerprint: %w", err) 64 | } 65 | 66 | err = session.RegisterIntent(fingerprint) 67 | if err != nil { 68 | return api.StatusErrorf(http.StatusBadRequest, "Failed to register join intent: %w", err) 69 | } 70 | 71 | // Prevent locking in case there isn't anymore an active consumer reading on the channel. 72 | // This can happen if the initiator's websocket connection isn't anymore active. 73 | // Wait up to 10 seconds for an active consumer. 74 | // When the initiator returns the dice-generated passphrase, a joiner can go ahead and send 75 | // its intent to join. If the initiator hasn't yet started to listen on join intents (too slow), 76 | // the API might return an error as there isn't yet any active consumer. 77 | ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second) 78 | defer cancel() 79 | 80 | select { 81 | case session.IntentCh() <- req: 82 | return nil 83 | case <-ctx.Done(): 84 | return fmt.Errorf("Timeout waiting for an active consumer of the join intent") 85 | } 86 | }) 87 | 88 | return response.SmartError(err) 89 | } 90 | } 91 | 92 | // validateIntent validates the given join intent. 93 | // It checks whether or not the peer is missing any of our services and returns an error if one is missing. 94 | // Also compares each service's daemon version between the joiner and initiator. 95 | func validateIntent(ctx context.Context, sh *service.Handler, intent types.SessionJoinPost) error { 96 | // Reject any peers that are missing our services. 
97 | for _, service := range sh.Services { 98 | intentVersion, ok := intent.Services[service.Type()] 99 | if !ok { 100 | return fmt.Errorf("Rejecting peer %q due to missing services (%s)", intent.Name, string(service.Type())) 101 | } 102 | 103 | version, err := service.GetVersion(ctx) 104 | if err != nil { 105 | return fmt.Errorf("Unable to determine initiator's %s version: %w", service.Type(), err) 106 | } 107 | 108 | if intentVersion != version { 109 | return fmt.Errorf("Rejecting peer %q due to invalid %s version. (Want: %q, Detected: %q)", intent.Name, service.Type(), version, intentVersion) 110 | } 111 | } 112 | 113 | return nil 114 | } 115 | -------------------------------------------------------------------------------- /doc/how-to/snaps.md: -------------------------------------------------------------------------------- 1 | (howto-snap)= 2 | # How to manage the snaps 3 | 4 | MicroCloud is distributed as a [snap](https://snapcraft.io/docs). 5 | The benefit of packaging MicroCloud as a snap is that it makes it possible to include the required dependencies, and that it allows MicroCloud to be installed on many different Linux distributions. 6 | The snap ensures that MicroCloud runs in a consistent environment. 7 | 8 | Because MicroCloud uses a set of {ref}`other snaps `, you must make sure to have suitable versions of these snaps installed on all machines of your MicroCloud cluster. 9 | The installed snap versions must be compatible with one another, and for each of the snaps, the same version must be installed on all machines. 10 | 11 | ## Choose the right channel and track 12 | 13 | Snaps come with different channels that define which release of a snap is installed and tracked for updates. 14 | See [Channels and tracks](https://snapcraft.io/docs/channels) in the snap documentation for detailed information. 15 | 16 | MicroCloud currently provides the legacy `1` and the latest `2` track. 
17 | 18 | ```{tip} 19 | In general, you should use the default channels for all snaps required to run MicroCloud. 20 | 21 | See {ref}`howto-support` for a list of supported channels that are orchestrated to work together. 22 | ``` 23 | 24 | When installing a snap, specify the channel as follows: 25 | 26 | sudo snap install --channel= 27 | 28 | For example: 29 | 30 | sudo snap install microcloud --channel=2/stable 31 | 32 | To see all available channels of a snap, run the following command: 33 | 34 | snap info 35 | 36 | (howto-snap-control-updates)= 37 | ## Control updates 38 | 39 | By default, snaps are updated automatically. 40 | In the case of MicroCloud, this can be problematic because the related snaps must always use compatible versions, and because all machines of a cluster must use the same version of each snap. 41 | 42 | Therefore, you should manually apply your updates and make sure that all cluster members are in sync regarding the snap versions that they use. 43 | 44 | (howto-snap-hold-updates)= 45 | ### Hold updates 46 | 47 | You can hold snap updates for a specific time or forever, for all snaps or for specific snaps. 48 | 49 | Which strategy to choose depends on your use case. 50 | If you want to fully control updates to your MicroCloud setup, you should put a hold on all related snaps until you decide to update them. 51 | 52 | Enter the following command to indefinitely hold all updates to the snaps needed for MicroCloud: 53 | 54 | sudo snap refresh --hold lxd microceph microovn microcloud 55 | 56 | See [Hold refreshes](https://snapcraft.io/docs/managing-updates#heading--hold) in the snap documentation for detailed information about holding snap updates. 57 | 58 | (howto-snap-cluster)= 59 | ### Keep cluster members in sync 60 | 61 | Snap updates are delivered as [progressive releases](https://snapcraft.io/docs/progressive-releases), which means that updated snap versions are made available to different machines at different times. 
62 | This method can cause a problem for cluster updates if some cluster members are refreshed to a version that is not available to other cluster members yet. 63 | 64 | To avoid this problem, use the `--cohort="+"` flag when refreshing your snaps: 65 | 66 | sudo snap refresh --cohort="+" 67 | 68 | This flag ensures that all machines in a cluster see the same snap revision and are therefore not affected by a progressive rollout. 69 | 70 | ## Use a Snap Store Proxy 71 | 72 | If you manage a large MicroCloud deployment and you need absolute control over when updates are applied, consider installing a Snap Store Proxy. 73 | 74 | The Snap Store Proxy is a separate application that sits between the snap client command on your machines and the snap store. 75 | You can configure the Snap Store Proxy to make only specific snap revisions available for installation. 76 | 77 | See the [Snap Store Proxy documentation](https://docs.ubuntu.com/snap-store-proxy/) for information about how to install and register the Snap Store Proxy. 78 | 79 | After setting it up, configure the snap clients on all cluster members to use the proxy. 80 | See [Configuring snap devices](https://docs.ubuntu.com/snap-store-proxy/en/devices) for instructions. 
81 | 82 | You can then configure the Snap Store Proxy to override the revisions for the snaps that are needed for MicroCloud: 83 | 84 | sudo snap-proxy override lxd = 85 | sudo snap-proxy override microceph = 86 | sudo snap-proxy override microovn = 87 | sudo snap-proxy override microcloud = 88 | -------------------------------------------------------------------------------- /doc/.sphinx/_integration/header.css: -------------------------------------------------------------------------------- 1 | .p-navigation { 2 | border-bottom: 1px solid var(--color-sidebar-background-border); 3 | } 4 | 5 | .p-navigation__nav { 6 | background: #333333; 7 | display: flex; 8 | } 9 | 10 | .p-logo { 11 | display: flex !important; 12 | padding-top: 0 !important; 13 | text-decoration: none; 14 | } 15 | 16 | .p-logo-image { 17 | height: 44px; 18 | padding-right: 10px; 19 | } 20 | 21 | .p-logo-text { 22 | margin-top: 18px; 23 | color: white; 24 | text-decoration: none; 25 | } 26 | 27 | ul.p-navigation__links { 28 | display: flex; 29 | list-style: none; 30 | margin-left: 0; 31 | margin-top: auto; 32 | margin-bottom: auto; 33 | width: 100%; 34 | } 35 | 36 | ul.p-navigation__links li { 37 | margin: 0 auto; 38 | text-align: center; 39 | width: 100%; 40 | padding-left: 20px; 41 | } 42 | 43 | ul.p-navigation__links li.active { 44 | background-color: #E95420; 45 | } 46 | 47 | ul.p-navigation__links li a { 48 | background-color: rgba(0, 0, 0, 0); 49 | border: none; 50 | border-radius: 0; 51 | color: var(--color-sidebar-link-text); 52 | display: block; 53 | font-weight: 400; 54 | line-height: 1.5rem; 55 | margin: 0; 56 | overflow: hidden; 57 | padding: 1rem 0; 58 | position: relative; 59 | text-align: left; 60 | text-overflow: ellipsis; 61 | transition-duration: .1s; 62 | transition-property: background-color, color, opacity; 63 | transition-timing-function: cubic-bezier(0.215, 0.61, 0.355, 1); 64 | white-space: nowrap; 65 | width: 100%; 66 | } 67 | 68 | ul.p-navigation__links 
.p-navigation__link { 69 | color: #ffffff; 70 | font-weight: 300; 71 | text-align: center; 72 | text-decoration: none; 73 | } 74 | 75 | 76 | ul.p-navigation__links li a:hover { 77 | text-decoration: none; 78 | } 79 | ul.p-navigation__links li a:visited:hover { 80 | text-decoration: none; 81 | } 82 | 83 | ul.p-navigation__links .p-navigation__link:hover { 84 | background-color: #2b2b2b; 85 | } 86 | 87 | ul.p-navigation__links .p-dropdown__link:hover { 88 | background-color: var(--color-sidebar-item-background--hover); 89 | } 90 | 91 | ul.p-navigation__links .p-navigation__sub-link { 92 | background: var(--color-background-primary); 93 | padding: .5rem 0 .5rem .5rem; 94 | font-weight: 300; 95 | } 96 | 97 | ul.p-navigation__links .more-links-dropdown li a { 98 | border-left: 1px solid var(--color-sidebar-background-border); 99 | border-right: 1px solid var(--color-sidebar-background-border); 100 | } 101 | 102 | ul.p-navigation__links .more-links-dropdown li:first-child a { 103 | border-top: 1px solid var(--color-sidebar-background-border); 104 | } 105 | 106 | ul.p-navigation__links .more-links-dropdown li:last-child a { 107 | border-bottom: 1px solid var(--color-sidebar-background-border); 108 | } 109 | 110 | ul.p-navigation__links .p-navigation__logo { 111 | padding: 0.5rem; 112 | } 113 | 114 | ul.p-navigation__links .p-navigation__logo img { 115 | width: 40px; 116 | } 117 | 118 | ul.more-links-dropdown { 119 | display: none; 120 | overflow-x: visible; 121 | height: 0; 122 | z-index: 55; 123 | padding: 0; 124 | position: relative; 125 | list-style: none; 126 | margin-bottom: 0; 127 | margin-top: 0; 128 | } 129 | 130 | .nav-more-links::after { 131 | background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='16' height='16'%3E%3Cpath fill='%23111' d='M8.187 11.748l6.187-6.187-1.06-1.061-5.127 5.127L3.061 4.5 2 5.561z'/%3E%3C/svg%3E"); 132 | background-position: center; 133 | background-repeat: no-repeat; 134 | background-size: contain; 
135 | content: ""; 136 | display: block; 137 | filter: invert(100%); 138 | height: 1rem; 139 | pointer-events: none; 140 | position: absolute; 141 | right: 1rem; 142 | text-indent: calc(100% + 10rem); 143 | top: calc(1rem + 0.25rem); 144 | width: 1rem; 145 | } 146 | 147 | .nav-ubuntu-com { 148 | display: none; 149 | } 150 | 151 | @media only screen and (min-width: 480px) { 152 | ul.p-navigation__links li { 153 | width: 100%; 154 | } 155 | 156 | .nav-ubuntu-com { 157 | display: inherit; 158 | } 159 | } 160 | 161 | @media only screen and (max-width: 800px) { 162 | .nav-more-links { 163 | margin-left: auto !important; 164 | padding-right: 2rem !important; 165 | width: 8rem !important; 166 | } 167 | } 168 | 169 | @media only screen and (min-width: 800px) { 170 | ul.p-navigation__links li { 171 | width: 100% !important; 172 | } 173 | } 174 | 175 | @media only screen and (min-width: 1310px) { 176 | ul.p-navigation__links { 177 | margin-left: calc(50% - 41em); 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /doc/reference/index.md: -------------------------------------------------------------------------------- 1 | (reference)= 2 | # Reference 3 | 4 | The reference material in this section provides technical descriptions of MicroCloud. 5 | 6 | (reference-requirements)= 7 | ## Requirements 8 | 9 | (hardware-requirements)= 10 | ### Hardware requirements 11 | 12 | MicroCloud supports up to 50 machines as members of the same cluster. 13 | 14 | - For testing and development, a single machine (physical or virtual) can be used to create a cluster. 15 | 16 | - For production environments, a minimum of 3 physical machines is required as cluster members. We do not recommend using virtual machines as cluster members in production. 17 | 18 | - For critical deployments, we recommend a minimum of 4 cluster members. 
While 3 members are sufficient for high availability, an extra member provides redundancy for running critical applications. 19 | 20 | 21 | ```{tip} 22 | If you want to add further members to a cluster after initialisation, use the {command}`microcloud add` command. 23 | ``` 24 | 25 | You can mix different processor architectures within the same MicroCloud cluster. 26 | 27 | Each cluster member must have at least 8 GiB of RAM (more depending on the connected disks). We recommend at least 32 GiB of RAM for production environments. 28 | 29 | ### Storage requirements 30 | 31 | All storage disks should be free of existing partitions or file systems. 32 | 33 | Also see Ceph's {ref}`ceph:hardware-recommendations`. 34 | 35 | #### Local storage 36 | Local storage is typically sufficient for testing and development, as it is fast and convenient. To use local storage, each cluster member requires a local disk. 37 | 38 | 39 | #### Distributed storage 40 | You can set up distributed storage on a test cluster with a single member, but it won’t have the recommended replication configuration which ensures high availability. 41 | 42 | #### High availability 43 | 44 | For high availability, and the ability to recover a cluster should something go wrong, use distributed storage with at least three additional disks for use by Ceph. These disks must be on at least three different cluster members. 45 | 46 | ### Production environments 47 | 48 | For production environments, we recommend at least 3 NVMe disks per cluster member: 49 | - 1 for OS 50 | - 1 for local storage 51 | - 1 for distributed storage 52 | 53 | ### Full disk encryption 54 | 55 | If you intend to use full disk encryption on any cluster member, the `dm-crypt` kernel module must be available, and the snap `dm-crypt` plug must be connected to MicroCeph. The `dm-crypt` module is available by default in Ubuntu 24.04 and higher. 
56 | 57 | For further information, see the Prerequisites section of this page: {doc}`microceph:explanation/full-disk-encryption`. Note that the command shown on that page to connect the snap `dm-crypt` plug can only be performed once MicroCeph is installed. The MicroCloud installation steps include installing MicroCeph; thus, {ref}`install MicroCloud first`, then connect the plug. 58 | 59 | ### Networking requirements 60 | 61 | For networking, MicroCloud requires at least two dedicated network interfaces: one for intra-cluster communication and one for external connectivity. 62 | 63 | In production environments, we recommend dual-port network cards with a minimum 10 GiB capacity, or higher if low latency is essential. 64 | 65 | If you want to partially or fully disaggregate the Ceph networks and the OVN underlay network, you need more dedicated interfaces. For details, see: {ref}`howto-ceph-networking`. 66 | 67 | To allow for external connectivity, MicroCloud requires an uplink network that supports broadcast and multicast. See {ref}`explanation-networking` for more information. 68 | 69 | The intra-cluster interface must have IPs assigned, whereas the external connectivity interface must not have any IPs assigned. 70 | 71 | The IP addresses of the cluster members must not change after installation, so DHCP is not supported. 72 | 73 | ### Software requirements 74 | 75 | MicroCloud requires snapd version 2.59 or newer. 76 | 77 | We recommend an LTS version of Ubuntu 22.04 or newer. Production deployments subscribed to Ubuntu Pro are required to use an LTS version. 78 | 79 | If you intend to use ZFS storage, use a non-HWE (Hardware Enabled) variant of Ubuntu 22.04. 80 | 81 | Also see LXD's {ref}`lxd:requirements` and Ceph's {doc}`ceph:start/os-recommendations`. 
82 | 83 | (snaps)= 84 | ## Snaps 85 | 86 | To run MicroCloud, you must install the following snaps: 87 | 88 | - [MicroCloud snap](https://snapcraft.io/microcloud) 89 | - [LXD snap](https://snapcraft.io/lxd) 90 | - [MicroCeph snap](https://snapcraft.io/microceph) 91 | - [MicroOVN snap](https://snapcraft.io/microovn) 92 | 93 | See {ref}`howto-install` for installation instructions. 94 | -------------------------------------------------------------------------------- /.github/workflows/security.yml: -------------------------------------------------------------------------------- 1 | name: Vulnerability Scanning with Trivy 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | - cron: '0 0 * * *' # Test Trivy daily at midnight 6 | 7 | permissions: 8 | contents: read 9 | security-events: write # for uploading SARIF results to the security tab 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }} 13 | cancel-in-progress: true 14 | 15 | defaults: 16 | run: 17 | shell: bash 18 | 19 | jobs: 20 | trivy-repo: 21 | name: Trivy - Repository 22 | runs-on: ubuntu-22.04 23 | if: ${{ github.ref_name == 'main' }} 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 27 | with: 28 | ref: main 29 | 30 | - name: Install Trivy 31 | uses: canonical/lxd/.github/actions/install-trivy@main 32 | 33 | - name: Download Trivy DB 34 | id: db_download 35 | run: trivy fs --download-db-only --cache-dir /home/runner/vuln-cache 36 | continue-on-error: true 37 | 38 | - name: Cache Trivy vulnerability database 39 | if: ${{ steps.db_download.outcome == 'success' }} 40 | uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 41 | with: 42 | path: /home/runner/vuln-cache 43 | key: trivy-cache-${{ github.run_id }} 44 | 45 | - name: Use previously downloaded database instead 46 | if: ${{ steps.db_download.outcome == 'failure' }} 47 | uses: 
actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 48 | with: 49 | path: /home/runner/vuln-cache 50 | key: download-failed # Use a non existing key to fallback to restore-keys 51 | restore-keys: trivy-cache- 52 | 53 | - name: Run Trivy vulnerability scanner 54 | run: | 55 | trivy fs --skip-db-update \ 56 | --scanners vuln,secret,misconfig \ 57 | --format sarif \ 58 | --cache-dir /home/runner/vuln-cache \ 59 | --severity LOW,MEDIUM,HIGH,CRITICAL \ 60 | --output trivy-microcloud-repo-scan-results.sarif . 61 | 62 | - name: Upload Trivy scan results to GitHub Security tab 63 | uses: github/codeql-action/upload-sarif@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 64 | with: 65 | sarif_file: "trivy-microcloud-repo-scan-results.sarif" 66 | sha: ${{ github.sha }} 67 | ref: refs/heads/main 68 | 69 | trivy-snap: 70 | name: Trivy - Snap 71 | runs-on: ubuntu-22.04 72 | if: ${{ github.ref_name == 'main' }} 73 | needs: trivy-repo 74 | strategy: 75 | matrix: 76 | include: 77 | - channel: "3/edge" 78 | branch: "main" 79 | - channel: "2/stable" 80 | branch: "v2-edge" 81 | - channel: "1/stable" 82 | branch: "v1-edge" 83 | steps: 84 | - name: Checkout 85 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 86 | with: 87 | ref: ${{ matrix.branch }} 88 | 89 | - name: Install Trivy 90 | uses: canonical/lxd/.github/actions/install-trivy@main 91 | 92 | - name: Restore cached Trivy vulnerability database 93 | uses: actions/cache/restore@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 94 | with: 95 | path: /home/runner/vuln-cache 96 | key: download-failed # Use a non existing key to fallback to restore-keys 97 | restore-keys: trivy-cache- 98 | 99 | - name: Download snap for scan 100 | run: | 101 | snap download microcloud --channel=${{ matrix.channel }} 102 | unsquashfs ./microcloud*.snap 103 | 104 | - name: Run Trivy vulnerability scanner 105 | run: | 106 | trivy rootfs --skip-db-update \ 107 | --scanners vuln,secret,misconfig \ 108 | 
--format sarif \ 109 | --cache-dir /home/runner/vuln-cache \ 110 | --severity LOW,MEDIUM,HIGH,CRITICAL \ 111 | --output snap-scan-results.sarif squashfs-root 112 | 113 | - name: Flag snap scanning alerts 114 | run: | 115 | jq '.runs[].tool.driver.rules[] |= (.shortDescription.text |= "Snap scan - " + .)' snap-scan-results.sarif > tmp.json 116 | mv tmp.json snap-scan-results.sarif 117 | 118 | - name: Upload Trivy scan results to GitHub Security tab 119 | uses: github/codeql-action/upload-sarif@dd746615b3b9d728a6a37ca2045b68ca76d4841a # v3.28.8 120 | with: 121 | sarif_file: "snap-scan-results.sarif" 122 | sha: ${{ github.sha }} 123 | ref: refs/heads/${{ matrix.branch }} 124 | -------------------------------------------------------------------------------- /doc/.sphinx/_static/furo_colors.css: -------------------------------------------------------------------------------- 1 | body { 2 | --color-code-background: #f8f8f8; 3 | --color-code-foreground: black; 4 | --code-font-size: 1rem; 5 | --font-stack: Ubuntu variable, Ubuntu, -apple-system, Segoe UI, Roboto, Oxygen, Cantarell, Fira Sans, Droid Sans, Helvetica Neue, sans-serif; 6 | --font-stack--monospace: Ubuntu Mono variable, Ubuntu Mono, Consolas, Monaco, Courier, monospace; 7 | --color-foreground-primary: #111; 8 | --color-foreground-secondary: var(--color-foreground-primary); 9 | --color-foreground-muted: #333; 10 | --color-background-secondary: #FFF; 11 | --color-background-hover: #f2f2f2; 12 | --color-brand-primary: #111; 13 | --color-brand-content: #06C; 14 | --color-api-background: #E3E3E3; 15 | --color-inline-code-background: rgba(0,0,0,.03); 16 | --color-sidebar-link-text: #111; 17 | --color-sidebar-item-background--current: #ebebeb; 18 | --color-sidebar-item-background--hover: #f2f2f2; 19 | --toc-font-size: var(--font-size--small); 20 | --color-admonition-title-background--note: var(--color-background-primary); 21 | --color-admonition-title-background--tip: var(--color-background-primary); 22 | 
--color-admonition-title-background--important: var(--color-background-primary); 23 | --color-admonition-title-background--caution: var(--color-background-primary); 24 | --color-admonition-title--note: #24598F; 25 | --color-admonition-title--tip: #24598F; 26 | --color-admonition-title--important: #C7162B; 27 | --color-admonition-title--caution: #F99B11; 28 | --color-highlighted-background: #EBEBEB; 29 | --color-link-underline: var(--color-link); 30 | --color-link-underline--hover: var(--color-link); 31 | --color-link-underline--visited: var(--color-link--visited); 32 | --color-link-underline--visited--hover: var(--color-link--visited); 33 | --color-version-popup: #772953; 34 | } 35 | 36 | @media not print { 37 | body[data-theme="dark"] { 38 | --color-code-background: #202020; 39 | --color-code-foreground: #d0d0d0; 40 | --color-foreground-secondary: var(--color-foreground-primary); 41 | --color-foreground-muted: #CDCDCD; 42 | --color-background-secondary: var(--color-background-primary); 43 | --color-background-hover: #666; 44 | --color-brand-primary: #fff; 45 | --color-brand-content: #69C; 46 | --color-sidebar-link-text: #f7f7f7; 47 | --color-sidebar-item-background--current: #666; 48 | --color-sidebar-item-background--hover: #333; 49 | --color-admonition-background: transparent; 50 | --color-admonition-title-background--note: var(--color-background-primary); 51 | --color-admonition-title-background--tip: var(--color-background-primary); 52 | --color-admonition-title-background--important: var(--color-background-primary); 53 | --color-admonition-title-background--caution: var(--color-background-primary); 54 | --color-admonition-title--note: #24598F; 55 | --color-admonition-title--tip: #24598F; 56 | --color-admonition-title--important: #C7162B; 57 | --color-admonition-title--caution: #F99B11; 58 | --color-highlighted-background: #666; 59 | --color-version-popup: #F29879; 60 | } 61 | @media (prefers-color-scheme: dark) { 62 | body:not([data-theme="light"]) { 63 | 
When running {command}`microcloud init`, if you decided to set up distributed networking and if you have at least one unused network interface per cluster member with an IP address, you are asked if you want to configure an underlay network for OVN.
You could also choose to configure a dedicated underlay network for OVN by typing `yes`. A list of available network interfaces with an IP address will be displayed.
Now, you can start the MicroCloud initialisation process and provide the subnets you noted down when asked for the OVN underlay. 47 | 1. We will use `ovnbr0` for the OVN underlay traffic. In a production setup, you'd choose the fast subnet for this traffic: 48 | 49 | Configure dedicated underlay networking? (yes/no) [default=no]: yes 50 | Select exactly one network interface from each cluster member: 51 | Space to select; enter to confirm; type to filter results. 52 | Up/down to move; right to select all; left to select none. 53 | +----------+--------+----------+-------------------------------------------+ 54 | | LOCATION | IFACE | TYPE | IP ADDRESS (CIDR) | 55 | +----------+--------+----------+-------------------------------------------+ 56 | [x] | micro1 | enp7s0 | physical | 10.0.1.2/24 | 57 | [ ] | micro1 | enp7s0 | physical | fd42:5782:5902:5b9e:216:3eff:fe01:67af/64 | 58 | [x] | micro3 | enp7s0 | physical | 10.0.1.4/24 | 59 | [ ] | micro3 | enp7s0 | physical | fd42:5782:5902:5b9e:216:3eff:fe36:d29c/64 | 60 | > [x] | micro2 | enp7s0 | physical | 10.0.1.3/24 | 61 | [ ] | micro2 | enp7s0 | physical | fd42:5782:5902:5b9e:216:3eff:fedb:f04e/64 | 62 | +----------+--------+----------+-------------------------------------------+ 63 | 64 | 1. The MicroCloud initialisation process will now continue as usual and the OVN cluster will be configured with the underlay network you provided. 65 | 1. You can now inspect the OVN underlay setup: 66 | 67 | 1. 
Inspect the OVN southbound encapsulation parameters: 68 | 69 | ```{terminal} 70 | :input: microovn.ovn-sbctl --columns=ip,type find Encap type=geneve 71 | :user: root 72 | :host: micro1 73 | :scroll: 74 | 75 | ip : "10.77.55.2" 76 | type : geneve 77 | 78 | ip : "10.77.55.4" 79 | type : geneve 80 | 81 | ip : "10.77.55.3" 82 | type : geneve 83 | ``` -------------------------------------------------------------------------------- /doc/.sphinx/_templates/footer.html: -------------------------------------------------------------------------------- 1 | {# ru-fu: copied from Furo, with modifications as stated below. Modifications are marked 'mod:'. #} 2 | 3 | 40 |
41 |
42 | {%- if show_copyright %} 43 | 54 | {%- endif %} 55 | 56 | {# mod: removed "Made with" #} 57 | 58 | {%- if last_updated -%} 59 |
60 | {% trans last_updated=last_updated|e -%} 61 | Last updated on {{ last_updated }} 62 | {%- endtrans -%} 63 |
64 | {%- endif %} 65 | 66 | {%- if show_source and has_source and sourcename %} 67 |
68 | Show source 70 |
71 | {%- endif %} 72 |
73 |
74 | {% if github_url and github_folder and pagename and page_source_suffix and display_contributors %} 75 | {% set contributors = get_contribs(github_url, github_folder, pagename, page_source_suffix, display_contributors_since) %} 76 | {% if contributors %} 77 | {% if contributors | length > 1 %} 78 | Thanks to the {{ contributors |length }} contributors! 79 | {% else %} 80 | Thanks to our contributor! 81 | {% endif %} 82 |
83 | 90 | {% endif %} 91 | {% endif %} 92 |
93 |
94 | 95 | {# mod: replaced RTD icons with our links #} 96 | 97 | {% if discourse %} 98 | 101 | {% endif %} 102 | 103 | {% if mattermost %} 104 | 107 | {% endif %} 108 | 109 | {% if matrix %} 110 | 113 | {% endif %} 114 | 115 | {% if github_url and github_version and github_folder %} 116 | 117 | {% if github_issues %} 118 | 121 | {% endif %} 122 | 123 | 126 | {% endif %} 127 | 128 | 129 |
130 |
131 | 132 | -------------------------------------------------------------------------------- /cmd/tui/console.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "context" 7 | "fmt" 8 | "io" 9 | "os" 10 | "strconv" 11 | "strings" 12 | "time" 13 | 14 | "github.com/charmbracelet/x/ansi" 15 | ) 16 | 17 | // testConsole is used to simulate a terminal for the purposes of interacting with the MicroCloud CLI for testing. 18 | type testConsole struct { 19 | queue *bufio.Reader // queue of commands to send to "stdin". 20 | inRead *os.File // read by the input handler like stdin. 21 | inWrite *os.File // writes to "stdin" from the test console. 22 | 23 | out *os.File // output of the program. 24 | } 25 | 26 | // newTestConsole creates a new testConsole with the given input and output files. 27 | func newTestConsole(r io.Reader, output *os.File) (*testConsole, error) { 28 | inRead, inWrite, err := os.Pipe() 29 | if err != nil { 30 | return nil, err 31 | } 32 | 33 | sc := bufio.NewScanner(r) 34 | b := bytes.Buffer{} 35 | for sc.Scan() { 36 | txt := sc.Text() 37 | txt, _, _ = strings.Cut(txt, " # ") 38 | txt = strings.TrimSpace(txt) 39 | 40 | b.WriteString(txt) 41 | b.WriteString("\n") 42 | } 43 | 44 | return &testConsole{ 45 | inRead: inRead, 46 | inWrite: inWrite, 47 | out: output, 48 | queue: bufio.NewReader(bytes.NewReader(b.Bytes())), 49 | }, nil 50 | } 51 | 52 | // PrepareTestAsker removes comments from the lines read from the given reader, and assigns them to the test console reader. 53 | // The test console will read lines from the input file and write them one by one to one side of a pipe. 54 | // The other side of the pipe is read by the asker to interpret as input to the current question. 55 | // The output can be tracked with the given output file. 
56 | func PrepareTestAsker(ctx context.Context, r io.Reader, output *os.File) (*InputHandler, error) { 57 | console, err := newTestConsole(r, output) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | handler := NewInputHandler(console.inRead, console.out) 63 | handler.testMode = true 64 | ctx, cancel := context.WithCancel(ctx) 65 | go func() { 66 | for { 67 | select { 68 | case <-ctx.Done(): 69 | console.inRead.Close() 70 | console.inWrite.Close() 71 | console.out.Close() 72 | return 73 | default: 74 | err := console.parseInput(handler) 75 | if err != nil { 76 | handler.table.SendUpdate(ErrorMsg(err)) 77 | cancel() 78 | continue 79 | } 80 | 81 | time.Sleep(300 * time.Millisecond) 82 | } 83 | } 84 | }() 85 | 86 | return handler, nil 87 | } 88 | 89 | // parseInput reads a line from the test input and sends the appropriate interaction to the MicroCloud CLI. 90 | func (c *testConsole) parseInput(handler *InputHandler) error { 91 | // If there is no active asker, we don't need to read any lines yet. 92 | if !handler.isActive() { 93 | return nil 94 | } 95 | 96 | line, _, err := c.queue.ReadLine() 97 | if err != nil { 98 | return fmt.Errorf("Failed to read input: %w", err) 99 | } 100 | 101 | input := string(line) 102 | 103 | // Log the input string. 
104 | _, err = c.out.WriteString(fmt.Sprintf(" ANSWER: {%s}\n", input)) 105 | if err != nil { 106 | return err 107 | } 108 | 109 | var action string 110 | switch input { 111 | case "table:down": 112 | action = ansi.CUD1 113 | case "table:up": 114 | action = ansi.CUU1 115 | case "table:select-none": 116 | action = ansi.CUB1 117 | case "table:select-all": 118 | action = ansi.CUF1 119 | case "table:select": 120 | action = " " 121 | case "table:done": 122 | action = "\r" 123 | default: 124 | _, wait, _ := strings.Cut(input, "table:wait ") 125 | _, expect, _ := strings.Cut(input, "table:expect ") 126 | _, filter, _ := strings.Cut(input, "table:filter ") 127 | if wait != "" { 128 | sleepTime, err := time.ParseDuration(wait) 129 | if err != nil { 130 | return err 131 | } 132 | 133 | time.Sleep(sleepTime) 134 | } else if expect != "" { 135 | count, err := strconv.Atoi(expect) 136 | if err != nil { 137 | return err 138 | } 139 | 140 | // If expecting 0, error out as the table will be invalid with 0 rows. 141 | if count == 0 { 142 | return fmt.Errorf("Cannot expect 0 rows") 143 | } 144 | 145 | // Sanity check that the table didn't start with more rows than we are going to expect. 146 | if len(handler.getAllRows()) > count { 147 | return fmt.Errorf("Table has more rows (%d) than expected (%d)", len(handler.getAllRows()), count) 148 | } 149 | 150 | // Wait until we receive the expected row count. 151 | for len(handler.getAllRows()) < count { 152 | time.Sleep(300 * time.Millisecond) 153 | } 154 | } else if filter != "" { 155 | action = filter 156 | } else { 157 | action = input + "\r" 158 | } 159 | } 160 | 161 | if action != "" { 162 | _, err := c.inWrite.WriteString(action) 163 | if err != nil { 164 | return err 165 | } 166 | 167 | // The input handler erases the previous line when the rendering changes so add a newline. 
168 | _, err = c.out.WriteString("\n") 169 | if err != nil { 170 | return err 171 | } 172 | } 173 | 174 | return nil 175 | } 176 | -------------------------------------------------------------------------------- /cmd/tui/handler.go: -------------------------------------------------------------------------------- 1 | package tui 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | "sync" 8 | 9 | "github.com/canonical/lxd/shared" 10 | tea "github.com/charmbracelet/bubbletea" 11 | ) 12 | 13 | // ContextError is the charmbracelet representation of a context cancellation error. 14 | var ContextError error = tea.ErrProgramKilled 15 | 16 | // InputHandler handles input dialogs. 17 | type InputHandler struct { 18 | input *os.File 19 | output *os.File 20 | 21 | // testMode is set to true if the handler is initialized in test mode with PrepareTestAsker. 22 | testMode bool 23 | 24 | table *selectableTable 25 | tableMu sync.Mutex 26 | 27 | activeMu sync.RWMutex 28 | active bool 29 | activeCh chan struct{} 30 | } 31 | 32 | // NewInputHandler creates a new input handler for managing dialogs. 33 | func NewInputHandler(input *os.File, output *os.File) *InputHandler { 34 | return &InputHandler{ 35 | input: input, 36 | output: output, 37 | table: &selectableTable{}, 38 | activeCh: make(chan struct{}), 39 | } 40 | } 41 | 42 | func (i *InputHandler) setActive(active bool) { 43 | i.activeMu.Lock() 44 | defer i.activeMu.Unlock() 45 | 46 | i.active = active 47 | } 48 | 49 | func (i *InputHandler) isActive() bool { 50 | i.activeMu.RLock() 51 | defer i.activeMu.RUnlock() 52 | 53 | return i.active 54 | } 55 | 56 | // getAllRows lists all filtered and unflitered rows from the current table. 
57 | func (i *InputHandler) getAllRows() [][]string { 58 | i.tableMu.Lock() 59 | defer i.tableMu.Unlock() 60 | 61 | allRows := make([][]string, len(i.table.rawRows)) 62 | for i, row := range i.table.rawRows { 63 | copy(allRows[i], row) 64 | } 65 | 66 | return allRows 67 | } 68 | 69 | // AskBoolWarn is the same as AskBool but it appends "! Warning:" to the front of the message. 70 | func (i *InputHandler) AskBoolWarn(question string, defaultAnswer bool) (bool, error) { 71 | question = fmt.Sprintf("%s %s: %s", WarningSymbol(), WarningColor("Warning", true), question) 72 | return i.AskBool(question, defaultAnswer) 73 | } 74 | 75 | // AskBool prints the given prompt and accepts either "yes" or "no" as answers. It returns the boolean equivalent. 76 | func (i *InputHandler) AskBool(question string, defaultAnswer bool) (bool, error) { 77 | i.setActive(true) 78 | defer i.setActive(false) 79 | defaultAnswerStr := "no" 80 | if defaultAnswer { 81 | defaultAnswerStr = "yes" 82 | } 83 | 84 | result, err := i.handleQuestion(question, defaultAnswerStr, []string{"yes", "no"}) 85 | if err != nil { 86 | return false, err 87 | } 88 | 89 | if shared.ValueInSlice(strings.ToLower(result.answer), []string{"yes", "y"}) { 90 | fmt.Println(result.View()) 91 | return true, nil 92 | } else if shared.ValueInSlice(strings.ToLower(result.answer), []string{"no", "n"}) { 93 | fmt.Println(result.View()) 94 | return false, nil 95 | } 96 | 97 | return false, fmt.Errorf("Response %q must be one of %v", result.answer, result.acceptedAnswers) 98 | } 99 | 100 | // AskStringWarn is the same as AskString but it appends "! Warning:" to the front of the message. 
101 | func (i *InputHandler) AskStringWarn(question string, defaultAnswer string, validator func(string) error) (string, error) { 102 | question = fmt.Sprintf("%s %s: %s", WarningSymbol(), WarningColor("Warning", true), question) 103 | return i.AskString(question, defaultAnswer, validator) 104 | } 105 | 106 | // AskString prints the given prompt and accepts a string answer. If no answer is provided, it uses the default answer. 107 | func (i *InputHandler) AskString(question string, defaultAnswer string, validator func(string) error) (string, error) { 108 | i.setActive(true) 109 | defer i.setActive(false) 110 | result, err := i.handleQuestion(question, defaultAnswer, nil) 111 | if err != nil { 112 | return "", err 113 | } 114 | 115 | err = validator(result.answer) 116 | if err != nil { 117 | return "", err 118 | } 119 | 120 | fmt.Println(result.View()) 121 | 122 | return result.answer, nil 123 | } 124 | 125 | func (i *InputHandler) handleQuestion(question string, defaultAnswer string, acceptedAnswers []string) (*asker, error) { 126 | ask := &asker{ 127 | question: question, 128 | defaultAnswer: defaultAnswer, 129 | acceptedAnswers: acceptedAnswers, 130 | File: i.output, 131 | } 132 | 133 | // The standard renderer does not yet support custom cursor positions so we need to 134 | // manually remove the sequence from the end of the string to get proper cursor tracking. 
135 | // see: https://github.com/charmbracelet/bubbletea/issues/918 136 | out, err := tea.NewProgram(ask, tea.WithOutput(ask), tea.WithInput(i.input)).Run() 137 | if err != nil { 138 | return nil, err 139 | } 140 | 141 | result, ok := out.(*asker) 142 | if !ok { 143 | return nil, fmt.Errorf("Unexpected question result") 144 | } 145 | 146 | if result.cancelled { 147 | return nil, fmt.Errorf("Input cancelled") 148 | } 149 | 150 | if strings.TrimSpace(result.answer) == "" { 151 | result.answer = result.defaultAnswer 152 | } else { 153 | result.answer = strings.TrimSpace(result.answer) 154 | } 155 | 156 | return result, nil 157 | } 158 | -------------------------------------------------------------------------------- /doc/how-to/preseed.yaml: -------------------------------------------------------------------------------- 1 | # `initiator` defines which system takes over the role of the initiator during the trust establishment using multicast discovery. 2 | # Make sure to also set `lookup_subnet`. 3 | # The field cannot be set together with `initiator_address`. 4 | # Required if `initiator_address` isn't specified. 5 | initiator: micro01 6 | 7 | # `initiator_address` defines which system takes over the role of the initiator during the trust establishment. 8 | # It also allows joining systems to learn about the address they have to connect to. 9 | # The field cannot be set together with `initiator`. 10 | # Required if `initiator` isn't specified. 11 | initiator_address: 10.0.0.1 12 | 13 | # `lookup_subnet` is required and limits the subnet when looking up systems using multicast discovery. 14 | # The first assigned address of this subnet is used for MicroCloud itself. 15 | lookup_subnet: 10.0.0.0/24 16 | 17 | # `lookup_timeout` is optional and configures how long the joining system will wait for a system to be discovered using multicast discovery. 18 | # The value has to be provided in seconds. 19 | # It defaults to 60 seconds. 
20 | lookup_timeout: 300 21 | 22 | # `session_passphrase` is required and configures the passphrase used during the trust establishment session. 23 | session_passphrase: 83P27XWKbDczUyE7xaX3pgVfaEacfQ2qiQ0r6gPb 24 | 25 | # `session_timeout` is optional and configures how long the trust establishment session will last. 26 | # The value has to be provided in seconds. 27 | # It defaults to 60 minutes. 28 | session_timeout: 300 29 | 30 | # `systems` is required and lists the systems we expect to find by their host name. 31 | # `name` is required and represents the host name. 32 | # `address` sets the address used for MicroCloud and is required in case `initiator_address` is present. 33 | # `ovn_uplink_interface` is optional and represents the name of the interface reserved for use with OVN. 34 | # `ovn_underlay_ip` is optional and represents the Geneve Encap IP for each system. 35 | # `storage` is optional and represents explicit paths to disks for each system. 36 | systems: 37 | - name: micro01 38 | address: 10.0.0.1 39 | ovn_uplink_interface: eth1 40 | ovn_underlay_ip: 10.0.2.101 41 | - name: micro02 42 | address: 10.0.0.2 43 | ovn_uplink_interface: eth1 44 | ovn_underlay_ip: 10.0.2.102 45 | storage: 46 | local: 47 | path: /dev/nvme5n1 48 | wipe: true 49 | ceph: 50 | - path: /dev/nvme4n1 51 | wipe: true 52 | - path: nvme3n1 53 | wipe: true 54 | encrypt: true 55 | - name: micro03 56 | address: 10.0.0.3 57 | ovn_uplink_interface: eth1 58 | ovn_underlay_ip: 10.0.2.103 59 | - name: micro04 60 | address: 10.0.0.4 61 | ovn_uplink_interface: eth1 62 | 63 | # `ceph` is optional and represents the Ceph global configuration 64 | # `cephfs: true` can be used to optionally set up a CephFS file system alongside Ceph distributed storage. 65 | # `internal_network: subnet` optionally specifies the internal cluster network for the Ceph cluster. This network handles OSD heartbeats, object replication, and recovery traffic. 
# The names and values of each key correspond to the YAML field names for the `api.ResourcesStorageDisk`
89 | storage: 90 | local: 91 | - find: size > 10GiB && size < 50GiB && type == nvme 92 | find_min: 1 93 | find_max: 1 94 | wipe: true 95 | - find: size > 10GiB && size < 50GiB && type == hdd && block_size == 512 && model == 'Samsung %' 96 | find_min: 3 97 | find_max: 3 98 | wipe: false 99 | ceph: 100 | - find: size > 10GiB && size < 50GiB && type == nvme 101 | find_min: 1 102 | find_max: 2 103 | wipe: true 104 | - find: size > 10GiB && size < 50GiB && type == hdd && partitioned == false && block_size == 512 && model == 'Samsung %' 105 | find_min: 3 106 | find_max: 8 107 | wipe: false 108 | -------------------------------------------------------------------------------- /doc/Makefile.sp: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | # `Makefile.sp` is from the Sphinx starter pack and should not be 4 | # modified. 5 | # Add your customisation to `Makefile` instead. 6 | 7 | # You can set these variables from the command line, and also 8 | # from the environment for the first two. 9 | SPHINXDIR = .sphinx 10 | SPHINXOPTS ?= -c . -d $(SPHINXDIR)/.doctrees -j auto 11 | SPHINXBUILD ?= sphinx-build 12 | SOURCEDIR = . 13 | BUILDDIR = _build 14 | VENVDIR = $(SPHINXDIR)/venv 15 | PA11Y = $(SPHINXDIR)/node_modules/pa11y/bin/pa11y.js --config $(SPHINXDIR)/pa11y.json 16 | VENV = $(VENVDIR)/bin/activate 17 | TARGET = * 18 | ALLFILES = *.rst **/*.rst 19 | ADDPREREQS ?= 20 | 21 | .PHONY: sp-full-help sp-woke-install sp-pa11y-install sp-install sp-run sp-html \ 22 | sp-epub sp-serve sp-clean sp-clean-doc sp-spelling sp-spellcheck sp-linkcheck sp-woke \ 23 | sp-pa11y Makefile.sp sp-vale 24 | 25 | sp-full-help: $(VENVDIR) 26 | @. $(VENV); $(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 27 | @echo "\n\033[1;31mNOTE: This help texts shows unsupported targets!\033[0m" 28 | @echo "Run 'make help' to see supported targets." 
29 | 30 | # Shouldn't assume that venv is available on Ubuntu by default; discussion here: 31 | # https://bugs.launchpad.net/ubuntu/+source/python3.4/+bug/1290847 32 | $(SPHINXDIR)/requirements.txt: 33 | @python3 -c "import venv" || \ 34 | (echo "You must install python3-venv before you can build the documentation."; exit 1) 35 | python3 -m venv $(VENVDIR) 36 | @if [ ! -z "$(ADDPREREQS)" ]; then \ 37 | . $(VENV); pip install --require-virtualenv $(ADDPREREQS); \ 38 | fi 39 | . $(VENV); python3 $(SPHINXDIR)/build_requirements.py 40 | 41 | # If requirements are updated, venv should be rebuilt and timestamped. 42 | $(VENVDIR): $(SPHINXDIR)/requirements.txt 43 | @echo "... setting up virtualenv" 44 | python3 -m venv $(VENVDIR) 45 | . $(VENV); pip install --require-virtualenv \ 46 | --upgrade -r $(SPHINXDIR)/requirements.txt \ 47 | --log $(VENVDIR)/pip_install.log 48 | @test ! -f $(VENVDIR)/pip_list.txt || \ 49 | mv $(VENVDIR)/pip_list.txt $(VENVDIR)/pip_list.txt.bak 50 | @. $(VENV); pip list --local --format=freeze > $(VENVDIR)/pip_list.txt 51 | @touch $(VENVDIR) 52 | 53 | sp-woke-install: 54 | @type woke >/dev/null 2>&1 || \ 55 | { echo "Installing \"woke\" snap... \n"; sudo snap install woke; } 56 | 57 | sp-pa11y-install: 58 | @type $(PA11Y) >/dev/null 2>&1 || { \ 59 | echo "Installing \"pa11y\" from npm... \n"; \ 60 | mkdir -p $(SPHINXDIR)/node_modules/ ; \ 61 | npm install --prefix $(SPHINXDIR) pa11y; \ 62 | } 63 | 64 | sp-install: $(VENVDIR) 65 | 66 | sp-run: sp-install 67 | . $(VENV); sphinx-autobuild -b dirhtml "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) 68 | 69 | # Doesn't depend on $(BUILDDIR) to rebuild properly at every run. 70 | sp-html: sp-install 71 | . $(VENV); $(SPHINXBUILD) -W --keep-going -b dirhtml "$(SOURCEDIR)" "$(BUILDDIR)" -w $(SPHINXDIR)/warnings.txt $(SPHINXOPTS) 72 | 73 | sp-epub: sp-install 74 | . 
$(VENV); $(SPHINXBUILD) -b epub "$(SOURCEDIR)" "$(BUILDDIR)" -w $(SPHINXDIR)/warnings.txt $(SPHINXOPTS) 75 | 76 | sp-serve: sp-html 77 | cd "$(BUILDDIR)"; python3 -m http.server --bind 127.0.0.1 8000 78 | 79 | sp-clean: sp-clean-doc 80 | @test ! -e "$(VENVDIR)" -o -d "$(VENVDIR)" -a "$(abspath $(VENVDIR))" != "$(VENVDIR)" 81 | rm -rf $(VENVDIR) 82 | rm -f $(SPHINXDIR)/requirements.txt 83 | rm -rf $(SPHINXDIR)/node_modules/ 84 | rm -rf $(SPHINXDIR)/styles 85 | rm -rf $(SPHINXDIR)/vale.ini 86 | 87 | sp-clean-doc: 88 | git clean -fx "$(BUILDDIR)" 89 | rm -rf $(SPHINXDIR)/.doctrees 90 | 91 | sp-spellcheck: 92 | . $(VENV) ; python3 -m pyspelling -c $(SPHINXDIR)/spellingcheck.yaml -j $(shell nproc) 93 | 94 | sp-spelling: sp-html sp-spellcheck 95 | 96 | sp-linkcheck: sp-install 97 | . $(VENV) ; $(SPHINXBUILD) -b linkcheck "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) || { grep --color -F "[broken]" "$(BUILDDIR)/output.txt"; exit 1; } 98 | exit 0 99 | 100 | sp-woke: sp-woke-install 101 | woke $(ALLFILES) --exit-1-on-failure \ 102 | -c https://github.com/canonical/Inclusive-naming/raw/main/config.yml 103 | 104 | sp-pa11y: sp-pa11y-install sp-html 105 | find $(BUILDDIR) -name *.html -print0 | xargs -n 1 -0 $(PA11Y) 106 | 107 | sp-vale: sp-install 108 | @. $(VENV); test -d $(SPHINXDIR)/venv/lib/python*/site-packages/vale || pip install vale 109 | @. $(VENV); test -f $(SPHINXDIR)/vale.ini || python3 $(SPHINXDIR)/get_vale_conf.py 110 | @. $(VENV); find $(SPHINXDIR)/venv/lib/python*/site-packages/vale/vale_bin -size 195c -exec vale --config "$(SPHINXDIR)/vale.ini" $(TARGET) > /dev/null \; 111 | @echo "" 112 | @echo "Running Vale against $(TARGET). To change target set TARGET= with make command" 113 | @echo "" 114 | @. $(VENV); vale --config "$(SPHINXDIR)/vale.ini" --glob='*.{md,txt,rst}' $(TARGET) 115 | 116 | 117 | 118 | # Catch-all target: route all unknown targets to Sphinx using the new 119 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
120 | %: Makefile.sp 121 | . $(VENV); $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 122 | -------------------------------------------------------------------------------- /client/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/x509" 7 | "encoding/json" 8 | "fmt" 9 | "time" 10 | 11 | "github.com/canonical/lxd/shared/api" 12 | "github.com/canonical/microcluster/v2/client" 13 | "github.com/canonical/microcluster/v2/rest/response" 14 | "github.com/gorilla/websocket" 15 | 16 | "github.com/canonical/microcloud/microcloud/api/types" 17 | ) 18 | 19 | // GetStatus fetches a set of status information for the whole cluster. 20 | func GetStatus(ctx context.Context, c *client.Client) ([]types.Status, error) { 21 | queryCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) 22 | defer cancel() 23 | 24 | var statuses []types.Status 25 | err := c.Query(queryCtx, "GET", types.APIVersion, api.NewURL().Path("status"), nil, &statuses) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | return statuses, nil 31 | } 32 | 33 | // StartSession starts a new session and returns the underlying websocket connection. 34 | func StartSession(ctx context.Context, c *client.Client, role string, sessionTimeout time.Duration) (*websocket.Conn, error) { 35 | queryCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) 36 | defer cancel() 37 | 38 | url := api.NewURL().Path("session", role).WithQuery("timeout", sessionTimeout.String()) 39 | conn, err := c.Websocket(queryCtx, types.APIVersion, url) 40 | if err != nil { 41 | return nil, fmt.Errorf("Failed to start session websocket: %w", err) 42 | } 43 | 44 | return conn, nil 45 | } 46 | 47 | // StopSession is called from the initiator to stop a joiner session. 
48 | func StopSession(ctx context.Context, c *client.Client, stopMsg string) error { 49 | queryCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) 50 | defer cancel() 51 | 52 | data := types.SessionStopPut{Reason: stopMsg} 53 | err := c.Query(queryCtx, "PUT", types.APIVersion, api.NewURL().Path("session", "stop"), data, nil) 54 | if err != nil { 55 | return fmt.Errorf("Failed to stop joiner session: %w", err) 56 | } 57 | 58 | return nil 59 | } 60 | 61 | // JoinServices sends join information to initiate the cluster join process. 62 | func JoinServices(ctx context.Context, c *client.Client, data types.ServicesPut) error { 63 | queryCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) 64 | defer cancel() 65 | 66 | err := c.Query(queryCtx, "PUT", types.APIVersion, api.NewURL().Path("services"), data, nil) 67 | if err != nil { 68 | return fmt.Errorf("Failed to update cluster status of services: %w", err) 69 | } 70 | 71 | return nil 72 | } 73 | 74 | // JoinIntent sends the join intent to a potential cluster. 75 | func JoinIntent(ctx context.Context, c *client.Client, data types.SessionJoinPost) (*x509.Certificate, error) { 76 | queryCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) 77 | defer cancel() 78 | 79 | // The join intent request is using HMAC authorization. 80 | // Therefore we have to marshal the data ourselves as the JSON encoder used 81 | // by the query functions is appending a newline at the end. 82 | // See https://pkg.go.dev/encoding/json#Encoder.Encode. 83 | // This newline will cause the HMAC verification to fail on the server side 84 | // as the server will recreate the HMAC based on the request body. 85 | // The JSON marshaller doesn't add a newline. 86 | dataBytes, err := json.Marshal(data) 87 | if err != nil { 88 | return nil, fmt.Errorf("Failed to marshal join intent: %w", err) 89 | } 90 | 91 | path := api.NewURL().Path("session", "join") 92 | 93 | // We can pass a reader to indicate to the query functions the body is already marshalled. 
94 | resp, err := c.QueryRaw(queryCtx, "POST", types.APIVersion, path, bytes.NewBuffer(dataBytes)) 95 | if err != nil { 96 | return nil, fmt.Errorf("Failed to send join intent: %w", err) 97 | } 98 | 99 | // Parse the response to check for errors. 100 | _, err = response.ParseResponse(resp) 101 | if err != nil { 102 | return nil, err 103 | } 104 | 105 | if len(resp.TLS.PeerCertificates) == 0 { 106 | return nil, fmt.Errorf("Peer's certificate is missing") 107 | } 108 | 109 | return resp.TLS.PeerCertificates[0], nil 110 | } 111 | 112 | // RemoteIssueToken issues a token on the remote MicroCloud. 113 | func RemoteIssueToken(ctx context.Context, c *client.Client, serviceType types.ServiceType, data types.ServiceTokensPost) (string, error) { 114 | queryCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) 115 | defer cancel() 116 | 117 | var token string 118 | err := c.Query(queryCtx, "POST", types.APIVersion, api.NewURL().Path("services", string(serviceType), "tokens"), data, &token) 119 | if err != nil { 120 | return "", fmt.Errorf("Failed to issue remote token: %w", err) 121 | } 122 | 123 | return token, nil 124 | } 125 | 126 | // DeleteClusterMember removes the cluster member from any service that it is part of. 127 | func DeleteClusterMember(ctx context.Context, c *client.Client, memberName string, force bool) error { 128 | queryCtx, cancel := context.WithTimeout(ctx, time.Minute) 129 | defer cancel() 130 | 131 | path := api.NewURL().Path("services", "cluster", memberName) 132 | if force { 133 | path = path.WithQuery("force", "1") 134 | } 135 | 136 | return c.Query(queryCtx, "DELETE", types.APIVersion, path, nil, nil) 137 | } 138 | --------------------------------------------------------------------------------