├── .dockerignore ├── .github └── workflows │ └── build.yml ├── .gitignore ├── .golangci.json ├── Dockerfile.dapper ├── LICENSE ├── Makefile ├── README.md ├── cmd └── rancherd │ ├── bootstrap │ └── bootstrap.go │ ├── gettoken │ └── gettoken.go │ ├── gettpmhash │ └── gettpmhash.go │ ├── info │ └── info.go │ ├── main.go │ ├── probe │ └── probe.go │ ├── resetadmin │ └── resetadmin.go │ ├── retry │ └── retry.go │ ├── updateclientsecret │ └── update.go │ └── upgrade │ └── upgrade.go ├── config-example.yaml ├── go.mod ├── go.sum ├── install.sh ├── pkg ├── auth │ └── auth.go ├── cacerts │ └── cacerts.go ├── config │ ├── remote.go │ ├── runtime.go │ └── types.go ├── discovery │ └── discovery.go ├── images │ └── images.go ├── join │ └── join.go ├── kubectl │ └── kubectl.go ├── os │ └── os.go ├── plan │ ├── bootstrap.go │ ├── run.go │ ├── token.go │ └── upgrade.go ├── probe │ ├── probe.go │ └── run.go ├── rancher │ ├── cluster.go │ ├── run.go │ └── wait.go ├── rancherd │ ├── rancher.go │ └── versions.go ├── registry │ └── registry.go ├── resources │ └── resources.go ├── retry │ └── apply.go ├── roles │ └── role.go ├── runtime │ ├── instruction.go │ ├── role.go │ └── wait.go ├── self │ └── self.go ├── token │ └── token.go ├── tpm │ ├── get.go │ ├── tpm.go │ └── tpm_attestor.go ├── version │ └── version.go └── versions │ └── versions.go ├── rancherd.service └── scripts ├── boilerplate.go.txt ├── build ├── build-sha-file ├── ci ├── entry ├── package ├── release ├── test ├── validate └── version /.dockerignore: -------------------------------------------------------------------------------- 1 | ./.dapper 2 | ./.cache 3 | ./dist 4 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: 3 | push: 4 | branches: 5 | - harvester-dev 6 | tags: 7 | - v* 8 | pull_request: 9 | jobs: 10 | build: 11 | name: Build images 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@v4 16 | 17 | # Build binaries and SHA files 18 | - name: Run make ci 19 | run: make ci 20 | 21 | - name: Release 22 | if: startsWith(github.ref, 'refs/tags/') 23 | env: 24 | GH_TOKEN: ${{ github.token }} 25 | run: | 26 | gh release upload ${{ github.ref_name }} dist/artifacts/rancherd-amd64 27 | gh release upload ${{ github.ref_name }} dist/artifacts/rancherd-arm64 28 | gh release upload ${{ github.ref_name }} dist/artifacts/sha256sum-amd64.txt 29 | gh release upload ${{ github.ref_name }} dist/artifacts/sha256sum-arm64.txt 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.dapper 2 | /.cache 3 | /bin 4 | /dist 5 | *.swp 6 | .idea 7 | -------------------------------------------------------------------------------- /.golangci.json: -------------------------------------------------------------------------------- 1 | { 2 | "linters": { 3 | "disable-all": true, 4 | "enable": [ 5 | "govet", 6 | "revive", 7 | "goimports", 8 | "misspell", 9 | "ineffassign", 10 | "gofmt" 11 | ] 12 | }, 13 | "run": { 14 | "skip-files": [ 15 | "/zz_generated_" 16 | ], 17 | "deadline": "5m" 18 | }, 19 | "issues": { 20 | "exclude-rules": [ 21 | { 22 | "linters": "revive", 23 | "text": "should have comment or be unexported" 24 | } 25 | ] 26 | } 27 | } 28 | -------------------------------------------------------------------------------- 
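The CI workflow and lint settings above are driven locally through the Makefile and Dockerfile.dapper that follow: every file in `scripts/` becomes a make target that runs inside the dapper build container. A minimal sketch of the equivalent local invocations, assuming Docker is available and assuming `scripts/validate` is the target that wraps golangci-lint with the `.golangci.json` above:

```bash
# Run the same entry point as the GitHub workflow (downloads dapper on first use,
# then runs ./scripts/ci inside the Dockerfile.dapper image).
make ci

# Run a single script target instead, e.g. lint only
# (assumption: scripts/validate invokes golangci-lint using .golangci.json).
make validate
```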
/Dockerfile.dapper: -------------------------------------------------------------------------------- 1 | FROM registry.suse.com/bci/golang:1.23 2 | 3 | RUN zypper in -y bash git gcc docker vim less file curl wget ca-certificates trousers-devel 4 | 5 | RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.63.4 6 | 7 | ENV DAPPER_ENV REPO TAG DRONE_TAG 8 | ENV DAPPER_SOURCE /go/src/github.com/rancher/rancherd/ 9 | ENV DAPPER_OUTPUT ./bin ./dist 10 | ENV DAPPER_DOCKER_SOCKET true 11 | ENV DAPPER_RUN_ARGS "-v rancherd-go:/root/go -v rancherd-cache:/root/.cache" 12 | WORKDIR ${DAPPER_SOURCE} 13 | 14 | ENTRYPOINT ["./scripts/entry"] 15 | CMD ["ci"] 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGETS := $(shell ls scripts) 2 | 3 | .dapper: 4 | @echo Downloading dapper 5 | @curl -sL https://releases.rancher.com/dapper/latest/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp 6 | @@chmod +x .dapper.tmp 7 | @./.dapper.tmp -v 8 | @mv .dapper.tmp .dapper 9 | 10 | $(TARGETS): .dapper 11 | ./.dapper $@ 12 | 13 | .DEFAULT_GOAL := ci 14 | 15 | .PHONY: $(TARGETS) 16 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rancherd 2 | 3 | > **WARNING** 4 | > 5 | > - At this moment the project is intended to be used by the [Harvester project](https://harvesterhci.io/). The Harvester team makes its best effort to support general uses. 6 | > - For ["RancherD"](https://rancher.com/docs/rancher/v2.5/en/installation/other-installation-methods/install-rancher-on-linux/rancherd-configuration/) (the tool that came with Rancher 2.5) issues, please report them in the [Rancher project](https://github.com/rancher/rancher). 7 | 8 | 9 | Rancherd bootstraps a node with Kubernetes (k3s/rke2) and Rancher such 10 | that all future management of Kubernetes and Rancher can be done from 11 | Kubernetes. Rancherd will only run once per node. Once the system has 12 | been fully bootstrapped, it will not run again. Rancherd is primarily 13 | intended to be run from cloud-init or a similar system. 14 | 15 | ## Quick Start 16 | 17 | To create a three-node cluster, run the following on servers named `server1`, 18 | `server2`, and `server3`.
19 | 20 | On `server1` 21 | ```bash 22 | mkdir -p /etc/rancher/rancherd 23 | cat > /etc/rancher/rancherd/config.yaml << EOF 24 | role: cluster-init 25 | token: somethingrandom 26 | EOF 27 | curl -fL https://raw.githubusercontent.com/rancher/rancherd/master/install.sh | sh - 28 | ``` 29 | 30 | On `server2` 31 | ```bash 32 | mkdir -p /etc/rancher/rancherd 33 | cat > /etc/rancher/rancherd/config.yaml << EOF 34 | role: server 35 | server: https://server1:8443 36 | token: somethingrandom 37 | EOF 38 | curl -fL https://raw.githubusercontent.com/rancher/rancherd/master/install.sh | sh - 39 | ``` 40 | 41 | On `server3` 42 | ```bash 43 | mkdir -p /etc/rancher/rancherd 44 | cat > /etc/rancher/rancherd/config.yaml << EOF 45 | role: server 46 | server: https://server1:8443 47 | token: somethingrandom 48 | EOF 49 | curl -fL https://raw.githubusercontent.com/rancher/rancherd/master/install.sh | sh - 50 | ``` 51 | 52 | ## Installation 53 | 54 | ### cloud-init 55 | 56 | Rancherd is primarily intended to be run from cloud-init. 57 | Add the following to your cloud-init for a single-node cluster. All 58 | configuration that would be found in the rancherd config.yaml should 59 | be embedded in the `rancherd` key in the cloud-config. 60 | 61 | ```yaml 62 | #cloud-config 63 | rancherd: 64 | role: cluster-init 65 | runcmd: 66 | - curl -fL https://raw.githubusercontent.com/rancher/rancherd/master/install.sh | sh - 67 | ``` 68 | 69 | ### Manual 70 | 71 | The `rancherd` binary can be downloaded from https://github.com/rancher/rancherd/releases/latest 72 | and run manually. 73 | 74 | ### Curl script (systemd installation) 75 | 76 | The command below downloads the `rancherd` binary, sets up a systemd unit, and runs it. 77 | 78 | ```bash 79 | curl -sfL https://raw.githubusercontent.com/rancher/rancherd/master/install.sh | sh - 80 | ``` 81 | 82 | ## Cluster Initialization 83 | 84 | Creating a cluster always starts with one node initializing the cluster (by being 85 | assigned the `cluster-init` role) and other nodes then joining the cluster. 86 | The new cluster will have a token generated for it, or you can manually 87 | assign a unique string. The token for an existing cluster can be determined 88 | by running `rancherd get-token`. 89 | 90 | ## Joining Nodes 91 | 92 | Nodes can be joined to the cluster with the role `server` to add more control 93 | plane nodes or with the role `agent` to add more worker nodes. To join a node 94 | you must have the Rancher server URL (which by default runs on port 95 | `8443`) and the token. 96 | 97 | ## Node Roles 98 | 99 | 100 | Rancherd will bootstrap a node with one of the following roles: 101 | 102 | 1. __cluster-init__: Every cluster must start with one node that has the 103 | cluster-init role. 104 | 2. __server__: Joins the cluster as a new control-plane, etcd, and worker node. 105 | 3. __agent__: Joins the cluster as a worker-only node. 106 | 107 | ## Server discovery 108 | 109 | It can be quite cumbersome to automate bringing up a clustered system 110 | that requires one bootstrap node. There are also additional considerations 111 | around load balancing and replacing nodes in a proper production setup. 112 | Rancherd supports server discovery based on https://github.com/hashicorp/go-discover. 113 | 114 | When using server discovery the `cluster-init` role is not used, only `server` 115 | and `agent`. The `server` URL is also dropped in favor of the `discovery` 116 | key.
The `discovery` configuration is used to dynamically determine the server 117 | URL and whether the current node should act as the `cluster-init` node. 118 | 119 | Example 120 | ```yaml 121 | role: server 122 | discovery: 123 | params: 124 | # Corresponds to go-discover provider name 125 | provider: "mdns" 126 | # All other key/values are parameters corresponding to what 127 | # the go-discover provider is expecting 128 | service: "rancher-server" 129 | # If this is a new cluster it will wait until 3 servers are 130 | # available and they all agree on the same cluster-init node 131 | expectedServers: 3 132 | # How long servers are remembered for. It is useful for providers 133 | # that are not consistent in their responses, like mdns. 134 | serverCacheDuration: 1m 135 | ``` 136 | More information on how to use discovery is in the config example. 137 | 138 | ## Configuration 139 | 140 | Configuration for rancherd goes in `/etc/rancher/rancherd/config.yaml`. A full 141 | example configuration with documentation is available in 142 | [config-example.yaml](./config-example.yaml). 143 | 144 | Minimal configuration 145 | ```yaml 146 | # /etc/rancher/rancherd/config.yaml 147 | 148 | # role: Valid values are cluster-init, server, agent 149 | role: cluster-init 150 | 151 | # token: A shared secret known by all clusters in the system 152 | token: somethingrandom 153 | 154 | # server: The server URL to join a cluster to. By default port 8443. 155 | # Only valid for roles server and agent, not cluster-init 156 | server: https://example.com:8443 157 | ``` 158 | 159 | ### Version Channels 160 | 161 | The `kubernetesVersion` and `rancherVersion` fields accept channel names instead of explicit versions. 162 | 163 | Valid `kubernetesVersion` channels are as follows: 164 | 165 | | Channel Name | Description | 166 | |--------------|-------------| 167 | | stable | k3s stable (default value of kubernetesVersion) | 168 | | latest | k3s latest | 169 | | testing | k3s testing | 170 | | stable:k3s | Same as stable channel | 171 | | latest:k3s | Same as latest channel | 172 | | testing:k3s | Same as testing channel | 173 | | stable:rke2 | rke2 stable | 174 | | latest:rke2 | rke2 latest | 175 | | testing:rke2 | rke2 testing | 176 | | v1.21 | Latest k3s v1.21 release. This applies to any Kubernetes minor version | 177 | | v1.21:rke2 | Latest rke2 v1.21 release. This applies to any Kubernetes minor version | 178 | 179 | Valid `rancherVersion` channels are as follows: 180 | 181 | | Channel Name | Description | 182 | |--------------|-------------| 183 | | stable | [stable helm repo](https://releases.rancher.com/server-charts/stable/index.yaml) (default value of rancherVersion) | 184 | | latest | [latest helm repo](https://releases.rancher.com/server-charts/latest/index.yaml) | 185 | 186 | ### Rancher Config 187 | 188 | By default Rancher is installed with the following values.yaml. You can override 189 | any of these settings with the `rancherValues` setting in the rancherd `config.yaml`: 190 | ```yaml 191 | # Multi-Cluster Management is disabled by default, change to multi-cluster-management=true to enable 192 | features: multi-cluster-management=false 193 | 194 | # The Rancher UI will run on the host port 8443 by default. Set to 0 to disable 195 | # and instead use ingress.enabled=true to route traffic through ingress 196 | hostPort: 8443 197 | 198 | # Accessing ingress is disabled by default.
199 | ingress: 200 | enabled: false 201 | 202 | # Don't create a default admin password 203 | noDefaultAdmin: true 204 | 205 | # A negative value means it will run up to that many replicas if there are 206 | # at least that many nodes available. For example, if you have 2 nodes and 207 | # `replicas` is `-3` then 2 replicas will run. Once you add a third node, 208 | # then 3 replicas will run 209 | replicas: -3 210 | 211 | # External TLS is assumed 212 | tls: external 213 | ``` 214 | 215 | A full reference of all parameters in the values.yaml is available in 216 | the [Rancher repo](https://github.com/rancher/rancher/blob/release/v2.6/chart/values.yaml). 217 | 218 | ## Dashboard/UI 219 | 220 | The Rancher UI runs by default on port `:8443`. There is no default 221 | `admin` user password set. You must run `rancherd reset-admin` once to 222 | get an `admin` password to log in. 223 | 224 | ## Multi-Cluster Management 225 | 226 | By default Multi-Cluster Management is disabled in Rancher. To enable it, set the 227 | following in the rancherd config.yaml: 228 | ```yaml 229 | rancherValues: 230 | features: multi-cluster-management=true 231 | ``` 232 | 233 | ## Upgrading 234 | 235 | rancherd itself doesn't need to be upgraded. It only runs once per node 236 | and after that provides no further value. What you do need to upgrade after 237 | the fact is Rancher and Kubernetes. 238 | 239 | ### Rancher 240 | Rancher is installed as a Helm chart following the standard procedure. You can upgrade 241 | Rancher using the procedure documented at 242 | https://rancher.com/docs/rancher/v2.6/en/installation/install-rancher-on-k8s/upgrades/. 243 | 244 | ### Kubernetes 245 | To upgrade Kubernetes you will use Rancher to orchestrate the upgrade. This is a matter of changing 246 | the Kubernetes version on the `fleet-local/local` `Cluster` in the `provisioning.cattle.io/v1` 247 | apiVersion. For example: 248 | 249 | ```shell 250 | kubectl edit clusters.provisioning.cattle.io -n fleet-local local 251 | ``` 252 | ```yaml 253 | apiVersion: provisioning.cattle.io/v1 254 | kind: Cluster 255 | metadata: 256 | name: local 257 | namespace: fleet-local 258 | spec: 259 | # Change to new valid k8s version 260 | kubernetesVersion: v1.21.4+k3s1 261 | ``` 262 | 263 | ### Automated 264 | 265 | You can also use the `rancherd upgrade` command on a `server` node to automatically do the
267 | -------------------------------------------------------------------------------- /cmd/rancherd/bootstrap/bootstrap.go: -------------------------------------------------------------------------------- 1 | package bootstrap 2 | 3 | import ( 4 | "github.com/rancher/rancherd/pkg/rancherd" 5 | cli "github.com/rancher/wrangler-cli" 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | func NewBootstrap() *cobra.Command { 10 | return cli.Command(&Bootstrap{}, cobra.Command{ 11 | Short: "Run Rancher and Kubernetes bootstrap", 12 | }) 13 | } 14 | 15 | type Bootstrap struct { 16 | Force bool `usage:"Run bootstrap even if already bootstrapped" short:"f"` 17 | //DataDir string `usage:"Path to rancherd state" default:"/var/lib/rancher/rancherd"` 18 | //Config string `usage:"Custom config path" default:"/etc/rancher/rancherd/config.yaml" short:"c"` 19 | } 20 | 21 | func (b *Bootstrap) Run(cmd *cobra.Command, _ []string) error { 22 | r := rancherd.New(rancherd.Config{ 23 | Force: b.Force, 24 | DataDir: rancherd.DefaultDataDir, 25 | ConfigPath: rancherd.DefaultConfigFile, 26 | }) 27 | return r.Run(cmd.Context()) 28 | } 29 | -------------------------------------------------------------------------------- /cmd/rancherd/gettoken/gettoken.go: -------------------------------------------------------------------------------- 1 | package gettoken 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/rancher/rancherd/pkg/token" 7 | cli "github.com/rancher/wrangler-cli" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | func NewGetToken() *cobra.Command { 12 | return cli.Command(&GetToken{}, cobra.Command{ 13 | Short: "Print token to join nodes to the cluster", 14 | }) 15 | } 16 | 17 | type GetToken struct { 18 | Kubeconfig string `usage:"Kubeconfig file" env:"KUBECONFIG"` 19 | } 20 | 21 | func (p *GetToken) Run(cmd *cobra.Command, _ []string) error { 22 | str, err := token.GetToken(cmd.Context(), p.Kubeconfig) 23 | if err != nil { 24 | return err 25 | } 26 | fmt.Println(str) 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /cmd/rancherd/gettpmhash/gettpmhash.go: -------------------------------------------------------------------------------- 1 | package gettpmhash 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/rancher/rancherd/pkg/tpm" 7 | cli "github.com/rancher/wrangler-cli" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | func NewGetTPMHash() *cobra.Command { 12 | return cli.Command(&GetTPMHash{}, cobra.Command{ 13 | Use: "get-tpm-hash", 14 | Short: "Print TPM hash to identify this machine", 15 | }) 16 | } 17 | 18 | type GetTPMHash struct { 19 | } 20 | 21 | func (p *GetTPMHash) Run(*cobra.Command, []string) error { 22 | str, err := tpm.GetPubHash() 23 | if err != nil { 24 | return err 25 | } 26 | fmt.Println(str) 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /cmd/rancherd/info/info.go: -------------------------------------------------------------------------------- 1 | package info 2 | 3 | import ( 4 | "github.com/rancher/rancherd/pkg/rancherd" 5 | cli "github.com/rancher/wrangler-cli" 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | func NewInfo() *cobra.Command { 10 | return cli.Command(&Info{}, cobra.Command{ 11 | Short: "Print installation versions", 12 | }) 13 | } 14 | 15 | type Info struct { 16 | } 17 | 18 | func (b *Info) Run(cmd *cobra.Command, _ []string) error { 19 | r := rancherd.New(rancherd.Config{ 20 | DataDir: rancherd.DefaultDataDir, 21 | ConfigPath: rancherd.DefaultConfigFile, 22 | }) 23 | return 
r.Info(cmd.Context()) 24 | } 25 | -------------------------------------------------------------------------------- /cmd/rancherd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | cli "github.com/rancher/wrangler-cli" 5 | "github.com/spf13/cobra" 6 | 7 | "github.com/rancher/rancherd/cmd/rancherd/bootstrap" 8 | "github.com/rancher/rancherd/cmd/rancherd/gettoken" 9 | "github.com/rancher/rancherd/cmd/rancherd/gettpmhash" 10 | "github.com/rancher/rancherd/cmd/rancherd/info" 11 | "github.com/rancher/rancherd/cmd/rancherd/probe" 12 | "github.com/rancher/rancherd/cmd/rancherd/resetadmin" 13 | "github.com/rancher/rancherd/cmd/rancherd/retry" 14 | "github.com/rancher/rancherd/cmd/rancherd/updateclientsecret" 15 | "github.com/rancher/rancherd/cmd/rancherd/upgrade" 16 | ) 17 | 18 | type Rancherd struct { 19 | } 20 | 21 | func (a *Rancherd) Run(cmd *cobra.Command, _ []string) error { 22 | return cmd.Help() 23 | } 24 | 25 | func main() { 26 | root := cli.Command(&Rancherd{}, cobra.Command{ 27 | Long: "Bootstrap Rancher and k3s/rke2 on a node", 28 | }) 29 | root.AddCommand( 30 | bootstrap.NewBootstrap(), 31 | gettoken.NewGetToken(), 32 | resetadmin.NewResetAdmin(), 33 | probe.NewProbe(), 34 | retry.NewRetry(), 35 | upgrade.NewUpgrade(), 36 | info.NewInfo(), 37 | gettpmhash.NewGetTPMHash(), 38 | updateclientsecret.NewUpdateClientSecret(), 39 | ) 40 | cli.Main(root) 41 | } 42 | -------------------------------------------------------------------------------- /cmd/rancherd/probe/probe.go: -------------------------------------------------------------------------------- 1 | package probe 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/rancher/rancherd/pkg/probe" 8 | cli "github.com/rancher/wrangler-cli" 9 | "github.com/spf13/cobra" 10 | ) 11 | 12 | func NewProbe() *cobra.Command { 13 | return cli.Command(&Probe{}, cobra.Command{ 14 | Short: "Run plan probes", 15 | Hidden: true, 16 | }) 17 | } 18 | 19 | type Probe struct { 20 | Interval string `usage:"Polling interval to run probes" default:"2s" short:"i"` 21 | File string `usage:"Plan file" default:"/var/lib/rancher/rancherd/plan/plan.json" short:"f"` 22 | } 23 | 24 | func (p *Probe) Run(cmd *cobra.Command, _ []string) error { 25 | interval, err := time.ParseDuration(p.Interval) 26 | if err != nil { 27 | return fmt.Errorf("parsing duration %s: %w", p.Interval, err) 28 | } 29 | 30 | return probe.RunProbes(cmd.Context(), p.File, interval) 31 | } 32 | -------------------------------------------------------------------------------- /cmd/rancherd/resetadmin/resetadmin.go: -------------------------------------------------------------------------------- 1 | package resetadmin 2 | 3 | import ( 4 | "github.com/rancher/rancherd/pkg/auth" 5 | cli "github.com/rancher/wrangler-cli" 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | func NewResetAdmin() *cobra.Command { 10 | return cli.Command(&ResetAdmin{}, cobra.Command{ 11 | Short: "Bootstrap and reset admin password", 12 | }) 13 | } 14 | 15 | type ResetAdmin struct { 16 | Password string `usage:"Password for Rancher login" env:"PASSWORD"` 17 | PasswordFile string `usage:"Password for Rancher login, from file" env:"PASSWORD_FILE"` 18 | Kubeconfig string `usage:"Kubeconfig file" env:"KUBECONFIG"` 19 | } 20 | 21 | func (p *ResetAdmin) Run(cmd *cobra.Command, _ []string) error { 22 | return auth.ResetAdmin(cmd.Context(), &auth.Options{ 23 | Password: p.Password, 24 | PasswordFile: p.PasswordFile, 25 | Kubeconfig: p.Kubeconfig, 26 | }) 27 | } 28 
| -------------------------------------------------------------------------------- /cmd/rancherd/retry/retry.go: -------------------------------------------------------------------------------- 1 | package retry 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/rancher/rancherd/pkg/retry" 7 | cli "github.com/rancher/wrangler-cli" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | func NewRetry() *cobra.Command { 12 | return cli.Command(&Retry{}, cobra.Command{ 13 | Short: "Retry command until it succeeds", 14 | DisableFlagParsing: true, 15 | Hidden: true, 16 | }) 17 | } 18 | 19 | type Retry struct { 20 | SleepFirst bool `usage:"Sleep 5 seconds before running command"` 21 | } 22 | 23 | func (p *Retry) Run(cmd *cobra.Command, args []string) error { 24 | if p.SleepFirst { 25 | time.Sleep(5 * time.Second) 26 | } 27 | return retry.Retry(cmd.Context(), 15*time.Second, args) 28 | } 29 | -------------------------------------------------------------------------------- /cmd/rancherd/updateclientsecret/update.go: -------------------------------------------------------------------------------- 1 | package updateclientsecret 2 | 3 | import ( 4 | cli "github.com/rancher/wrangler-cli" 5 | "github.com/spf13/cobra" 6 | 7 | "github.com/rancher/rancherd/pkg/rancher" 8 | ) 9 | 10 | func NewUpdateClientSecret() *cobra.Command { 11 | return cli.Command(&UpdateClientSecret{}, cobra.Command{ 12 | Short: "Update cluster client secret to have API Server URL and CA Certs configured", 13 | }) 14 | } 15 | 16 | type UpdateClientSecret struct { 17 | Kubeconfig string `usage:"Kubeconfig file" env:"KUBECONFIG"` 18 | } 19 | 20 | func (s *UpdateClientSecret) Run(cmd *cobra.Command, _ []string) error { 21 | return rancher.UpdateClientSecret(cmd.Context(), &rancher.Options{Kubeconfig: s.Kubeconfig}) 22 | } 23 | -------------------------------------------------------------------------------- /cmd/rancherd/upgrade/upgrade.go: -------------------------------------------------------------------------------- 1 | package upgrade 2 | 3 | import ( 4 | "github.com/rancher/rancherd/pkg/rancherd" 5 | cli "github.com/rancher/wrangler-cli" 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | func NewUpgrade() *cobra.Command { 10 | return cli.Command(&Upgrade{}, cobra.Command{ 11 | Short: "Upgrade Rancher and Kubernetes", 12 | }) 13 | } 14 | 15 | type Upgrade struct { 16 | RancherVersion string `usage:"Target Rancher version" short:"r" default:"stable"` 17 | RancherOSVersion string `usage:"Target RancherOS version" short:"o" default:"latest" name:"rancher-os-version"` 18 | KubernetesVersion string `usage:"Target Kubernetes version" short:"k" default:"stable"` 19 | Force bool `usage:"Run without prompting for confirmation" short:"f"` 20 | } 21 | 22 | func (b *Upgrade) Run(cmd *cobra.Command, _ []string) error { 23 | r := rancherd.New(rancherd.Config{ 24 | Force: b.Force, 25 | DataDir: rancherd.DefaultDataDir, 26 | ConfigPath: rancherd.DefaultConfigFile, 27 | }) 28 | return r.Upgrade(cmd.Context(), rancherd.UpgradeConfig{ 29 | RancherVersion: b.RancherVersion, 30 | KubernetesVersion: b.KubernetesVersion, 31 | RancherOSVersion: b.RancherOSVersion, 32 | }) 33 | } 34 | -------------------------------------------------------------------------------- /config-example.yaml: -------------------------------------------------------------------------------- 1 | ######################################################## 2 | # The below parameters apply to cluster-init role only # 3 | ######################################################## 4 | 5 | # The Kubernetes version 
to be installed. This must be a k3s or RKE2 version 6 | # v1.21 or newer. k3s and RKE2 versions always have a `k3s` or `rke2` in the 7 | # version string. 8 | kubernetesVersion: v1.22.2+k3s1 9 | 10 | # The Rancher version to be installed or a channel "latest" or "stable" 11 | rancherVersion: v2.6.0 12 | 13 | # Values set on the Rancher Helm chart. Refer to 14 | # https://github.com/rancher/rancher/blob/release/v2.6/chart/values.yaml 15 | # for possible values. 16 | rancherValues: 17 | # Below are the default values set 18 | 19 | # Multi-Cluster Management is disabled by default, change to multi-cluster-management=true to enable 20 | features: multi-cluster-management=false 21 | # The Rancher UI will run on the host port 8443 by default. Set to 0 to disable 22 | # and instead use ingress.enabled=true to route traffic through ingress 23 | hostPort: 8443 24 | # Accessing ingress is disabled by default. 25 | ingress: 26 | enabled: false 27 | # Don't create a default admin password 28 | noDefaultAdmin: true 29 | # A negative value means it will run up to that many replicas if there are 30 | # at least that many nodes available. For example, if you have 2 nodes and 31 | # `replicas` is `-3` then 2 replicas will run. Once you add a third node, 32 | # then 3 replicas will run 33 | replicas: -3 34 | # External TLS is assumed 35 | tls: external 36 | 37 | 38 | # Additional SANs (hostnames) to be added to the generated TLS certificate that 39 | # is served on port 6443. 40 | tlsSans: 41 | - additionalhostname.example.com 42 | 43 | # Generic commands to run before bootstrapping the node. 44 | preInstructions: 45 | - name: something 46 | # This image will be extracted to a temporary folder and 47 | # set as the current working dir. The command will not run 48 | # contained or chrooted; this is only a way to copy assets 49 | # to the host. This parameter is optional 50 | image: custom/image:1.1.1 51 | # Environment variables to set 52 | env: 53 | - FOO=BAR 54 | # Program arguments 55 | args: 56 | - arg1 57 | - arg2 58 | # Command to run 59 | command: /bin/dosomething 60 | # Save output to /var/lib/rancher/rancherd/plan/plan-output.json 61 | saveOutput: false 62 | 63 | # Generic commands to run after bootstrapping the node. 64 | postInstructions: 65 | - name: something 66 | env: 67 | - FOO=BAR 68 | args: 69 | - arg1 70 | - arg2 71 | command: /bin/dosomething 72 | saveOutput: false 73 | 74 | # Kubernetes resources that will be created once Rancher is bootstrapped 75 | resources: 76 | - kind: ConfigMap 77 | apiVersion: v1 78 | metadata: 79 | name: random 80 | data: 81 | key: value 82 | 83 | # Contents of the registries.yaml that will be used by k3s/RKE2. The structure 84 | # is documented at https://rancher.com/docs/k3s/latest/en/installation/private-registry/ 85 | registries: {} 86 | 87 | # The default registry used for all Rancher container images. For more information 88 | # refer to https://rancher.com/docs/rancher/v2.6/en/admin-settings/config-private-registry/ 89 | systemDefaultRegistry: someprefix.example.com:5000 90 | 91 | # Advanced: The system agent installer image used for Kubernetes 92 | runtimeInstallerImage: ... 93 | 94 | # Advanced: The system agent installer image used for Rancher 95 | rancherInstallerImage: ... 96 | 97 | ########################################### 98 | # The below parameters apply to all roles # 99 | ########################################### 100 | 101 | # The URL to Rancher to join a node.
If you have disabled the hostPort and configured 102 | # TLS then this will be the server you have setup. 103 | server: https://myserver.example.com:8443 104 | 105 | # A shared secret to join nodes to the cluster 106 | token: sometoken 107 | 108 | # Instead of setting the server parameter above the server value can be dynamically 109 | # determined from cloud provider metadata. This is powered by https://github.com/hashicorp/go-discover. 110 | # Discovery requires that the hostPort is not disabled. 111 | discovery: 112 | params: 113 | # Corresponds to go-discover provider name 114 | provider: "mdns" 115 | # All other key/values are parameters corresponding to what 116 | # the go-discover provider is expecting 117 | service: "rancher-server" 118 | # If this is a new cluster it will wait until 3 server are 119 | # available and they all agree on the same cluster-init node 120 | expectedServers: 3 121 | # How long servers are remembered for. It is useful for providers 122 | # that are not consistent in their responses, like mdns. 123 | serverCacheDuration: 1m 124 | 125 | # The role of this node. Every cluster must start with one node as role=cluster-init. 126 | # After that nodes can be joined using the server role for control-plane nodes and 127 | # agent role for worker only nodes. The server/agent terms correspond to the server/agent 128 | # terms in k3s and RKE2 129 | role: cluster-init,server,agent 130 | # The Kubernetes node name that will be set 131 | nodeName: custom-hostname 132 | # The IP address that will be set in Kubernetes for this node 133 | address: 123.123.123.123 134 | # The internal IP address that will be used for this node 135 | internalAddress: 123.123.123.124 136 | # Taints to apply to this node upon creation 137 | taints: 138 | - dedicated=special-user:NoSchedule 139 | # Labels to apply to this node upon creation 140 | labels: 141 | - key=value 142 | 143 | # Advanced: Arbitrary configuration that will be placed in /etc/rancher/k3s/config.yaml.d/40-rancherd.yaml 144 | # or /etc/rancher/rke2/config.yaml.d/40-rancherd.yaml 145 | extraConfig: {} 146 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/rancher/rancherd 2 | 3 | go 1.23.4 4 | 5 | replace ( 6 | k8s.io/api => k8s.io/api v0.24.10 7 | k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.24.10 8 | k8s.io/apimachinery => k8s.io/apimachinery v0.24.10 9 | k8s.io/apiserver => k8s.io/apiserver v0.24.10 10 | k8s.io/cli-runtime => k8s.io/cli-runtime v0.24.10 11 | k8s.io/client-go => k8s.io/client-go v0.24.10 12 | k8s.io/cloud-provider => k8s.io/cloud-provider v0.24.10 13 | k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.24.10 14 | k8s.io/code-generator => k8s.io/code-generator v0.24.10 15 | k8s.io/component-base => k8s.io/component-base v0.24.10 16 | k8s.io/component-helpers => k8s.io/component-helpers v0.24.10 17 | k8s.io/controller-manager => k8s.io/controller-manager v0.24.10 18 | k8s.io/cri-api => k8s.io/cri-api v0.24.10 19 | k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.24.10 20 | k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.10 21 | k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.10 22 | k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.10 23 | k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.10 24 | k8s.io/kubectl => k8s.io/kubectl v0.24.10 25 | k8s.io/kubelet => k8s.io/kubelet v0.24.10 26 | 
k8s.io/kubernetes => k8s.io/kubernetes v1.24.10 27 | k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.24.10 28 | k8s.io/metrics => k8s.io/metrics v0.24.10 29 | k8s.io/mount-utils => k8s.io/mount-utils v0.24.10 30 | k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.24.10 31 | k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.24.10 32 | sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.12.3 33 | ) 34 | 35 | require ( 36 | github.com/google/certificate-transparency-go v1.1.2 37 | github.com/google/go-attestation v0.3.2 38 | github.com/gorilla/websocket v1.4.2 39 | github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0 40 | github.com/pkg/errors v0.9.1 41 | github.com/rancher/rancher/pkg/apis v0.0.0-20210920193801-79027c456224 42 | github.com/rancher/system-agent v0.0.1-alpha30 43 | github.com/rancher/wharfie v0.3.2 44 | github.com/rancher/wrangler v0.8.6-0.20210819203859-0babd42fbad8 45 | github.com/rancher/wrangler-cli v0.0.0-20210217230406-95cfa275f52f 46 | github.com/sirupsen/logrus v1.8.1 47 | github.com/spf13/cobra v1.7.0 48 | golang.org/x/crypto v0.14.0 49 | golang.org/x/mod v0.8.0 50 | gopkg.in/yaml.v2 v2.4.0 51 | gopkg.in/yaml.v3 v3.0.1 52 | k8s.io/api v0.28.3 53 | k8s.io/apimachinery v0.28.3 54 | k8s.io/client-go v12.0.0+incompatible 55 | sigs.k8s.io/yaml v1.4.0 56 | ) 57 | 58 | require ( 59 | cloud.google.com/go/compute v1.23.1 // indirect 60 | cloud.google.com/go/compute/metadata v0.2.3 // indirect 61 | github.com/Azure/azure-sdk-for-go v55.7.0+incompatible // indirect 62 | github.com/Azure/go-autorest v14.2.0+incompatible // indirect 63 | github.com/Azure/go-autorest/autorest v0.11.19 // indirect 64 | github.com/Azure/go-autorest/autorest/adal v0.9.14 // indirect 65 | github.com/Azure/go-autorest/autorest/azure/auth v0.5.0 // indirect 66 | github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 // indirect 67 | github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect 68 | github.com/Azure/go-autorest/autorest/to v0.4.1-0.20210111195520-9fc88b15294e // indirect 69 | github.com/Azure/go-autorest/autorest/validation v0.3.2-0.20210111195520-9fc88b15294e // indirect 70 | github.com/Azure/go-autorest/logger v0.2.1 // indirect 71 | github.com/Azure/go-autorest/tracing v0.6.0 // indirect 72 | github.com/aws/aws-sdk-go v1.38.65 // indirect 73 | github.com/beorn7/perks v1.0.1 // indirect 74 | github.com/blang/semver v3.5.1+incompatible // indirect 75 | github.com/blang/semver/v4 v4.0.0 // indirect 76 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 77 | github.com/containerd/stargz-snapshotter/estargz v0.4.1 // indirect 78 | github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect 79 | github.com/davecgh/go-spew v1.1.1 // indirect 80 | github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 // indirect 81 | github.com/digitalocean/godo v1.7.5 // indirect 82 | github.com/dimchansky/utfbom v1.1.0 // indirect 83 | github.com/docker/cli v20.10.6+incompatible // indirect 84 | github.com/docker/distribution v2.8.1+incompatible // indirect 85 | github.com/docker/docker v20.10.12+incompatible // indirect 86 | github.com/docker/docker-credential-helpers v0.6.3 // indirect 87 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 88 | github.com/evanphx/json-patch v4.12.0+incompatible // indirect 89 | github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect 90 | github.com/fsnotify/fsnotify v1.7.0 // indirect 91 | github.com/ghodss/yaml v1.0.0 // indirect 92 | github.com/go-logr/logr v1.3.0 // indirect 93 | 
github.com/go-openapi/jsonpointer v0.20.0 // indirect 94 | github.com/go-openapi/jsonreference v0.20.2 // indirect 95 | github.com/go-openapi/swag v0.22.4 // indirect 96 | github.com/gogo/protobuf v1.3.2 // indirect 97 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 98 | github.com/golang/protobuf v1.5.3 // indirect 99 | github.com/google/gnostic v0.7.0 // indirect 100 | github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect 101 | github.com/google/go-cmp v0.6.0 // indirect 102 | github.com/google/go-containerregistry v0.5.0 // indirect 103 | github.com/google/go-querystring v1.0.0 // indirect 104 | github.com/google/go-tpm v0.3.2 // indirect 105 | github.com/google/go-tspi v0.2.1-0.20190423175329-115dea689aad // indirect 106 | github.com/google/gofuzz v1.2.0 // indirect 107 | github.com/google/s2a-go v0.1.4 // indirect 108 | github.com/google/uuid v1.4.0 // indirect 109 | github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect 110 | github.com/googleapis/gax-go/v2 v2.12.0 // indirect 111 | github.com/gophercloud/gophercloud v0.7.0 // indirect 112 | github.com/hashicorp/errwrap v1.0.0 // indirect 113 | github.com/hashicorp/go-multierror v1.0.0 // indirect 114 | github.com/hashicorp/mdns v1.0.1 // indirect 115 | github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect 116 | github.com/imdario/mergo v0.3.16 // indirect 117 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 118 | github.com/jmespath/go-jmespath v0.4.0 // indirect 119 | github.com/josharian/intern v1.0.0 // indirect 120 | github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62 // indirect 121 | github.com/json-iterator/go v1.1.12 // indirect 122 | github.com/klauspost/compress v1.15.9 // indirect 123 | github.com/linode/linodego v0.7.1 // indirect 124 | github.com/mailru/easyjson v0.7.7 // indirect 125 | github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect 126 | github.com/miekg/dns v1.1.35 // indirect 127 | github.com/mitchellh/go-homedir v1.1.0 // indirect 128 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 129 | github.com/modern-go/reflect2 v1.0.2 // indirect 130 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 131 | github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 // indirect 132 | github.com/opencontainers/go-digest v1.0.0 // indirect 133 | github.com/opencontainers/image-spec v1.0.2 // indirect 134 | github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c // indirect 135 | github.com/pierrec/lz4 v2.6.0+incompatible // indirect 136 | github.com/prometheus/client_golang v1.17.0 // indirect 137 | github.com/prometheus/client_model v0.5.0 // indirect 138 | github.com/prometheus/common v0.45.0 // indirect 139 | github.com/prometheus/procfs v0.12.0 // indirect 140 | github.com/rancher/lasso v0.0.0-20210616224652-fc3ebd901c08 // indirect 141 | github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect 142 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 143 | github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d // indirect 144 | github.com/spf13/pflag v1.0.5 // indirect 145 | github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible // indirect 146 | github.com/urfave/cli v1.22.4 // indirect 147 | github.com/vmware/govmomi v0.26.0 // indirect 148 | go.opencensus.io v0.24.0 // indirect 149 | golang.org/x/net v0.17.0 // indirect 150 | golang.org/x/oauth2 v0.13.0 // indirect 
151 | golang.org/x/sync v0.3.0 // indirect 152 | golang.org/x/sys v0.13.0 // indirect 153 | golang.org/x/term v0.13.0 // indirect 154 | golang.org/x/text v0.13.0 // indirect 155 | golang.org/x/time v0.3.0 // indirect 156 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 157 | google.golang.org/api v0.128.0 // indirect 158 | google.golang.org/appengine v1.6.8 // indirect 159 | google.golang.org/genproto/googleapis/api v0.0.0-20231012201019-e917dd12ba7a // indirect 160 | google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect 161 | google.golang.org/grpc v1.59.0 // indirect 162 | google.golang.org/protobuf v1.31.0 // indirect 163 | gopkg.in/inf.v0 v0.9.1 // indirect 164 | gopkg.in/resty.v1 v1.12.0 // indirect 165 | k8s.io/apiextensions-apiserver v0.28.3 // indirect 166 | k8s.io/apiserver v0.28.3 // indirect 167 | k8s.io/component-base v0.28.3 // indirect 168 | k8s.io/klog v1.0.0 // indirect 169 | k8s.io/klog/v2 v2.100.1 // indirect 170 | k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect 171 | k8s.io/kubelet v0.0.0 // indirect 172 | k8s.io/kubernetes v1.21.0 // indirect 173 | k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect 174 | sigs.k8s.io/cluster-api v0.3.11-0.20210430180359-45b6080c2764 // indirect 175 | sigs.k8s.io/controller-runtime v0.16.3 // indirect 176 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 177 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 178 | ) 179 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | set -o noglob 4 | 5 | # Usage: 6 | # curl ... | ENV_VAR=... sh - 7 | # or 8 | # ENV_VAR=... ./install.sh 9 | # 10 | # Environment variables: 11 | # - RANCHERD_* 12 | # Environment variables which begin with RANCHERD_ will be preserved for the 13 | # systemd service to use. Setting RANCHERD_URL without explicitly setting 14 | # a systemd exec command will default the command to "agent", and we 15 | # enforce that RANCHERD_TOKEN or RANCHERD_CLUSTER_SECRET is also set. 16 | # 17 | # - INSTALL_RANCHERD_SKIP_DOWNLOAD 18 | # If set to true will not download rancherd hash or binary. 19 | # 20 | # - INSTALL_RANCHERD_FORCE_RESTART 21 | # If set to true will always restart the rancherd service 22 | # 23 | # - INSTALL_RANCHERD_SKIP_ENABLE 24 | # If set to true will not enable or start rancherd service. 25 | # 26 | # - INSTALL_RANCHERD_SKIP_START 27 | # If set to true will not start rancherd service. 28 | # 29 | # - INSTALL_RANCHERD_VERSION 30 | # Version of rancherd to download from github. Will attempt to download from the 31 | # stable channel if not specified. 
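#   For example, to pin a specific release instead of the latest one, replace
#   <tag> with a tag from https://github.com/rancher/rancherd/releases:
#     curl -sfL https://raw.githubusercontent.com/rancher/rancherd/master/install.sh | INSTALL_RANCHERD_VERSION=<tag> sh -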
32 | # 33 | # - INSTALL_RANCHERD_BIN_DIR 34 | # Directory to install rancherd binary, links, and uninstall script to, or use 35 | # /usr/local/bin as the default 36 | # 37 | # - INSTALL_RANCHERD_BIN_DIR_READ_ONLY 38 | # If set to true will not write files to INSTALL_RANCHERD_BIN_DIR, forces 39 | # setting INSTALL_RANCHERD_SKIP_DOWNLOAD=true 40 | # 41 | # - INSTALL_RANCHERD_SYSTEMD_DIR 42 | # Directory to install systemd service and environment files to, or use 43 | # /etc/systemd/system as the default 44 | # 45 | GITHUB_URL=https://github.com/rancher/rancherd/releases 46 | DOWNLOADER= 47 | 48 | # --- helper functions for logs --- 49 | info() 50 | { 51 | echo '[INFO] ' "$@" 52 | } 53 | warn() 54 | { 55 | echo '[WARN] ' "$@" >&2 56 | } 57 | fatal() 58 | { 59 | echo '[ERROR] ' "$@" >&2 60 | exit 1 61 | } 62 | 63 | # --- fatal if no systemd --- 64 | verify_system() { 65 | if [ ! -d /run/systemd ]; then 66 | fatal 'Can not find systemd to use as a process supervisor for rancherd' 67 | fi 68 | } 69 | 70 | # --- add quotes to command arguments --- 71 | quote() { 72 | for arg in "$@"; do 73 | printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/" 74 | done 75 | } 76 | 77 | # --- add indentation and trailing slash to quoted args --- 78 | quote_indent() { 79 | printf ' \\\n' 80 | for arg in "$@"; do 81 | printf '\t%s \\\n' "$(quote "$arg")" 82 | done 83 | } 84 | 85 | # --- escape most punctuation characters, except quotes, forward slash, and space --- 86 | escape() { 87 | printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;' 88 | } 89 | 90 | # --- escape double quotes --- 91 | escape_dq() { 92 | printf '%s' "$@" | sed -e 's/"/\\"/g' 93 | } 94 | 95 | # --- define needed environment variables --- 96 | setup_env() { 97 | SYSTEM_NAME=rancherd 98 | 99 | # --- check for invalid characters in system name --- 100 | valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' ) 101 | if [ "${SYSTEM_NAME}" != "${valid_chars}" ]; then 102 | invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g') 103 | fatal "Invalid characters for system name: 104 | ${SYSTEM_NAME} 105 | ${invalid_chars}" 106 | fi 107 | 108 | # --- use sudo if we are not already root --- 109 | SUDO=sudo 110 | if [ $(id -u) -eq 0 ]; then 111 | SUDO= 112 | fi 113 | 114 | # --- use binary install directory if defined or create default --- 115 | if [ -n "${INSTALL_RANCHERD_BIN_DIR}" ]; then 116 | BIN_DIR=${INSTALL_RANCHERD_BIN_DIR} 117 | else 118 | # --- use /usr/local/bin if root can write to it, otherwise use /opt/bin if it exists 119 | BIN_DIR=/usr/local/bin 120 | if ! 
$SUDO sh -c "touch ${BIN_DIR}/rancherd-ro-test && rm -rf ${BIN_DIR}/rancherd-ro-test"; then 121 | if [ -d /opt/bin ]; then 122 | BIN_DIR=/opt/bin 123 | fi 124 | fi 125 | fi 126 | 127 | # --- use systemd directory if defined or create default --- 128 | if [ -n "${INSTALL_RANCHERD_SYSTEMD_DIR}" ]; then 129 | SYSTEMD_DIR="${INSTALL_RANCHERD_SYSTEMD_DIR}" 130 | else 131 | SYSTEMD_DIR=/etc/systemd/system 132 | fi 133 | 134 | # --- set related files from system name --- 135 | SERVICE_RANCHERD=${SYSTEM_NAME}.service 136 | 137 | # --- use service or environment location depending on systemd --- 138 | FILE_RANCHERD_SERVICE=${SYSTEMD_DIR}/${SERVICE_RANCHERD} 139 | FILE_RANCHERD_ENV=${SYSTEMD_DIR}/${SERVICE_RANCHERD}.env 140 | 141 | # --- get hash of config & exec for currently installed rancherd --- 142 | PRE_INSTALL_HASHES=$(get_installed_hashes) 143 | 144 | # --- if bin directory is read only skip download --- 145 | if [ "${INSTALL_RANCHERD_BIN_DIR_READ_ONLY}" = true ]; then 146 | INSTALL_RANCHERD_SKIP_DOWNLOAD=true 147 | fi 148 | } 149 | 150 | # --- check if skip download environment variable set --- 151 | can_skip_download() { 152 | if [ "${INSTALL_RANCHERD_SKIP_DOWNLOAD}" != true ]; then 153 | return 1 154 | fi 155 | } 156 | 157 | # --- verify an executable rancherd binary is installed --- 158 | verify_rancherd_is_executable() { 159 | if [ ! -x ${BIN_DIR}/rancherd ]; then 160 | fatal "Executable rancherd binary not found at ${BIN_DIR}/rancherd" 161 | fi 162 | } 163 | 164 | # --- set arch and suffix, fatal if architecture not supported --- 165 | setup_verify_arch() { 166 | if [ -z "$ARCH" ]; then 167 | ARCH=$(uname -m) 168 | fi 169 | case $ARCH in 170 | amd64) 171 | ARCH=amd64 172 | SUFFIX=-${ARCH} 173 | ;; 174 | x86_64) 175 | ARCH=amd64 176 | SUFFIX=-amd64 177 | ;; 178 | #arm64) 179 | # ARCH=arm64 180 | # SUFFIX=-${ARCH} 181 | # ;; 182 | #aarch64) 183 | # ARCH=arm64 184 | # SUFFIX=-${ARCH} 185 | # ;; 186 | #arm*) 187 | # ARCH=arm 188 | # SUFFIX=-${ARCH}hf 189 | # ;; 190 | *) 191 | fatal "Unsupported architecture $ARCH" 192 | esac 193 | } 194 | 195 | # --- verify existence of network downloader executable --- 196 | verify_downloader() { 197 | # Return failure if it doesn't exist or is no executable 198 | [ -x "$(command -v $1)" ] || return 1 199 | 200 | # Set verified executable as our downloader program and return success 201 | DOWNLOADER=$1 202 | return 0 203 | } 204 | 205 | # --- create temporary directory and cleanup when done --- 206 | setup_tmp() { 207 | TMP_DIR=$(mktemp -d -t rancherd-install.XXXXXXXXXX) 208 | TMP_HASH=${TMP_DIR}/rancherd.hash 209 | TMP_BIN=${TMP_DIR}/rancherd.bin 210 | cleanup() { 211 | code=$? 
212 | set +e 213 | trap - EXIT 214 | rm -rf ${TMP_DIR} 215 | exit $code 216 | } 217 | trap cleanup INT EXIT 218 | } 219 | 220 | # --- use desired rancherd version if defined or find version from channel --- 221 | get_release_version() { 222 | if [ -n "${INSTALL_RANCHERD_VERSION}" ]; then 223 | VERSION_RANCHERD=${INSTALL_RANCHERD_VERSION} 224 | else 225 | version_url="${GITHUB_URL}/latest" 226 | case $DOWNLOADER in 227 | curl) 228 | VERSION_RANCHERD=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||') 229 | ;; 230 | wget) 231 | VERSION_RANCHERD=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||') 232 | ;; 233 | *) 234 | fatal "Incorrect downloader executable '$DOWNLOADER'" 235 | ;; 236 | esac 237 | fi 238 | info "Using ${VERSION_RANCHERD} as release" 239 | } 240 | 241 | # --- download from github url --- 242 | download() { 243 | [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments' 244 | 245 | case $DOWNLOADER in 246 | curl) 247 | curl -o $1 -sfL $2 248 | ;; 249 | wget) 250 | wget -qO $1 $2 251 | ;; 252 | *) 253 | fatal "Incorrect executable '$DOWNLOADER'" 254 | ;; 255 | esac 256 | 257 | # Abort if download command failed 258 | [ $? -eq 0 ] || fatal 'Download failed' 259 | } 260 | 261 | # --- download hash from github url --- 262 | download_hash() { 263 | HASH_URL=${GITHUB_URL}/download/${VERSION_RANCHERD}/sha256sum-${ARCH}.txt 264 | info "Downloading hash ${HASH_URL}" 265 | download ${TMP_HASH} ${HASH_URL} 266 | HASH_EXPECTED=$(grep " rancherd${SUFFIX}$" ${TMP_HASH}) 267 | HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*} 268 | } 269 | 270 | # --- check hash against installed version --- 271 | installed_hash_matches() { 272 | if [ -x ${BIN_DIR}/rancherd ]; then 273 | HASH_INSTALLED=$(sha256sum ${BIN_DIR}/rancherd) 274 | HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*} 275 | if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then 276 | return 277 | fi 278 | fi 279 | return 1 280 | } 281 | 282 | # --- download binary from github url --- 283 | download_binary() { 284 | BIN_URL=${GITHUB_URL}/download/${VERSION_RANCHERD}/rancherd${SUFFIX} 285 | info "Downloading binary ${BIN_URL}" 286 | download ${TMP_BIN} ${BIN_URL} 287 | } 288 | 289 | # --- verify downloaded binary hash --- 290 | verify_binary() { 291 | info "Verifying binary download" 292 | HASH_BIN=$(sha256sum ${TMP_BIN}) 293 | HASH_BIN=${HASH_BIN%%[[:blank:]]*} 294 | if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then 295 | fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}" 296 | fi 297 | } 298 | 299 | # --- setup permissions and move binary to system directory --- 300 | setup_binary() { 301 | chmod 755 ${TMP_BIN} 302 | info "Installing rancherd to ${BIN_DIR}/rancherd" 303 | $SUDO chown root:root ${TMP_BIN} 304 | $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/rancherd 305 | } 306 | 307 | # --- download and verify rancherd --- 308 | download_and_verify() { 309 | if can_skip_download; then 310 | info 'Skipping rancherd download and verify' 311 | verify_rancherd_is_executable 312 | return 313 | fi 314 | 315 | setup_verify_arch 316 | verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files' 317 | setup_tmp 318 | get_release_version 319 | download_hash 320 | 321 | if installed_hash_matches; then 322 | info 'Skipping binary downloaded, installed rancherd matches hash' 323 | return 324 | fi 325 | 326 | download_binary 327 | verify_binary 328 | setup_binary 329 | } 330 | 331 | # --- disable current service if loaded -- 332 | 
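# Removes any previously enabled rancherd unit plus its environment file so a
# re-run of this script always installs fresh copies; errors are ignored with
# "|| true" because the files may simply not exist yet.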
systemd_disable() { 333 | $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true 334 | $SUDO rm -f /etc/systemd/system/${SERVICE_RANCHERD} || true 335 | $SUDO rm -f /etc/systemd/system/${SERVICE_RANCHERD}.env || true 336 | } 337 | 338 | # --- capture current env and create file containing rancherd_ variables --- 339 | create_env_file() { 340 | info "env: Creating environment file ${FILE_RANCHERD_ENV}" 341 | $SUDO touch ${FILE_RANCHERD_ENV} 342 | $SUDO chmod 0600 ${FILE_RANCHERD_ENV} 343 | env | grep '^RANCHERD_' | $SUDO tee ${FILE_RANCHERD_ENV} >/dev/null 344 | env | grep -Ei '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_RANCHERD_ENV} >/dev/null 345 | } 346 | 347 | # --- write systemd service file --- 348 | create_systemd_service_file() { 349 | info "systemd: Creating service file ${FILE_RANCHERD_SERVICE}" 350 | $SUDO tee ${FILE_RANCHERD_SERVICE} >/dev/null << EOF 351 | [Unit] 352 | Description=Rancher Bootstrap 353 | Documentation=https://github.com/rancher/rancherd 354 | Wants=network-online.target 355 | After=network-online.target 356 | 357 | [Install] 358 | WantedBy=multi-user.target 359 | 360 | [Service] 361 | Type=oneshot 362 | EnvironmentFile=-/etc/default/%N 363 | EnvironmentFile=-/etc/sysconfig/%N 364 | EnvironmentFile=-${FILE_RANCHERD_ENV} 365 | KillMode=process 366 | # Having non-zero Limit*s causes performance problems due to accounting overhead 367 | # in the kernel. We recommend using cgroups to do container-local accounting. 368 | LimitNOFILE=1048576 369 | LimitNPROC=infinity 370 | LimitCORE=infinity 371 | TasksMax=infinity 372 | TimeoutStartSec=0 373 | ExecStart=${BIN_DIR}/rancherd bootstrap 374 | EOF 375 | } 376 | 377 | # --- get hashes of the current rancherd bin and service files 378 | get_installed_hashes() { 379 | $SUDO sha256sum ${BIN_DIR}/rancherd ${FILE_RANCHERD_SERVICE} ${FILE_RANCHERD_ENV} 2>&1 || true 380 | } 381 | 382 | # --- enable and start systemd service --- 383 | systemd_enable() { 384 | info "systemd: Enabling ${SYSTEM_NAME} unit" 385 | $SUDO systemctl enable ${FILE_RANCHERD_SERVICE} >/dev/null 386 | $SUDO systemctl daemon-reload >/dev/null 387 | } 388 | 389 | systemd_start() { 390 | info "systemd: Starting ${SYSTEM_NAME}" 391 | $SUDO systemctl restart --no-block ${SYSTEM_NAME} 392 | info "Run \"journalctl -u ${SYSTEM_NAME} -f\" to watch logs" 393 | } 394 | 395 | # --- enable and start openrc service --- 396 | openrc_enable() { 397 | info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel" 398 | $SUDO rc-update add ${SYSTEM_NAME} default >/dev/null 399 | } 400 | 401 | openrc_start() { 402 | info "openrc: Starting ${SYSTEM_NAME}" 403 | $SUDO ${FILE_RANCHERD_SERVICE} restart 404 | } 405 | 406 | # --- startup systemd or openrc service --- 407 | service_enable_and_start() { 408 | [ "${INSTALL_RANCHERD_SKIP_ENABLE}" = true ] && return 409 | 410 | systemd_enable 411 | 412 | [ "${INSTALL_RANCHERD_SKIP_START}" = true ] && return 413 | 414 | POST_INSTALL_HASHES=$(get_installed_hashes) 415 | if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ] && [ "${INSTALL_RANCHERD_FORCE_RESTART}" != true ]; then 416 | info 'No change detected so skipping service start' 417 | return 418 | fi 419 | 420 | systemd_start 421 | return 0 422 | } 423 | 424 | # --- re-evaluate args to include env command --- 425 | eval set -- $(escape "${INSTALL_RANCHERD_EXEC}") $(quote "$@") 426 | 427 | # --- run the install process -- 428 | { 429 | verify_system 430 | setup_env "$@" 431 | download_and_verify 432 | systemd_disable 433 | create_env_file 434 | 
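    # Illustrative invocation (the version value is hypothetical), showing the
    # INSTALL_RANCHERD_* variables consumed by get_release_version and
    # service_enable_and_start above:
    #
    #   INSTALL_RANCHERD_VERSION=v0.0.1 INSTALL_RANCHERD_SKIP_START=true sh install.sh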
create_systemd_service_file 435 | service_enable_and_start 436 | } 437 | -------------------------------------------------------------------------------- /pkg/auth/auth.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "strings" 9 | 10 | "github.com/pkg/errors" 11 | "github.com/rancher/rancherd/pkg/kubectl" 12 | "github.com/rancher/wrangler/pkg/randomtoken" 13 | "github.com/sirupsen/logrus" 14 | "golang.org/x/crypto/bcrypt" 15 | "gopkg.in/yaml.v2" 16 | corev1 "k8s.io/api/core/v1" 17 | rbacv1 "k8s.io/api/rbac/v1" 18 | apierrors "k8s.io/apimachinery/pkg/api/errors" 19 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 20 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 21 | "k8s.io/apimachinery/pkg/labels" 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "k8s.io/client-go/dynamic" 24 | "k8s.io/client-go/kubernetes" 25 | corev1interface "k8s.io/client-go/kubernetes/typed/core/v1" 26 | "k8s.io/client-go/tools/clientcmd" 27 | ) 28 | 29 | var ( 30 | bootstrappedRole = "authz.management.cattle.io/bootstrapped-role" 31 | bootstrapAdminConfig = "admincreated" 32 | cattleNamespace = "cattle-system" 33 | defaultAdminLabelKey = "authz.management.cattle.io/bootstrapping" 34 | defaultAdminLabelValue = "admin-user" 35 | defaultAdminLabel = map[string]string{defaultAdminLabelKey: defaultAdminLabelValue} 36 | ) 37 | 38 | type Options struct { 39 | Password string 40 | PasswordFile string 41 | Kubeconfig string 42 | } 43 | 44 | func ResetAdmin(ctx context.Context, opts *Options) error { 45 | if opts == nil { 46 | opts = &Options{} 47 | } 48 | if err := validation(*opts); err != nil { 49 | return err 50 | } 51 | if err := resetAdmin(ctx, opts.Kubeconfig, opts.Password, opts.PasswordFile); err != nil { 52 | return errors.Wrap(err, "cluster and rancher are not ready. 
Please try later") 53 | } 54 | return nil 55 | } 56 | 57 | func validation(opts Options) error { 58 | if opts.Password != "" && opts.PasswordFile != "" { 59 | return errors.New("only one option can be set for password and password-file") 60 | } 61 | return nil 62 | } 63 | 64 | func resetAdmin(ctx context.Context, kubeconfig, password, passwordFile string) error { 65 | token, err := randomtoken.Generate() 66 | if err != nil { 67 | return err 68 | } 69 | mustChangePassword := true 70 | if password != "" { 71 | token = password 72 | mustChangePassword = false 73 | } 74 | if passwordFile != "" { 75 | passwordFromFile, err := ioutil.ReadFile(passwordFile) 76 | if err != nil { 77 | return err 78 | } 79 | token = strings.TrimSuffix(string(passwordFromFile), "\n") 80 | mustChangePassword = false 81 | } 82 | 83 | kubeconfig, err = kubectl.GetKubeconfig(kubeconfig) 84 | if err != nil { 85 | return err 86 | } 87 | 88 | conf, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | client := dynamic.NewForConfigOrDie(conf) 94 | userClient := client.Resource(schema.GroupVersionResource{ 95 | Group: "management.cattle.io", 96 | Version: "v3", 97 | Resource: "users", 98 | }) 99 | configmapClient := kubernetes.NewForConfigOrDie(conf).CoreV1().ConfigMaps(cattleNamespace) 100 | nodeClient := kubernetes.NewForConfigOrDie(conf).CoreV1().Nodes() 101 | grbClient := client.Resource(schema.GroupVersionResource{ 102 | Group: "management.cattle.io", 103 | Version: "v3", 104 | Resource: "globalrolebindings", 105 | }) 106 | crbClient := client.Resource(schema.GroupVersionResource{ 107 | Group: "rbac.authorization.k8s.io", 108 | Version: "v1", 109 | Resource: "clusterrolebindings", 110 | }) 111 | settingClient := client.Resource(schema.GroupVersionResource{ 112 | Group: "management.cattle.io", 113 | Version: "v3", 114 | Resource: "settings", 115 | }) 116 | clustersClient := client.Resource(schema.GroupVersionResource{ 117 | Group: "management.cattle.io", 118 | Version: "v3", 119 | Resource: "clusters", 120 | }) 121 | 122 | var adminName string 123 | var adminUser unstructured.Unstructured 124 | set := labels.Set(defaultAdminLabel) 125 | admins, err := userClient.List(ctx, v1.ListOptions{LabelSelector: set.String()}) 126 | if err != nil { 127 | return err 128 | } 129 | 130 | if len(admins.Items) > 0 { 131 | adminName = admins.Items[0].GetName() 132 | adminUser = admins.Items[0] 133 | } 134 | 135 | if _, err := configmapClient.Get(ctx, bootstrapAdminConfig, v1.GetOptions{}); err != nil { 136 | if !apierrors.IsNotFound(err) { 137 | return err 138 | } 139 | } else { 140 | // if it is already bootstrapped, reset admin password 141 | count := len(admins.Items) 142 | if count != 1 { 143 | var users []string 144 | for _, u := range admins.Items { 145 | users = append(users, u.GetName()) 146 | } 147 | return errors.Errorf("%v users were found with %v label. They are %v. Can only reset the default admin password when there is exactly one user with this label", 148 | count, set, users) 149 | } 150 | 151 | admin := admins.Items[0] 152 | hash, err := bcrypt.GenerateFromPassword([]byte(token), bcrypt.DefaultCost) 153 | if err != nil { 154 | return err 155 | } 156 | admin.Object["password"] = string(hash) 157 | admin.Object["mustChangePassword"] = false 158 | _, err = userClient.Update(ctx, &admin, v1.UpdateOptions{}) 159 | if err != nil { 160 | return err 161 | } 162 | printServerURL(ctx, nodeClient, settingClient) 163 | logrus.Infof("Default admin reset. 
New username: %v, new Password: %v", admin.Object["username"], token) 164 | return nil 165 | } 166 | 167 | // make sure Admin user gets created 168 | if len(admins.Items) == 0 { 169 | // Config map does not exist and no users, attempt to create the default admin user 170 | hash, _ := bcrypt.GenerateFromPassword([]byte(token), bcrypt.DefaultCost) 171 | admin, err := userClient.Create(ctx, 172 | &unstructured.Unstructured{ 173 | Object: map[string]interface{}{ 174 | "apiVersion": "management.cattle.io/v3", 175 | "kind": "User", 176 | "metadata": v1.ObjectMeta{ 177 | GenerateName: "user-", 178 | Labels: defaultAdminLabel, 179 | }, 180 | "displayName": "Default Admin", 181 | "username": "admin", 182 | "password": string(hash), 183 | "mustChangePassword": mustChangePassword, 184 | }, 185 | }, v1.CreateOptions{}) 186 | if err != nil && !apierrors.IsAlreadyExists(err) { 187 | return err 188 | } 189 | adminName = admin.GetName() 190 | adminUser = *admin 191 | } 192 | 193 | // Make sure the admin user become the admin of system/default project of local cluster 194 | if err := setClusterAnnotation(ctx, clustersClient, adminName); err != nil { 195 | return err 196 | } 197 | 198 | // Make sure globalRolebinding is created with admin user 199 | bindings, err := grbClient.List(ctx, v1.ListOptions{LabelSelector: set.String()}) 200 | if err != nil && !apierrors.IsNotFound(err) { 201 | return err 202 | } 203 | if err == nil && len(bindings.Items) == 0 { 204 | _, err = grbClient.Create(ctx, 205 | &unstructured.Unstructured{ 206 | Object: map[string]interface{}{ 207 | "metadata": v1.ObjectMeta{ 208 | GenerateName: "globalrolebinding-", 209 | Labels: defaultAdminLabel, 210 | }, 211 | "apiVersion": "management.cattle.io/v3", 212 | "kind": "GlobalRoleBinding", 213 | "userName": adminName, 214 | "globalRoleName": "admin", 215 | }, 216 | }, v1.CreateOptions{}) 217 | if err != nil { 218 | return err 219 | } 220 | } 221 | 222 | // Make sure admin user is the cluster-admin of local cluster 223 | crbBindings, err := crbClient.List(ctx, v1.ListOptions{LabelSelector: set.String()}) 224 | if err != nil { 225 | return err 226 | } 227 | if len(crbBindings.Items) == 0 && adminName != "" { 228 | _, err = crbClient.Create(ctx, 229 | &unstructured.Unstructured{ 230 | Object: map[string]interface{}{ 231 | "metadata": v1.ObjectMeta{ 232 | GenerateName: "default-admin-", 233 | Labels: defaultAdminLabel, 234 | }, 235 | "apiVersion": "rbac.authorization.k8s.io/v1", 236 | "kind": "ClusterRoleBinding", 237 | "ownerReferences": []v1.OwnerReference{ 238 | { 239 | APIVersion: "management.cattle.io/v3", 240 | Kind: "user", 241 | Name: adminUser.GetName(), 242 | UID: adminUser.GetUID(), 243 | }, 244 | }, 245 | "subjects": []rbacv1.Subject{ 246 | { 247 | Kind: "User", 248 | APIGroup: rbacv1.GroupName, 249 | Name: adminUser.GetName(), 250 | }, 251 | }, 252 | "roleRef": rbacv1.RoleRef{ 253 | APIGroup: rbacv1.GroupName, 254 | Kind: "ClusterRole", 255 | Name: "cluster-admin", 256 | }, 257 | }, 258 | }, v1.CreateOptions{}) 259 | if err != nil { 260 | return err 261 | } 262 | } 263 | 264 | _, err = configmapClient.Create(ctx, 265 | &corev1.ConfigMap{ 266 | ObjectMeta: v1.ObjectMeta{ 267 | Namespace: cattleNamespace, 268 | Name: bootstrapAdminConfig, 269 | }, 270 | }, v1.CreateOptions{}) 271 | if err != nil { 272 | if !apierrors.IsAlreadyExists(err) { 273 | return err 274 | } 275 | } 276 | 277 | printServerURL(ctx, nodeClient, settingClient) 278 | logrus.Infof("Default admin and password created. 
Username: admin, Password: %v", token) 279 | return nil 280 | } 281 | 282 | func printServerURL(ctx context.Context, nodeClient corev1interface.NodeInterface, settingClient dynamic.NamespaceableResourceInterface) { 283 | serverURL, err := getServerURL(ctx, nodeClient, settingClient) 284 | if err != nil { 285 | logrus.Warnf("Can't retrieve serverURL to reach rancher server. Error: %v", err) 286 | } 287 | 288 | if serverURL != "" { 289 | logrus.Infof("Server URL: %v", serverURL) 290 | } else { 291 | logrus.Info("Rancher is listening on http/8080 and https/8443") 292 | } 293 | } 294 | 295 | // getServerURL reads the possible serverUrl in following order 296 | // 1. First fetch from server-url setting from rancher 297 | // 2. Fetch From tls-san set in rke2 config 298 | // 3. Fetch the externalNodeIP then internalNodeIP 299 | func getServerURL(ctx context.Context, nodeClient corev1interface.NodeInterface, settingClient dynamic.NamespaceableResourceInterface) (string, error) { 300 | serverURLSettings, err := settingClient.Get(ctx, "server-url", v1.GetOptions{}) 301 | if err != nil { 302 | return "", err 303 | } 304 | value := serverURLSettings.Object["value"].(string) 305 | defaultValue := serverURLSettings.Object["default"].(string) 306 | if value != "" { 307 | return value, nil 308 | } else if defaultValue != "" { 309 | return value, nil 310 | } 311 | 312 | tlsSan, err := readTLSSan() 313 | if err != nil { 314 | return "", err 315 | } 316 | if tlsSan != "" { 317 | return fmt.Sprintf("https://%v:8443", tlsSan), nil 318 | } 319 | 320 | nodes, err := nodeClient.List(ctx, v1.ListOptions{}) 321 | if err != nil { 322 | return "", err 323 | } 324 | if len(nodes.Items) > 0 { 325 | addresses := nodes.Items[0].Status.Addresses 326 | // prefer external IP over internal IP 327 | for _, address := range addresses { 328 | if address.Type == corev1.NodeExternalIP { 329 | return fmt.Sprintf("https://%v:8443", address.Address), nil 330 | } 331 | if address.Type == corev1.NodeInternalIP { 332 | return fmt.Sprintf("https://%v:8443", address.Address), nil 333 | } 334 | } 335 | } 336 | 337 | return "", nil 338 | } 339 | 340 | func setClusterAnnotation(ctx context.Context, clustersClient dynamic.NamespaceableResourceInterface, adminName string) error { 341 | cluster, err := clustersClient.Get(ctx, "local", v1.GetOptions{}) 342 | if err != nil { 343 | return fmt.Errorf("Local cluster is not ready yet (get local cluster: %w)", err) 344 | } 345 | if adminName == "" { 346 | return errors.Errorf("User is not set yet") 347 | } 348 | ann := cluster.GetAnnotations() 349 | if ann == nil { 350 | ann = make(map[string]string) 351 | } 352 | ann["field.cattle.io/creatorId"] = adminName 353 | cluster.SetAnnotations(ann) 354 | 355 | // reset CreatorMadeOwner condition so that controller will reconcile and reassign admin to the default user 356 | setConditionToFalse(cluster.Object, "DefaultProjectCreated") 357 | setConditionToFalse(cluster.Object, "SystemProjectCreated") 358 | setConditionToFalse(cluster.Object, "CreatorMadeOwner") 359 | 360 | _, err = clustersClient.Update(ctx, cluster, v1.UpdateOptions{}) 361 | return err 362 | } 363 | 364 | func setConditionToFalse(object map[string]interface{}, cond string) { 365 | status, ok := object["status"].(map[string]interface{}) 366 | if !ok { 367 | return 368 | } 369 | conditions, ok := status["conditions"].([]interface{}) 370 | if !ok { 371 | return 372 | } 373 | for _, condition := range conditions { 374 | m, ok := condition.(map[string]interface{}) 375 | if !ok { 376 | continue 377 
| } 378 | if t, ok := m["type"].(string); ok && t == cond { 379 | m["status"] = "False" 380 | } 381 | } 382 | return 383 | } 384 | 385 | func readTLSSan() (string, error) { 386 | bytes, err := ioutil.ReadFile("/etc/rancher/rke2/config.yaml") 387 | if err != nil && !os.IsNotExist(err) { 388 | return "", err 389 | } 390 | 391 | if len(bytes) == 0 { 392 | return "", nil 393 | } 394 | 395 | data := yaml.MapSlice{} 396 | if err := yaml.Unmarshal(bytes, &data); err != nil { 397 | return "", err 398 | } 399 | 400 | for _, item := range data { 401 | if item.Key == "tls-san" { 402 | if v, ok := item.Value.([]interface{}); ok { 403 | if s, ok := v[0].(string); ok { 404 | return s, nil 405 | } 406 | } 407 | } 408 | } 409 | 410 | return "", nil 411 | } 412 | -------------------------------------------------------------------------------- /pkg/cacerts/cacerts.go: -------------------------------------------------------------------------------- 1 | package cacerts 2 | 3 | import ( 4 | "crypto/hmac" 5 | "crypto/sha256" 6 | "crypto/sha512" 7 | "crypto/tls" 8 | "crypto/x509" 9 | "encoding/base64" 10 | "encoding/hex" 11 | "fmt" 12 | "io/ioutil" 13 | "net/http" 14 | url2 "net/url" 15 | "time" 16 | 17 | "github.com/rancher/rancherd/pkg/tpm" 18 | "github.com/rancher/system-agent/pkg/applyinator" 19 | "github.com/rancher/wrangler/pkg/randomtoken" 20 | ) 21 | 22 | var insecureClient = &http.Client{ 23 | Timeout: time.Second * 5, 24 | Transport: &http.Transport{ 25 | Proxy: http.ProxyFromEnvironment, 26 | TLSClientConfig: &tls.Config{ 27 | InsecureSkipVerify: true, 28 | }, 29 | }, 30 | } 31 | 32 | func Get(server, token, path string) ([]byte, string, error) { 33 | return get(server, token, path, true) 34 | } 35 | 36 | func MachineGet(server, token, path string) ([]byte, string, error) { 37 | return get(server, token, path, false) 38 | } 39 | 40 | func get(server, token, path string, clusterToken bool) ([]byte, string, error) { 41 | u, err := url2.Parse(server) 42 | if err != nil { 43 | return nil, "", err 44 | } 45 | u.Path = path 46 | 47 | var ( 48 | isTPM bool 49 | ) 50 | if !clusterToken { 51 | isTPM, token, err = tpm.ResolveToken(token) 52 | if err != nil { 53 | return nil, "", err 54 | } 55 | } 56 | 57 | cacert, caChecksum, err := CACerts(server, token, clusterToken) 58 | if err != nil { 59 | return nil, "", err 60 | } 61 | 62 | if isTPM { 63 | data, err := tpm.Get(cacert, u.String(), nil) 64 | return data, caChecksum, err 65 | } 66 | 67 | req, err := http.NewRequest(http.MethodGet, u.String(), nil) 68 | if err != nil { 69 | return nil, "", err 70 | } 71 | if !clusterToken { 72 | req.Header.Set("Authorization", "Bearer "+base64.StdEncoding.EncodeToString([]byte(token))) 73 | } 74 | 75 | var resp *http.Response 76 | if len(cacert) == 0 { 77 | resp, err = http.DefaultClient.Do(req) 78 | if err != nil { 79 | return nil, "", err 80 | } 81 | } else { 82 | pool := x509.NewCertPool() 83 | pool.AppendCertsFromPEM(cacert) 84 | client := http.Client{ 85 | Timeout: 5 * time.Second, 86 | Transport: &http.Transport{ 87 | Proxy: http.ProxyFromEnvironment, 88 | TLSClientConfig: &tls.Config{ 89 | RootCAs: pool, 90 | }, 91 | }, 92 | } 93 | defer client.CloseIdleConnections() 94 | 95 | resp, err = client.Do(req) 96 | if err != nil { 97 | return nil, "", err 98 | } 99 | } 100 | 101 | data, err := ioutil.ReadAll(resp.Body) 102 | if resp.StatusCode != http.StatusOK { 103 | return nil, "", fmt.Errorf("%s: %s", data, resp.Status) 104 | } 105 | return data, caChecksum, err 106 | } 107 | 108 | func CACerts(server, token string, 
clusterToken bool) ([]byte, string, error) { 109 | nonce, err := randomtoken.Generate() 110 | if err != nil { 111 | return nil, "", err 112 | } 113 | 114 | url, err := url2.Parse(server) 115 | if err != nil { 116 | return nil, "", err 117 | } 118 | 119 | requestURL := fmt.Sprintf("https://%s/cacerts", url.Host) 120 | if !clusterToken { 121 | requestURL = fmt.Sprintf("https://%s/v1-rancheros/cacerts", url.Host) 122 | } 123 | 124 | if resp, err := http.Get(requestURL); err == nil { 125 | _, _ = ioutil.ReadAll(resp.Body) 126 | resp.Body.Close() 127 | return nil, "", nil 128 | } 129 | 130 | req, err := http.NewRequest(http.MethodGet, requestURL, nil) 131 | if err != nil { 132 | return nil, "", err 133 | } 134 | req.Header.Set("X-Cattle-Nonce", nonce) 135 | req.Header.Set("Authorization", "Bearer "+hashBase64([]byte(token))) 136 | 137 | resp, err := insecureClient.Do(req) 138 | if err != nil { 139 | return nil, "", fmt.Errorf("insecure cacerts download from %s: %w", requestURL, err) 140 | } 141 | defer resp.Body.Close() 142 | 143 | data, err := ioutil.ReadAll(resp.Body) 144 | if err != nil { 145 | return nil, "", err 146 | } 147 | 148 | if resp.StatusCode != http.StatusOK { 149 | return nil, "", fmt.Errorf("response %d: %s getting cacerts: %s", resp.StatusCode, resp.Status, data) 150 | } 151 | 152 | if resp.Header.Get("X-Cattle-Hash") != hash(token, nonce, data) { 153 | return nil, "", fmt.Errorf("response hash (%s) does not match (%s)", 154 | resp.Header.Get("X-Cattle-Hash"), 155 | hash(token, nonce, data)) 156 | } 157 | 158 | if len(data) == 0 { 159 | return nil, "", nil 160 | } 161 | 162 | return data, hashHex(data), nil 163 | } 164 | 165 | func ToUpdateCACertificatesInstruction() (*applyinator.Instruction, error) { 166 | cmd := "update-ca-certificates" 167 | 168 | return &applyinator.Instruction{ 169 | Name: "update-ca-certificates", 170 | SaveOutput: true, 171 | Command: cmd, 172 | }, nil 173 | } 174 | 175 | func ToFile(server, token string) (*applyinator.File, error) { 176 | cacert, _, err := CACerts(server, token, true) 177 | if err != nil { 178 | return nil, err 179 | } 180 | 181 | return &applyinator.File{ 182 | Content: base64.StdEncoding.EncodeToString(cacert), 183 | Path: "/etc/pki/trust/anchors/embedded-rancher-ca.pem", 184 | Permissions: "0644", 185 | }, nil 186 | } 187 | 188 | func hashHex(token []byte) string { 189 | hash := sha256.Sum256(token) 190 | return hex.EncodeToString(hash[:]) 191 | } 192 | 193 | func hashBase64(token []byte) string { 194 | hash := sha256.Sum256(token) 195 | return base64.StdEncoding.EncodeToString(hash[:]) 196 | } 197 | 198 | func hash(token, nonce string, bytes []byte) string { 199 | digest := hmac.New(sha512.New, []byte(token)) 200 | digest.Write([]byte(nonce)) 201 | digest.Write([]byte{0}) 202 | digest.Write(bytes) 203 | digest.Write([]byte{0}) 204 | hash := digest.Sum(nil) 205 | return base64.StdEncoding.EncodeToString(hash) 206 | } 207 | -------------------------------------------------------------------------------- /pkg/config/remote.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "github.com/rancher/rancherd/pkg/cacerts" 8 | "github.com/rancher/wrangler/pkg/data" 9 | "github.com/rancher/wrangler/pkg/data/convert" 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | func processRemote(cfg Config) (Config, error) { 14 | if cfg.Role != "" || cfg.Server == "" || cfg.Token == "" { 15 | return cfg, nil 16 | } 17 | 18 | logrus.Infof("server and 
token set but required role is not set. Trying to bootstrapping config from machine inventory") 19 | resp, _, err := cacerts.MachineGet(cfg.Server, cfg.Token, "/v1-rancheros/inventory") 20 | if err != nil { 21 | return cfg, fmt.Errorf("from machine inventory: %w", err) 22 | } 23 | 24 | config := map[string]interface{}{} 25 | if err := json.Unmarshal(resp, &config); err != nil { 26 | return cfg, fmt.Errorf("inventory response: %s: %w", resp, err) 27 | } 28 | 29 | currentConfig, err := convert.EncodeToMap(cfg) 30 | if err != nil { 31 | return cfg, err 32 | } 33 | 34 | var ( 35 | newConfig = data.MergeMapsConcatSlice(currentConfig, config) 36 | result Config 37 | ) 38 | 39 | if err := convert.ToObj(newConfig, &result); err != nil { 40 | return result, err 41 | } 42 | 43 | copyConfig := result 44 | copyConfig.Token = "--redacted--" 45 | downloadedConfig, err := json.Marshal(copyConfig) 46 | if err == nil { 47 | logrus.Infof("Downloaded config: %s", downloadedConfig) 48 | } 49 | 50 | return result, nil 51 | } 52 | -------------------------------------------------------------------------------- /pkg/config/runtime.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "strings" 4 | 5 | var ( 6 | RuntimeRKE2 Runtime = "rke2" 7 | RuntimeK3S Runtime = "k3s" 8 | RuntimeUnknown Runtime = "unknown" 9 | ) 10 | 11 | type Runtime string 12 | 13 | type RuntimeConfig struct { 14 | Role string `json:"role,omitempty"` 15 | SANS []string `json:"tlsSans,omitempty"` 16 | NodeName string `json:"nodeName,omitempty"` 17 | Address string `json:"address,omitempty"` 18 | InternalAddress string `json:"internalAddress,omitempty"` 19 | Taints []string `json:"taints,omitempty"` 20 | Labels []string `json:"labels,omitempty"` 21 | Token string `json:"token,omitempty"` 22 | ConfigValues map[string]interface{} `json:"extraConfig,omitempty"` 23 | } 24 | 25 | func GetRuntime(kubernetesVersion string) Runtime { 26 | if isRKE2(kubernetesVersion) { 27 | return RuntimeRKE2 28 | } 29 | return RuntimeK3S 30 | } 31 | 32 | func isRKE2(kubernetesVersion string) bool { 33 | return strings.Contains(kubernetesVersion, string(RuntimeRKE2)) 34 | } 35 | -------------------------------------------------------------------------------- /pkg/config/types.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "io/fs" 5 | "io/ioutil" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | 10 | v1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" 11 | "github.com/rancher/system-agent/pkg/applyinator" 12 | "github.com/rancher/wharfie/pkg/registries" 13 | "github.com/rancher/wrangler/pkg/data" 14 | "github.com/rancher/wrangler/pkg/data/convert" 15 | "github.com/rancher/wrangler/pkg/yaml" 16 | "github.com/sirupsen/logrus" 17 | ) 18 | 19 | var ( 20 | implicitPaths = []string{ 21 | "/usr/share/oem/rancher/rancherd/config.yaml", 22 | "/usr/share/rancher/rancherd/config.yaml", 23 | // RancherOS userdata 24 | "/oem/userdata", 25 | // RancherOS installation yip config 26 | "/oem/99_custom.yaml", 27 | // RancherOS oem location 28 | "/oem/rancher/rancherd/config.yaml", 29 | // Standard cloud-config 30 | "/var/lib/cloud/instance/user-data.txt", 31 | } 32 | 33 | manifests = []string{ 34 | "/usr/share/oem/rancher/rancherd/manifests", 35 | "/usr/share/rancher/rancherd/manifests", 36 | "/etc/rancher/rancherd/manifests", 37 | // RancherOS OEM 38 | "/oem/rancher/rancherd/manifests", 39 | } 40 | ) 41 | 42 | type Config struct { 43 | 
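	// RuntimeConfig is embedded, so its fields (role, token, tlsSans, nodeName,
	// taints, labels, extraConfig, ...) appear at the top level of the YAML
	// alongside the fields declared below. An illustrative config (all values
	// hypothetical) might look like:
	//
	//   token: somesecret
	//   kubernetesVersion: v1.30.2+rke2r1
	//   rancherVersion: v2.9.2
	//   rancherValues:
	//     hostPort: 8443
	//   discovery:
	//     expectedServers: 3
	//     params:
	//       provider: mdns
	//
	// with role left unset so that discovery (pkg/discovery) either elects
	// cluster-init or points the node at an existing server.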
RuntimeConfig 44 | KubernetesVersion string `json:"kubernetesVersion,omitempty"` 45 | RancherVersion string `json:"rancherVersion,omitempty"` 46 | Server string `json:"server,omitempty"` 47 | Discovery *DiscoveryConfig `json:"discovery,omitempty"` 48 | 49 | RancherValues map[string]interface{} `json:"rancherValues,omitempty"` 50 | PreInstructions []applyinator.Instruction `json:"preInstructions,omitempty"` 51 | PostInstructions []applyinator.Instruction `json:"postInstructions,omitempty"` 52 | // Deprecated, use Resources instead 53 | BootstrapResources []v1.GenericMap `json:"bootstrapResources,omitempty"` 54 | Resources []v1.GenericMap `json:"resources,omitempty"` 55 | 56 | RuntimeInstallerImage string `json:"runtimeInstallerImage,omitempty"` 57 | RancherInstallerImage string `json:"rancherInstallerImage,omitempty"` 58 | SystemDefaultRegistry string `json:"systemDefaultRegistry,omitempty"` 59 | Registries *registries.Registry `json:"registries,omitempty"` 60 | } 61 | 62 | type DiscoveryConfig struct { 63 | Params map[string]string `json:"params,omitempty"` 64 | ExpectedServers int `json:"expectedServers,omitempty"` 65 | // ServerCacheDuration will remember discovered servers for this amount of time. This 66 | // helps with some discovery protocols like mDNS that can be unreliable 67 | ServerCacheDuration string `json:"serverCacheDuration,omitempty"` 68 | } 69 | 70 | func paths() (result []string) { 71 | for _, file := range implicitPaths { 72 | result = append(result, file) 73 | 74 | files, err := ioutil.ReadDir(file) 75 | if err != nil { 76 | continue 77 | } 78 | 79 | for _, entry := range files { 80 | if isYAML(entry.Name()) { 81 | result = append(result, filepath.Join(file, entry.Name())) 82 | } 83 | } 84 | } 85 | return 86 | } 87 | 88 | func Load(path string) (result Config, err error) { 89 | var ( 90 | values = map[string]interface{}{} 91 | ) 92 | 93 | if err := populatedSystemResources(&result); err != nil { 94 | return result, err 95 | } 96 | 97 | for _, file := range paths() { 98 | newValues, err := mergeFile(values, file) 99 | if err == nil { 100 | values = newValues 101 | } else { 102 | logrus.Infof("failed to parse %s, skipping file: %v", file, err) 103 | } 104 | } 105 | 106 | if path != "" { 107 | values, err = mergeFile(values, path) 108 | if err != nil { 109 | return 110 | } 111 | } 112 | 113 | err = convert.ToObj(values, &result) 114 | if err != nil { 115 | return 116 | } 117 | 118 | return processRemote(result) 119 | } 120 | 121 | func populatedSystemResources(config *Config) error { 122 | resources, err := loadResources(manifests...) 123 | if err != nil { 124 | return err 125 | } 126 | config.Resources = append(config.Resources, config.BootstrapResources...) 127 | config.Resources = append(config.Resources, resources...) 
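	// The deprecated BootstrapResources are folded into Resources ahead of any
	// manifests discovered on disk, so older configs continue to be applied.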
128 | 129 | return nil 130 | } 131 | 132 | func isYAML(filename string) bool { 133 | lower := strings.ToLower(filename) 134 | return strings.HasSuffix(lower, ".yaml") || strings.HasSuffix(lower, ".yml") 135 | } 136 | 137 | func loadResources(dirs ...string) (result []v1.GenericMap, _ error) { 138 | for _, dir := range dirs { 139 | err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error { 140 | if err != nil { 141 | return err 142 | } 143 | if info.IsDir() || !isYAML(path) { 144 | return nil 145 | } 146 | 147 | f, err := os.Open(path) 148 | if err != nil { 149 | return err 150 | } 151 | defer f.Close() 152 | 153 | objs, err := yaml.ToObjects(f) 154 | if err != nil { 155 | return err 156 | } 157 | 158 | for _, obj := range objs { 159 | apiVersion, kind := obj.GetObjectKind().GroupVersionKind().ToAPIVersionAndKind() 160 | if apiVersion == "" || kind == "" { 161 | continue 162 | } 163 | data, err := convert.EncodeToMap(obj) 164 | if err != nil { 165 | return err 166 | } 167 | result = append(result, v1.GenericMap{ 168 | Data: data, 169 | }) 170 | } 171 | 172 | return nil 173 | }) 174 | if os.IsNotExist(err) { 175 | continue 176 | } 177 | } 178 | 179 | return 180 | } 181 | 182 | func mergeFile(result map[string]interface{}, file string) (map[string]interface{}, error) { 183 | bytes, err := ioutil.ReadFile(file) 184 | if err != nil && !os.IsNotExist(err) { 185 | return nil, err 186 | } 187 | 188 | files, err := dotDFiles(file) 189 | if err != nil { 190 | return nil, err 191 | } 192 | 193 | values := map[string]interface{}{} 194 | if len(bytes) > 0 { 195 | logrus.Infof("Loading config file [%s]", file) 196 | if err := yaml.Unmarshal(bytes, &values); err != nil { 197 | return nil, err 198 | } 199 | } 200 | 201 | if v, ok := values["rancherd"].(map[string]interface{}); ok { 202 | values = v 203 | } 204 | 205 | result = data.MergeMapsConcatSlice(result, values) 206 | for _, file := range files { 207 | result, err = mergeFile(result, file) 208 | if err != nil { 209 | return nil, err 210 | } 211 | } 212 | 213 | return result, nil 214 | } 215 | 216 | func dotDFiles(basefile string) (result []string, _ error) { 217 | files, err := ioutil.ReadDir(basefile + ".d") 218 | if os.IsNotExist(err) { 219 | return nil, nil 220 | } else if err != nil { 221 | return nil, err 222 | } 223 | for _, file := range files { 224 | if file.IsDir() || (!strings.HasSuffix(file.Name(), ".yaml") && !strings.HasSuffix(file.Name(), ".yml")) { 225 | continue 226 | } 227 | result = append(result, filepath.Join(basefile+".d", file.Name())) 228 | } 229 | return 230 | } 231 | -------------------------------------------------------------------------------- /pkg/discovery/discovery.go: -------------------------------------------------------------------------------- 1 | package discovery 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "encoding/json" 7 | "fmt" 8 | "io/ioutil" 9 | "log" 10 | "net" 11 | "net/http" 12 | "sort" 13 | "strconv" 14 | "sync" 15 | "time" 16 | 17 | "github.com/hashicorp/go-discover" 18 | "github.com/rancher/rancherd/pkg/config" 19 | "github.com/rancher/wrangler/pkg/data/convert" 20 | "github.com/rancher/wrangler/pkg/randomtoken" 21 | "github.com/rancher/wrangler/pkg/slice" 22 | "github.com/sirupsen/logrus" 23 | "k8s.io/client-go/util/cert" 24 | 25 | // Include kubernetes provider 26 | _ "github.com/hashicorp/go-discover/provider/k8s" 27 | ) 28 | 29 | var ( 30 | insecureHTTPClient = http.Client{ 31 | Timeout: 10 * time.Second, 32 | Transport: &http.Transport{ 33 | Proxy: 
http.ProxyFromEnvironment, 34 | TLSHandshakeTimeout: 5 * time.Second, 35 | TLSClientConfig: &tls.Config{ 36 | InsecureSkipVerify: true, 37 | }, 38 | }, 39 | } 40 | ) 41 | 42 | func DiscoverServerAndRole(ctx context.Context, cfg *config.Config) error { 43 | if cfg.Discovery == nil { 44 | if cfg.Server == "" && cfg.Role == "server" { 45 | cfg.Role = "cluster-init" 46 | } 47 | return nil 48 | } 49 | 50 | if cfg.Token == "" { 51 | return fmt.Errorf("token is required to be set when discovery is set") 52 | } 53 | 54 | server, clusterInit, err := discoverServerAndRole(ctx, cfg) 55 | if err != nil { 56 | return err 57 | } 58 | if clusterInit { 59 | cfg.Role = "cluster-init" 60 | } else if server != "" { 61 | cfg.Server = server 62 | } 63 | logrus.Infof("Using role=%s and server=%s", cfg.Role, cfg.Server) 64 | return nil 65 | 66 | } 67 | func discoverServerAndRole(ctx context.Context, cfg *config.Config) (string, bool, error) { 68 | discovery, err := discover.New() 69 | if err != nil { 70 | return "", false, err 71 | } 72 | 73 | port, err := convert.ToNumber(cfg.RancherValues["hostPort"]) 74 | if err != nil || port == 0 { 75 | port = 8443 76 | } 77 | 78 | ctx, cancel := context.WithCancel(ctx) 79 | defer cancel() 80 | 81 | server, err := newJoinServer(ctx, cfg.Discovery.ServerCacheDuration, port) 82 | if err != nil { 83 | return "", false, err 84 | } 85 | 86 | count := cfg.Discovery.ExpectedServers 87 | if count == 0 { 88 | count = 3 89 | } 90 | 91 | for { 92 | server, clusterInit := server.loop(ctx, count, cfg.Discovery.Params, port, discovery) 93 | if clusterInit { 94 | return "", true, nil 95 | } 96 | if server != "" { 97 | return server, false, nil 98 | } 99 | logrus.Info("Waiting to discover server") 100 | select { 101 | case <-ctx.Done(): 102 | return "", false, fmt.Errorf("interrupted waiting to discover server: %w", ctx.Err()) 103 | case <-time.After(5 * time.Second): 104 | } 105 | } 106 | } 107 | 108 | func (j *joinServer) addresses(params map[string]string, discovery *discover.Discover) ([]string, error) { 109 | if params["provider"] == "mdns" { 110 | params["v6"] = "false" 111 | } 112 | addrs, err := discovery.Addrs(discover.Config(params).String(), log.Default()) 113 | if err != nil { 114 | return nil, err 115 | } 116 | 117 | var ips []string 118 | for _, addr := range addrs { 119 | host, _, err := net.SplitHostPort(addr) 120 | if err == nil { 121 | ips = append(ips, host) 122 | } else { 123 | ips = append(ips, addr) 124 | } 125 | } 126 | 127 | return ips, nil 128 | } 129 | 130 | func (j *joinServer) loop(ctx context.Context, count int, params map[string]string, port int64, discovery *discover.Discover) (string, bool) { 131 | addrs, err := j.addresses(params, discovery) 132 | if err != nil { 133 | logrus.Errorf("failed to discover peers to: %v", err) 134 | return "", false 135 | } 136 | 137 | addrs = j.setPeers(addrs) 138 | 139 | var ( 140 | allAgree = true 141 | firstID = "" 142 | ) 143 | for i, addr := range addrs { 144 | url := fmt.Sprintf("https://%s:%d/cacerts", addr, port) 145 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) 146 | if err != nil { 147 | logrus.Errorf("failed to construct request for %s: %v", url, err) 148 | return "", false 149 | } 150 | resp, err := insecureHTTPClient.Do(req) 151 | if err != nil { 152 | logrus.Infof("failed to connect to %s: %v", url, err) 153 | allAgree = false 154 | continue 155 | } 156 | 157 | data, err := ioutil.ReadAll(resp.Body) 158 | resp.Body.Close() 159 | if err != nil || resp.StatusCode != http.StatusOK { 160 | 
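			// Peers that return an error or a non-200 response here count as
			// disagreement, so the checks at the end of this loop keep waiting
			// rather than electing a leader early.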
logrus.Infof("failed to read response from %s: code %d: %v", url, resp.StatusCode, err) 161 | allAgree = false 162 | continue 163 | } 164 | 165 | rancherID := resp.Header.Get("X-Cattle-Rancherd-Id") 166 | if rancherID == "" { 167 | return fmt.Sprintf("https://%s", net.JoinHostPort(addr, strconv.FormatInt(port, 10))), false 168 | } 169 | if i == 0 { 170 | firstID = rancherID 171 | } 172 | 173 | var pingResponse pingResponse 174 | if err := json.Unmarshal(data, &pingResponse); err != nil { 175 | logrus.Errorf("failed to unmarshal response (%s) from %s: %v", data, url, err) 176 | allAgree = false 177 | continue 178 | } 179 | 180 | if !slice.StringsEqual(addrs, pingResponse.Peers) { 181 | logrus.Infof("Peer %s does not agree on peer list, %v != %v", addr, addrs, pingResponse.Peers) 182 | allAgree = false 183 | continue 184 | } 185 | } 186 | 187 | if len(addrs) == 0 { 188 | logrus.Infof("No available peers") 189 | return "", false 190 | } 191 | 192 | if firstID != j.id { 193 | logrus.Infof("Waiting for peer %s from %v to initialize", addrs[0], addrs) 194 | return "", false 195 | } 196 | 197 | if len(addrs) != count { 198 | logrus.Infof("Expecting %d servers currently have %v", count, addrs) 199 | return "", false 200 | } 201 | 202 | if !allAgree { 203 | logrus.Infof("All peers %v do not agree on the peer list", addrs) 204 | return "", false 205 | } 206 | 207 | logrus.Infof("Currently the elected leader %s from peers %v", firstID, addrs) 208 | return "", true 209 | } 210 | 211 | type joinServer struct { 212 | lock sync.Mutex 213 | id string 214 | peers []string 215 | peerSeen map[string]time.Time 216 | cacheDuration time.Duration 217 | } 218 | 219 | type pingResponse struct { 220 | Peers []string `json:"peers,omitempty"` 221 | } 222 | 223 | func newJoinServer(ctx context.Context, cacheDuration string, port int64) (*joinServer, error) { 224 | id, err := randomtoken.Generate() 225 | if err != nil { 226 | return nil, err 227 | } 228 | 229 | if cacheDuration == "" { 230 | cacheDuration = "1m" 231 | } 232 | 233 | duration, err := time.ParseDuration(cacheDuration) 234 | if err != nil { 235 | return nil, err 236 | } 237 | 238 | j := &joinServer{ 239 | id: id, 240 | cacheDuration: duration, 241 | peerSeen: map[string]time.Time{}, 242 | } 243 | 244 | cert, key, err := cert.GenerateSelfSignedCertKey("rancherd-bootstrap", nil, nil) 245 | if err != nil { 246 | return nil, err 247 | } 248 | certs, err := tls.X509KeyPair(cert, key) 249 | if err != nil { 250 | return nil, err 251 | } 252 | l, err := tls.Listen("tcp", fmt.Sprintf(":%d", port), &tls.Config{ 253 | Certificates: []tls.Certificate{ 254 | certs, 255 | }, 256 | }) 257 | if err != nil { 258 | return nil, err 259 | } 260 | server := &http.Server{ 261 | BaseContext: func(_ net.Listener) context.Context { 262 | return ctx 263 | }, 264 | Handler: j, 265 | } 266 | go func() { 267 | err := server.Serve(l) 268 | if err != nil { 269 | logrus.Errorf("failed to server bootstrap http server: %v", err) 270 | } 271 | }() 272 | go func() { 273 | <-ctx.Done() 274 | server.Shutdown(context.Background()) 275 | l.Close() 276 | }() 277 | 278 | return j, nil 279 | } 280 | 281 | func (j *joinServer) setPeers(peers []string) []string { 282 | j.lock.Lock() 283 | defer j.lock.Unlock() 284 | 285 | // purge 286 | now := time.Now() 287 | for k, v := range j.peerSeen { 288 | if v.Add(j.cacheDuration).Before(now) { 289 | logrus.Infof("Forgetting peer %s", k) 290 | delete(j.peerSeen, k) 291 | } 292 | } 293 | 294 | // add 295 | for _, peer := range peers { 296 | if _, ok := 
j.peerSeen[peer]; !ok { 297 | logrus.Infof("New peer discovered %s", peer) 298 | } 299 | j.peerSeen[peer] = now 300 | } 301 | 302 | // sort 303 | newPeers := make([]string, 0, len(j.peerSeen)) 304 | for k := range j.peerSeen { 305 | newPeers = append(newPeers, k) 306 | } 307 | sort.Strings(newPeers) 308 | 309 | j.peers = newPeers 310 | logrus.Infof("current set of peers: %v", j.peers) 311 | return j.peers 312 | } 313 | 314 | func (j *joinServer) ServeHTTP(rw http.ResponseWriter, _ *http.Request) { 315 | j.lock.Lock() 316 | defer j.lock.Unlock() 317 | 318 | rw.Header().Set("X-Cattle-Rancherd-Id", j.id) 319 | rw.Header().Set("Content-Type", "application/json") 320 | _ = json.NewEncoder(rw).Encode(pingResponse{ 321 | Peers: j.peers, 322 | }) 323 | } 324 | -------------------------------------------------------------------------------- /pkg/images/images.go: -------------------------------------------------------------------------------- 1 | package images 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/rancher/rancherd/pkg/config" 8 | ) 9 | 10 | const ( 11 | defaultSystemImagePrefix = "rancher/system-agent-installer" 12 | ) 13 | 14 | func GetRancherInstallerImage(imageOverride, imagePrefix, rancherVersion string) string { 15 | return getInstallerImage(imageOverride, imagePrefix, "rancher", rancherVersion) 16 | } 17 | 18 | func GetInstallerImage(imageOverride, imagePrefix, kubernetesVersion string) string { 19 | return getInstallerImage(imageOverride, imagePrefix, string(config.GetRuntime(kubernetesVersion)), kubernetesVersion) 20 | } 21 | 22 | func getInstallerImage(imageOverride, imagePrefix, component, version string) string { 23 | if imageOverride != "" { 24 | return imageOverride 25 | } 26 | if imagePrefix == "" { 27 | imagePrefix = defaultSystemImagePrefix 28 | } 29 | 30 | tag := strings.ReplaceAll(version, "+", "-") 31 | if tag == "" { 32 | tag = "latest" 33 | } 34 | return fmt.Sprintf("%s-%s:%s", imagePrefix, component, tag) 35 | } 36 | -------------------------------------------------------------------------------- /pkg/join/join.go: -------------------------------------------------------------------------------- 1 | package join 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/rancher/rancherd/pkg/cacerts" 9 | "github.com/rancher/rancherd/pkg/config" 10 | "github.com/rancher/rancherd/pkg/roles" 11 | "github.com/rancher/system-agent/pkg/applyinator" 12 | ) 13 | 14 | func addEnv(env []string, key, value string) []string { 15 | return append(env, fmt.Sprintf("%s=%s", key, value)) 16 | } 17 | 18 | func GetInstallScriptFile(dataDir string) string { 19 | return fmt.Sprintf("%s/install.sh", dataDir) 20 | } 21 | 22 | func ToScriptFile(config *config.Config, dataDir string) (*applyinator.File, error) { 23 | data, _, err := cacerts.Get(config.Server, config.Token, "/system-agent-install.sh") 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | return &applyinator.File{ 29 | Content: base64.StdEncoding.EncodeToString(data), 30 | Path: GetInstallScriptFile(dataDir), 31 | }, nil 32 | } 33 | 34 | func ToInstruction(config *config.Config, dataDir string) (*applyinator.Instruction, error) { 35 | var ( 36 | etcd = roles.IsEtcd(config.Role) 37 | controlPlane = roles.IsControlPlane(config.Role) 38 | worker = roles.IsWorker(config.Role) 39 | ) 40 | 41 | if !etcd && !controlPlane && !worker { 42 | return nil, fmt.Errorf("invalid role (%s) defined", config.Role) 43 | } 44 | 45 | _, caChecksum, err := cacerts.CACerts(config.Server, config.Token, true) 46 
| if err != nil { 47 | return nil, err 48 | } 49 | 50 | var env []string 51 | env = addEnv(env, "CATTLE_SERVER", config.Server) 52 | env = addEnv(env, "CATTLE_TOKEN", config.Token) 53 | env = addEnv(env, "CATTLE_CA_CHECKSUM", caChecksum) 54 | env = addEnv(env, "CATTLE_ADDRESS", config.Address) 55 | env = addEnv(env, "CATTLE_INTERNAL_ADDRESS", config.InternalAddress) 56 | env = addEnv(env, "CATTLE_LABELS", strings.Join(config.Labels, ",")) 57 | env = addEnv(env, "CATTLE_TAINTS", strings.Join(config.Taints, ",")) 58 | env = addEnv(env, "CATTLE_ROLE_ETCD", fmt.Sprint(etcd)) 59 | env = addEnv(env, "CATTLE_ROLE_CONTROLPLANE", fmt.Sprint(controlPlane)) 60 | env = addEnv(env, "CATTLE_ROLE_WORKER", fmt.Sprint(worker)) 61 | 62 | if config.NodeName != "" { 63 | env = addEnv(env, "CATTLE_NODE_NAME", config.NodeName) 64 | } 65 | 66 | return &applyinator.Instruction{ 67 | Name: "join", 68 | SaveOutput: true, 69 | Env: env, 70 | Args: []string{ 71 | "sh", GetInstallScriptFile(dataDir), 72 | }, 73 | Command: "/usr/bin/env", 74 | }, nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/kubectl/kubectl.go: -------------------------------------------------------------------------------- 1 | package kubectl 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/rancher/rancherd/pkg/config" 8 | ) 9 | 10 | var ( 11 | kubeconfigs = []string{ 12 | "/etc/rancher/k3s/k3s.yaml", 13 | "/etc/rancher/rke2/rke2.yaml", 14 | } 15 | ) 16 | 17 | func Env(k8sVersion string) []string { 18 | runtime := config.GetRuntime(k8sVersion) 19 | return []string{ 20 | fmt.Sprintf("KUBECONFIG=/etc/rancher/%s/%s.yaml", runtime, runtime), 21 | } 22 | } 23 | 24 | func Command(k8sVersion string) string { 25 | kubectl := "/usr/local/bin/kubectl" 26 | runtime := config.GetRuntime(k8sVersion) 27 | if runtime == config.RuntimeRKE2 { 28 | kubectl = "/var/lib/rancher/rke2/bin/kubectl" 29 | } 30 | return kubectl 31 | } 32 | 33 | func GetKubeconfig(kubeconfig string) (string, error) { 34 | if kubeconfig != "" { 35 | return kubeconfig, nil 36 | } 37 | 38 | for _, kubeconfig := range kubeconfigs { 39 | if _, err := os.Stat(kubeconfig); err == nil { 40 | return kubeconfig, nil 41 | } 42 | } 43 | return "", fmt.Errorf("failed to find kubeconfig file at %v", kubeconfigs) 44 | } 45 | -------------------------------------------------------------------------------- /pkg/os/os.go: -------------------------------------------------------------------------------- 1 | package os 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/rancher/rancherd/pkg/kubectl" 9 | "github.com/rancher/rancherd/pkg/self" 10 | "github.com/rancher/system-agent/pkg/applyinator" 11 | ) 12 | 13 | func ToUpgradeInstruction(k8sVersion, rancherOSVersion string) (*applyinator.Instruction, error) { 14 | cmd, err := self.Self() 15 | if err != nil { 16 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 17 | } 18 | patch, err := json.Marshal(map[string]interface{}{ 19 | "spec": map[string]interface{}{ 20 | "osImage": rancherOSVersion, 21 | }, 22 | }) 23 | if err != nil { 24 | return nil, err 25 | } 26 | return &applyinator.Instruction{ 27 | Name: "patch-rancher-os-version", 28 | SaveOutput: true, 29 | Args: []string{"retry", kubectl.Command(k8sVersion), "--type=merge", "-n", "fleet-local", "patch", "managedosimages.rancheros.cattle.io", "default-os-image", "-p", string(patch)}, 30 | Env: kubectl.Env(k8sVersion), 31 | Command: cmd, 32 | }, nil 33 | } 34 | 
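// Illustration (version strings are hypothetical): for an RKE2-based k8sVersion
// the instruction returned above is roughly equivalent to running
//
//	KUBECONFIG=/etc/rancher/rke2/rke2.yaml \
//	rancherd retry /var/lib/rancher/rke2/bin/kubectl --type=merge \
//	    -n fleet-local patch managedosimages.rancheros.cattle.io \
//	    default-os-image -p '{"spec":{"osImage":"v1.4.0"}}'
//
// where "rancherd" stands for whatever path self.Self() resolves for the
// current binary.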
-------------------------------------------------------------------------------- /pkg/plan/bootstrap.go: -------------------------------------------------------------------------------- 1 | package plan 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/rancher/system-agent/pkg/applyinator" 8 | "golang.org/x/mod/semver" 9 | 10 | "github.com/rancher/rancherd/pkg/cacerts" 11 | "github.com/rancher/rancherd/pkg/config" 12 | "github.com/rancher/rancherd/pkg/discovery" 13 | "github.com/rancher/rancherd/pkg/join" 14 | "github.com/rancher/rancherd/pkg/kubectl" 15 | "github.com/rancher/rancherd/pkg/probe" 16 | "github.com/rancher/rancherd/pkg/rancher" 17 | "github.com/rancher/rancherd/pkg/registry" 18 | "github.com/rancher/rancherd/pkg/resources" 19 | "github.com/rancher/rancherd/pkg/runtime" 20 | "github.com/rancher/rancherd/pkg/versions" 21 | ) 22 | 23 | type plan applyinator.Plan 24 | 25 | func toInitPlan(config *config.Config, dataDir string) (*applyinator.Plan, error) { 26 | if err := assignTokenIfUnset(config); err != nil { 27 | return nil, err 28 | } 29 | 30 | plan := plan{} 31 | if err := plan.addFiles(config, dataDir); err != nil { 32 | return nil, err 33 | } 34 | 35 | if err := plan.addInstructions(config, dataDir); err != nil { 36 | return nil, err 37 | } 38 | 39 | if err := plan.addProbes(config); err != nil { 40 | return nil, err 41 | } 42 | 43 | return (*applyinator.Plan)(&plan), nil 44 | } 45 | 46 | func toJoinPlan(cfg *config.Config, dataDir string) (*applyinator.Plan, error) { 47 | if cfg.Server == "" { 48 | return nil, fmt.Errorf("server is required in config for all roles besides cluster-init") 49 | } 50 | if cfg.Token == "" { 51 | return nil, fmt.Errorf("token is required in config for all roles besides cluster-init") 52 | } 53 | 54 | plan := plan{} 55 | if err := plan.addFile(cacerts.ToFile(cfg.Server, cfg.Token)); err != nil { 56 | return nil, err 57 | } 58 | if err := plan.addFile(join.ToScriptFile(cfg, dataDir)); err != nil { 59 | return nil, err 60 | } 61 | if err := plan.addInstruction(cacerts.ToUpdateCACertificatesInstruction()); err != nil { 62 | return nil, err 63 | } 64 | if err := plan.addInstruction(join.ToInstruction(cfg, dataDir)); err != nil { 65 | return nil, err 66 | } 67 | if err := plan.addInstruction(probe.ToInstruction()); err != nil { 68 | return nil, err 69 | } 70 | if err := plan.addProbesForJoin(cfg); err != nil { 71 | return nil, err 72 | } 73 | 74 | return (*applyinator.Plan)(&plan), nil 75 | } 76 | 77 | func ToPlan(ctx context.Context, config *config.Config, dataDir string) (*applyinator.Plan, error) { 78 | newCfg := *config 79 | if err := discovery.DiscoverServerAndRole(ctx, &newCfg); err != nil { 80 | return nil, err 81 | } 82 | if newCfg.Role == "cluster-init" { 83 | return toInitPlan(&newCfg, dataDir) 84 | } 85 | return toJoinPlan(&newCfg, dataDir) 86 | } 87 | 88 | func (p *plan) addInstructions(cfg *config.Config, dataDir string) error { 89 | k8sVersion, err := versions.K8sVersion(cfg.KubernetesVersion) 90 | if err != nil { 91 | return err 92 | } 93 | 94 | if err := p.addInstruction(runtime.ToInstruction(cfg.RuntimeInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 95 | return err 96 | } 97 | 98 | if err := p.addInstruction(probe.ToInstruction()); err != nil { 99 | return err 100 | } 101 | 102 | rancherVersion, err := versions.RancherVersion(cfg.RancherVersion) 103 | if err != nil { 104 | return err 105 | } 106 | if err := p.addInstruction(rancher.ToInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, 
k8sVersion, rancherVersion, dataDir)); err != nil { 107 | return err 108 | } 109 | 110 | if err := p.addInstruction(rancher.ToWaitRancherInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 111 | return err 112 | } 113 | 114 | if err := p.addInstruction(rancher.ToWaitRancherWebhookInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 115 | return err 116 | } 117 | 118 | if err := p.addInstruction(rancher.ToWaitClusterClientSecretInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 119 | return err 120 | } 121 | 122 | if err := p.addInstruction(rancher.ToScaleDownFleetControllerInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 123 | return err 124 | } 125 | 126 | if err := p.addInstruction(rancher.ToUpdateClientSecretInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 127 | return err 128 | } 129 | 130 | if err := p.addInstruction(rancher.ToScaleUpFleetControllerInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 131 | return err 132 | } 133 | 134 | // Above Rancher v2.9.x, we cannot patch provisioing cluster with empty rkeConfig, 135 | // so we need to delete the webhook validation configuration. 136 | if semver.Compare(cfg.RancherVersion, "v2.9.0") >= 0 { 137 | if err := p.addInstruction(rancher.ToDeleteRancherWebhookValidationConfiguration(k8sVersion)); err != nil { 138 | return err 139 | } 140 | } 141 | 142 | // Patch local provisioning cluster status before installing bootstrap resources, 143 | // so bundles can be created first and managed charts can be installed smoothly. 144 | if semver.Compare(cfg.RancherVersion, "v2.8.0") >= 0 { 145 | if err := p.addInstruction(rancher.PatchLocalProvisioningClusterStatus(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 146 | return err 147 | } 148 | } 149 | 150 | // If clusterrepo check fails, it waits 5 minutes and retries. 151 | // Install harvester-cluster-repo deployment before clusterrepo, 152 | // so we can avoid the 5 minutes waiting time. 153 | if err := p.addInstruction(resources.ToHarvesterClusterRepoInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion, dataDir)); err != nil { 154 | return err 155 | } 156 | 157 | if err := p.addInstruction(resources.ToWaitHarvesterClusterRepoInstruction(k8sVersion)); err != nil { 158 | return err 159 | } 160 | 161 | if err := p.addInstruction(resources.ToInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion, dataDir)); err != nil { 162 | return err 163 | } 164 | 165 | if semver.Compare(cfg.RancherVersion, "v2.9.0") >= 0 { 166 | if err := p.addInstruction(rancher.ToRestartRancherWebhookInstruction(k8sVersion)); err != nil { 167 | return err 168 | } 169 | } 170 | 171 | if err := p.addInstruction(rancher.ToWaitSUCInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 172 | return err 173 | } 174 | 175 | // Rancher added stv-aggregation secret to system-agent-upgrader plan from v2.11.0. 176 | // We need to create the secret to make the plan ready. 
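	// The gate below compares against "v2.11.0-alpha" rather than "v2.11.0" so
	// that v2.11.0 pre-releases (alphas and RCs) are covered too, since semver
	// orders pre-releases before the final release.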
177 | // https://github.com/rancher/rancher/commit/235c2c6a495743dfecafe40b5440fc96b67e2b43 178 | if semver.Compare(cfg.RancherVersion, "v2.11.0-alpha") >= 0 { 179 | if err := p.addInstruction(rancher.ToCreateStvAggregationSecret(k8sVersion)); err != nil { 180 | return err 181 | } 182 | } 183 | 184 | if err := p.addInstruction(rancher.ToWaitSUCPlanInstruction(cfg.RancherInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 185 | return err 186 | } 187 | 188 | if err := p.addInstruction(runtime.ToWaitKubernetesInstruction(cfg.RuntimeInstallerImage, cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 189 | return err 190 | } 191 | 192 | p.addPrePostInstructions(cfg, k8sVersion) 193 | return nil 194 | } 195 | 196 | func (p *plan) addPrePostInstructions(cfg *config.Config, k8sVersion string) { 197 | var instructions []applyinator.Instruction 198 | 199 | for _, inst := range cfg.PreInstructions { 200 | if k8sVersion != "" { 201 | inst.Env = append(inst.Env, kubectl.Env(k8sVersion)...) 202 | } 203 | instructions = append(instructions, inst) 204 | } 205 | 206 | instructions = append(instructions, p.Instructions...) 207 | 208 | for _, inst := range cfg.PostInstructions { 209 | inst.Env = append(inst.Env, kubectl.Env(k8sVersion)...) 210 | instructions = append(instructions, inst) 211 | } 212 | 213 | p.Instructions = instructions 214 | return 215 | } 216 | 217 | func (p *plan) addInstruction(instruction *applyinator.Instruction, err error) error { 218 | if err != nil || instruction == nil { 219 | return err 220 | } 221 | 222 | p.Instructions = append(p.Instructions, *instruction) 223 | return nil 224 | } 225 | 226 | func (p *plan) addFiles(cfg *config.Config, dataDir string) error { 227 | k8sVersions, err := versions.K8sVersion(cfg.KubernetesVersion) 228 | if err != nil { 229 | return err 230 | } 231 | runtimeName := config.GetRuntime(k8sVersions) 232 | 233 | // config.yaml 234 | if err := p.addFile(runtime.ToFile(&cfg.RuntimeConfig, runtimeName, true)); err != nil { 235 | return err 236 | } 237 | 238 | // bootstrap config.yaml 239 | if err := p.addFile(runtime.ToBootstrapFile(runtimeName)); err != nil { 240 | return err 241 | } 242 | 243 | // registries.yaml 244 | if err := p.addFile(registry.ToFile(cfg.Registries, runtimeName)); err != nil { 245 | return err 246 | } 247 | 248 | // harvester-cluster-repo manifests 249 | if err := p.addFile(resources.ToHarvesterClusterRepoFile(resources.GetHarvesterClusterRepoManifests(dataDir))); err != nil { 250 | return err 251 | } 252 | 253 | // bootstrap manifests 254 | if err := p.addFile(resources.ToBootstrapFile(cfg, resources.GetBootstrapManifests(dataDir))); err != nil { 255 | return err 256 | } 257 | 258 | // rancher values.yaml 259 | return p.addFile(rancher.ToFile(cfg, dataDir)) 260 | 261 | } 262 | 263 | func (p *plan) addFile(file *applyinator.File, err error) error { 264 | if err != nil || file == nil { 265 | return err 266 | } 267 | p.Files = append(p.Files, *file) 268 | return nil 269 | } 270 | 271 | func (p *plan) addProbesForJoin(cfg *config.Config) error { 272 | p.Probes = probe.ProbesForJoin(&cfg.RuntimeConfig) 273 | return nil 274 | } 275 | 276 | func (p *plan) addProbes(cfg *config.Config) error { 277 | k8sVersion, err := versions.K8sVersion(cfg.KubernetesVersion) 278 | if err != nil { 279 | return err 280 | } 281 | p.Probes = probe.AllProbes(config.GetRuntime(k8sVersion)) 282 | return nil 283 | } 284 | -------------------------------------------------------------------------------- /pkg/plan/run.go: 
-------------------------------------------------------------------------------- 1 | package plan 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "context" 7 | "encoding/json" 8 | "io" 9 | "os" 10 | "path/filepath" 11 | 12 | "github.com/rancher/rancherd/pkg/config" 13 | "github.com/rancher/rancherd/pkg/registry" 14 | "github.com/rancher/rancherd/pkg/versions" 15 | "github.com/rancher/system-agent/pkg/applyinator" 16 | "github.com/rancher/system-agent/pkg/image" 17 | "github.com/sirupsen/logrus" 18 | ) 19 | 20 | func Run(ctx context.Context, cfg *config.Config, plan *applyinator.Plan, dataDir string) error { 21 | k8sVersion, err := versions.K8sVersion(cfg.KubernetesVersion) 22 | if err != nil { 23 | return err 24 | } 25 | return RunWithKubernetesVersion(ctx, k8sVersion, plan, dataDir) 26 | } 27 | 28 | func RunWithKubernetesVersion(ctx context.Context, k8sVersion string, plan *applyinator.Plan, dataDir string) error { 29 | runtime := config.GetRuntime(k8sVersion) 30 | 31 | if err := writePlan(plan, dataDir); err != nil { 32 | return err 33 | } 34 | 35 | images := image.NewUtility("", "", "", registry.GetConfigFile(runtime)) 36 | apply := applyinator.NewApplyinator(filepath.Join(dataDir, "plan", "work"), false, 37 | filepath.Join(dataDir, "plan", "applied"), images) 38 | 39 | output, err := apply.Apply(ctx, applyinator.CalculatedPlan{ 40 | Plan: *plan, 41 | }) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | return saveOutput(output, dataDir) 47 | } 48 | 49 | func saveOutput(data []byte, dataDir string) error { 50 | planOutput := GetPlanOutput(dataDir) 51 | f, err := os.OpenFile(planOutput, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) 52 | if err != nil { 53 | return err 54 | } 55 | defer f.Close() 56 | 57 | in, err := gzip.NewReader(bytes.NewBuffer(data)) 58 | if err != nil { 59 | return err 60 | } 61 | _, err = io.Copy(f, in) 62 | return err 63 | } 64 | 65 | func writePlan(plan *applyinator.Plan, dataDir string) error { 66 | planFile := GetPlanFile(dataDir) 67 | if err := os.MkdirAll(filepath.Dir(planFile), 0755); err != nil { 68 | return err 69 | } 70 | 71 | logrus.Infof("Writing plan file to %s", planFile) 72 | f, err := os.OpenFile(planFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) 73 | if err != nil { 74 | return err 75 | } 76 | defer f.Close() 77 | 78 | enc := json.NewEncoder(f) 79 | enc.SetIndent("", " ") 80 | return enc.Encode(plan) 81 | } 82 | 83 | func GetPlanFile(dataDir string) string { 84 | return filepath.Join(dataDir, "plan", "plan.json") 85 | } 86 | 87 | func GetPlanOutput(dataDir string) string { 88 | return filepath.Join(dataDir, "plan", "plan-output.json") 89 | } 90 | -------------------------------------------------------------------------------- /pkg/plan/token.go: -------------------------------------------------------------------------------- 1 | package plan 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | 7 | "github.com/rancher/rancherd/pkg/config" 8 | "github.com/rancher/rancherd/pkg/runtime" 9 | "github.com/rancher/rancherd/pkg/versions" 10 | "github.com/rancher/wrangler/pkg/data/convert" 11 | "github.com/rancher/wrangler/pkg/randomtoken" 12 | "github.com/rancher/wrangler/pkg/yaml" 13 | ) 14 | 15 | func assignTokenIfUnset(cfg *config.Config) error { 16 | if cfg.Token != "" { 17 | return nil 18 | } 19 | 20 | token, err := existingToken(cfg) 21 | if err != nil { 22 | return err 23 | } 24 | 25 | if token == "" { 26 | token, err = randomtoken.Generate() 27 | if err != nil { 28 | return err 29 | } 30 | } 31 | 32 | cfg.Token = token 33 | return nil 34 | } 35 | 36 | 
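// existingToken returns a token persisted by an earlier bootstrap run. It reads the
// generated runtime config file (/etc/rancher/<k3s|rke2>/config.yaml.d/40-rancherd.yaml,
// see runtime.GetConfigLocation) and extracts its "token" key, so re-running rancherd
// reuses the same cluster token instead of generating a fresh one; a missing file simply
// means no token has been persisted yet.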
func existingToken(cfg *config.Config) (string, error) { 37 | k8sVersion, err := versions.K8sVersion(cfg.KubernetesVersion) 38 | if err != nil { 39 | return "", err 40 | } 41 | 42 | cfgFile := runtime.GetConfigLocation(config.GetRuntime(k8sVersion)) 43 | data, err := ioutil.ReadFile(cfgFile) 44 | if os.IsNotExist(err) { 45 | return "", nil 46 | } else if err != nil { 47 | return "", err 48 | } 49 | 50 | configMap := map[string]interface{}{} 51 | if err := yaml.Unmarshal(data, &configMap); err != nil { 52 | return "", err 53 | } 54 | 55 | return convert.ToString(configMap["token"]), nil 56 | } 57 | -------------------------------------------------------------------------------- /pkg/plan/upgrade.go: -------------------------------------------------------------------------------- 1 | package plan 2 | 3 | import ( 4 | "github.com/rancher/rancherd/pkg/config" 5 | "github.com/rancher/rancherd/pkg/os" 6 | "github.com/rancher/rancherd/pkg/rancher" 7 | "github.com/rancher/rancherd/pkg/runtime" 8 | "github.com/rancher/system-agent/pkg/applyinator" 9 | ) 10 | 11 | func Upgrade(cfg *config.Config, k8sVersion, rancherVersion, rancherOSVersion, dataDir string) (*applyinator.Plan, error) { 12 | p := plan{} 13 | 14 | if rancherVersion != "" { 15 | if err := p.addInstruction(rancher.ToUpgradeInstruction("", cfg.SystemDefaultRegistry, k8sVersion, rancherVersion, dataDir)); err != nil { 16 | return nil, err 17 | } 18 | if err := p.addInstruction(rancher.ToWaitRancherInstruction("", cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 19 | return nil, err 20 | } 21 | } 22 | 23 | if k8sVersion != "" { 24 | if err := p.addInstruction(runtime.ToUpgradeInstruction(k8sVersion)); err != nil { 25 | return nil, err 26 | } 27 | if err := p.addInstruction(runtime.ToWaitKubernetesInstruction("", cfg.SystemDefaultRegistry, k8sVersion)); err != nil { 28 | return nil, err 29 | } 30 | } 31 | 32 | if rancherOSVersion != "" { 33 | if err := p.addInstruction(os.ToUpgradeInstruction(k8sVersion, rancherOSVersion)); err != nil { 34 | return nil, err 35 | } 36 | } 37 | 38 | return (*applyinator.Plan)(&p), nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/probe/probe.go: -------------------------------------------------------------------------------- 1 | package probe 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | "github.com/rancher/rancherd/pkg/config" 9 | "github.com/rancher/rancherd/pkg/roles" 10 | "github.com/rancher/rancherd/pkg/self" 11 | "github.com/rancher/system-agent/pkg/applyinator" 12 | "github.com/rancher/system-agent/pkg/prober" 13 | ) 14 | 15 | var probes = map[string]prober.Probe{ 16 | "kube-apiserver": { 17 | InitialDelaySeconds: 1, 18 | TimeoutSeconds: 5, 19 | SuccessThreshold: 1, 20 | FailureThreshold: 2, 21 | HTTPGetAction: prober.HTTPGetAction{ 22 | URL: "https://127.0.0.1:6443/readyz", 23 | CACert: "/var/lib/rancher/%s/server/tls/server-ca.crt", 24 | ClientCert: "/var/lib/rancher/%s/server/tls/client-kube-apiserver.crt", 25 | ClientKey: "/var/lib/rancher/%s/server/tls/client-kube-apiserver.key", 26 | }, 27 | }, 28 | "kube-scheduler": { 29 | InitialDelaySeconds: 1, 30 | TimeoutSeconds: 5, 31 | SuccessThreshold: 1, 32 | FailureThreshold: 2, 33 | HTTPGetAction: prober.HTTPGetAction{ 34 | URL: "https://127.0.0.1:10259/healthz", 35 | Insecure: true, 36 | }, 37 | }, 38 | "kube-controller-manager": { 39 | InitialDelaySeconds: 1, 40 | TimeoutSeconds: 5, 41 | SuccessThreshold: 1, 42 | FailureThreshold: 2, 43 | HTTPGetAction: prober.HTTPGetAction{ 44 | 
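// kube-controller-manager only exposes /healthz on its secure port (10257), which uses a
// self-signed certificate by default, so the probe skips certificate verification.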
URL: "https://127.0.0.1:10257/healthz", 45 | Insecure: true, 46 | }, 47 | }, 48 | "kubelet": { 49 | InitialDelaySeconds: 1, 50 | TimeoutSeconds: 5, 51 | SuccessThreshold: 1, 52 | FailureThreshold: 2, 53 | HTTPGetAction: prober.HTTPGetAction{ 54 | URL: "http://127.0.0.1:10248/healthz", 55 | }, 56 | }, 57 | } 58 | 59 | func replaceRuntime(str string, runtime config.Runtime) string { 60 | if !strings.Contains(str, "%s") { 61 | return str 62 | } 63 | return fmt.Sprintf(str, runtime) 64 | } 65 | 66 | func ProbesForJoin(cfg *config.RuntimeConfig) map[string]prober.Probe { 67 | if roles.IsControlPlane(cfg.Role) { 68 | return AllProbes(config.RuntimeUnknown) 69 | } 70 | return replaceRuntimeForProbes(map[string]prober.Probe{ 71 | "kubelet": probes["kubelet"], 72 | }, config.RuntimeUnknown) 73 | } 74 | 75 | func AllProbes(runtime config.Runtime) map[string]prober.Probe { 76 | return replaceRuntimeForProbes(probes, runtime) 77 | } 78 | 79 | func replaceRuntimeForProbes(probes map[string]prober.Probe, runtime config.Runtime) map[string]prober.Probe { 80 | result := map[string]prober.Probe{} 81 | for k, v := range probes { 82 | // we don't know the runtime to find the file 83 | if runtime == config.RuntimeUnknown && (v.HTTPGetAction.CACert+ 84 | v.HTTPGetAction.ClientCert+ 85 | v.HTTPGetAction.ClientKey) != "" { 86 | continue 87 | } 88 | v.HTTPGetAction.CACert = replaceRuntime(v.HTTPGetAction.CACert, runtime) 89 | v.HTTPGetAction.ClientCert = replaceRuntime(v.HTTPGetAction.ClientCert, runtime) 90 | v.HTTPGetAction.ClientKey = replaceRuntime(v.HTTPGetAction.ClientKey, runtime) 91 | result[k] = v 92 | } 93 | return result 94 | } 95 | 96 | func ToInstruction() (*applyinator.Instruction, error) { 97 | cmd, err := self.Self() 98 | if err != nil { 99 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 100 | } 101 | return &applyinator.Instruction{ 102 | Name: "probes", 103 | SaveOutput: true, 104 | Args: []string{"probe"}, 105 | Command: cmd, 106 | }, nil 107 | } 108 | -------------------------------------------------------------------------------- /pkg/probe/run.go: -------------------------------------------------------------------------------- 1 | package probe 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "os" 8 | "time" 9 | 10 | "github.com/rancher/system-agent/pkg/applyinator" 11 | "github.com/rancher/system-agent/pkg/prober" 12 | "github.com/sirupsen/logrus" 13 | ) 14 | 15 | func RunProbes(_ context.Context, planFile string, interval time.Duration) error { 16 | f, err := os.Open(planFile) 17 | if err != nil { 18 | return fmt.Errorf("opening plan %s: %w", planFile, err) 19 | } 20 | defer f.Close() 21 | 22 | plan := &applyinator.Plan{} 23 | if err := json.NewDecoder(f).Decode(plan); err != nil { 24 | return err 25 | } 26 | 27 | if len(plan.Probes) == 0 { 28 | logrus.Infof("No probes defined in %s", planFile) 29 | return nil 30 | } 31 | logrus.Infof("Running probes defined in %s", planFile) 32 | 33 | initial := true 34 | probeStatuses := map[string]prober.ProbeStatus{} 35 | for { 36 | newProbeStatuses := map[string]prober.ProbeStatus{} 37 | for k, v := range probeStatuses { 38 | newProbeStatuses[k] = v 39 | } 40 | prober.DoProbes(plan.Probes, newProbeStatuses, initial) 41 | 42 | allGood := true 43 | for probeName, probeStatus := range newProbeStatuses { 44 | if !probeStatus.Healthy { 45 | allGood = false 46 | } 47 | 48 | oldProbeStatus, ok := probeStatuses[probeName] 49 | if !ok || oldProbeStatus.Healthy != probeStatus.Healthy { 50 | if probeStatus.Healthy { 51 | 
logrus.Infof("Probe [%s] is healthy", probeName) 52 | } else { 53 | logrus.Infof("Probe [%s] is unhealthy", probeName) 54 | } 55 | } 56 | } 57 | 58 | if allGood { 59 | logrus.Info("All probes are healthy") 60 | break 61 | } 62 | 63 | probeStatuses = newProbeStatuses 64 | initial = false 65 | time.Sleep(interval) 66 | } 67 | 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /pkg/rancher/cluster.go: -------------------------------------------------------------------------------- 1 | package rancher 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/sirupsen/logrus" 8 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "k8s.io/client-go/dynamic" 11 | "k8s.io/client-go/kubernetes" 12 | "k8s.io/client-go/tools/clientcmd" 13 | 14 | "github.com/rancher/rancherd/pkg/kubectl" 15 | ) 16 | 17 | const ( 18 | rancherSettingInternalServerURL = "internal-server-url" 19 | rancherSettingInternalCACerts = "internal-cacerts" 20 | clusterClientSecret = "local-kubeconfig" 21 | clusterNamespace = "fleet-local" 22 | ) 23 | 24 | type Options struct { 25 | Kubeconfig string 26 | } 27 | 28 | // Update cluster client secret (fleet-local/local-kubeconfig): 29 | // apiServerURL: value of Rancher setting "internal-server-url" 30 | // apiServerCA: value of Rancher setting "internal-cacerts" 31 | // Fleet needs these values to be set after Rancher v2.7.5 to provision the local cluster. 32 | func UpdateClientSecret(ctx context.Context, opts *Options) error { 33 | if opts == nil { 34 | opts = &Options{} 35 | } 36 | 37 | kubeconfig, err := kubectl.GetKubeconfig(opts.Kubeconfig) 38 | if err != nil { 39 | return err 40 | } 41 | 42 | conf, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | client := dynamic.NewForConfigOrDie(conf) 48 | settingClient := client.Resource(schema.GroupVersionResource{ 49 | Group: "management.cattle.io", 50 | Version: "v3", 51 | Resource: "settings", 52 | }) 53 | 54 | internalServerURLSetting, err := settingClient.Get(ctx, rancherSettingInternalServerURL, v1.GetOptions{}) 55 | if err != nil { 56 | return err 57 | } 58 | internalServerURL := internalServerURLSetting.Object["value"].(string) 59 | logrus.Infof("Rancher setting %s is %q", rancherSettingInternalServerURL, internalServerURL) 60 | 61 | internalCACertSetting, err := settingClient.Get(ctx, rancherSettingInternalCACerts, v1.GetOptions{}) 62 | if err != nil { 63 | return err 64 | } 65 | internalCACerts := internalCACertSetting.Object["value"].(string) 66 | logrus.Infof("Rancher setting %s is %q", rancherSettingInternalCACerts, internalCACerts) 67 | 68 | if internalServerURL == "" || internalCACerts == "" { 69 | return fmt.Errorf("both %s and %s settings must be configured", rancherSettingInternalServerURL, rancherSettingInternalCACerts) 70 | } 71 | 72 | k8s, err := kubernetes.NewForConfig(conf) 73 | if err != nil { 74 | return err 75 | } 76 | 77 | secret, err := k8s.CoreV1().Secrets(clusterNamespace).Get(ctx, clusterClientSecret, v1.GetOptions{}) 78 | if err != nil { 79 | return err 80 | } 81 | 82 | toUpdate := secret.DeepCopy() 83 | toUpdate.Data["apiServerURL"] = []byte(internalServerURL) 84 | toUpdate.Data["apiServerCA"] = []byte(internalCACerts) 85 | _, err = k8s.CoreV1().Secrets(clusterNamespace).Update(ctx, toUpdate, v1.UpdateOptions{}) 86 | 87 | if err == nil { 88 | fmt.Println("Cluster client secret is updated.") 89 | } 90 | 91 | return err 92 | } 93 |
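A minimal sketch of driving UpdateClientSecret directly, for orientation only: in the real flow the "update-client-secret" instruction defined in pkg/rancher/wait.go re-invokes the rancherd binary itself, so the standalone main package below and its error handling are illustrative assumptions, not the project's actual entry point.

package main

import (
	"context"
	"log"

	"github.com/rancher/rancherd/pkg/rancher"
)

func main() {
	// An empty Kubeconfig lets kubectl.GetKubeconfig fall back to its default lookup,
	// the same way rancherd's own helpers call it with "".
	opts := &rancher.Options{Kubeconfig: ""}
	if err := rancher.UpdateClientSecret(context.Background(), opts); err != nil {
		log.Fatalf("updating fleet-local/local-kubeconfig: %v", err)
	}
}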
-------------------------------------------------------------------------------- /pkg/rancher/run.go: -------------------------------------------------------------------------------- 1 | package rancher 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | 7 | "github.com/rancher/rancherd/pkg/config" 8 | "github.com/rancher/rancherd/pkg/images" 9 | "github.com/rancher/rancherd/pkg/kubectl" 10 | "github.com/rancher/system-agent/pkg/applyinator" 11 | "github.com/rancher/wrangler/pkg/data" 12 | "sigs.k8s.io/yaml" 13 | ) 14 | 15 | var defaultValues = map[string]interface{}{ 16 | "ingress": map[string]interface{}{ 17 | "enabled": false, 18 | }, 19 | "features": "multi-cluster-management=false", 20 | "antiAffinity": "required", 21 | "replicas": -3, 22 | "tls": "external", 23 | "hostPort": 8443, 24 | } 25 | 26 | func GetRancherValues(dataDir string) string { 27 | return fmt.Sprintf("%s/rancher/values.yaml", dataDir) 28 | } 29 | 30 | func ToFile(cfg *config.Config, dataDir string) (*applyinator.File, error) { 31 | values := data.MergeMaps(defaultValues, map[string]interface{}{ 32 | "systemDefaultRegistry": cfg.SystemDefaultRegistry, 33 | }) 34 | values = data.MergeMaps(values, cfg.RancherValues) 35 | 36 | data, err := yaml.Marshal(values) 37 | if err != nil { 38 | return nil, fmt.Errorf("marshalling Rancher values.yaml: %w", err) 39 | } 40 | 41 | return &applyinator.File{ 42 | Content: base64.StdEncoding.EncodeToString(data), 43 | Path: GetRancherValues(dataDir), 44 | }, nil 45 | } 46 | 47 | func ToInstruction(imageOverride, systemDefaultRegistry, k8sVersion, rancherVersion, dataDir string) (*applyinator.Instruction, error) { 48 | return &applyinator.Instruction{ 49 | Name: "rancher", 50 | SaveOutput: true, 51 | Image: images.GetRancherInstallerImage(imageOverride, systemDefaultRegistry, rancherVersion), 52 | Env: append(kubectl.Env(k8sVersion), fmt.Sprintf("RANCHER_VALUES=%s", GetRancherValues(dataDir))), 53 | }, nil 54 | } 55 | 56 | func ToUpgradeInstruction(imageOverride, systemDefaultRegistry, k8sVersion, rancherVersion, _ string) (*applyinator.Instruction, error) { 57 | return &applyinator.Instruction{ 58 | Name: "rancher", 59 | SaveOutput: true, 60 | Image: images.GetRancherInstallerImage(imageOverride, systemDefaultRegistry, rancherVersion), 61 | Env: kubectl.Env(k8sVersion), 62 | }, nil 63 | } 64 | -------------------------------------------------------------------------------- /pkg/rancher/wait.go: -------------------------------------------------------------------------------- 1 | package rancher 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/rancher/system-agent/pkg/applyinator" 8 | 9 | "github.com/rancher/rancherd/pkg/kubectl" 10 | "github.com/rancher/rancherd/pkg/self" 11 | ) 12 | 13 | func ToWaitRancherInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 14 | cmd, err := self.Self() 15 | if err != nil { 16 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 17 | } 18 | return &applyinator.Instruction{ 19 | Name: "wait-rancher", 20 | SaveOutput: true, 21 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-system", "rollout", "status", "-w", "deploy/rancher"}, 22 | Env: kubectl.Env(k8sVersion), 23 | Command: cmd, 24 | }, nil 25 | } 26 | 27 | func ToWaitRancherWebhookInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 28 | cmd, err := self.Self() 29 | if err != nil { 30 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 31 | } 32 | return &applyinator.Instruction{ 33 | Name: 
"wait-rancher-webhook", 34 | SaveOutput: true, 35 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-system", "rollout", "status", "-w", "deploy/rancher-webhook"}, 36 | Env: kubectl.Env(k8sVersion), 37 | Command: cmd, 38 | }, nil 39 | } 40 | 41 | func ToWaitSUCInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 42 | cmd, err := self.Self() 43 | if err != nil { 44 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 45 | } 46 | return &applyinator.Instruction{ 47 | Name: "wait-system-upgrade-controller", 48 | SaveOutput: true, 49 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-system", "rollout", "status", "-w", "deploy/system-upgrade-controller"}, 50 | Env: kubectl.Env(k8sVersion), 51 | Command: cmd, 52 | }, nil 53 | } 54 | 55 | func ToWaitSUCPlanInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 56 | cmd, err := self.Self() 57 | if err != nil { 58 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 59 | } 60 | return &applyinator.Instruction{ 61 | Name: "wait-suc-plan-resolved", 62 | SaveOutput: true, 63 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-system", "wait", 64 | "--for=condition=LatestResolved=true", "plans.upgrade.cattle.io", "system-agent-upgrader"}, 65 | Env: kubectl.Env(k8sVersion), 66 | Command: cmd, 67 | }, nil 68 | } 69 | 70 | func ToWaitClusterClientSecretInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 71 | cmd, err := self.Self() 72 | if err != nil { 73 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 74 | } 75 | return &applyinator.Instruction{ 76 | Name: "wait-cluster-client-secret-resolved", 77 | SaveOutput: true, 78 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", clusterNamespace, "get", 79 | "secret", clusterClientSecret}, 80 | Env: kubectl.Env(k8sVersion), 81 | Command: cmd, 82 | }, nil 83 | } 84 | 85 | func ToUpdateClientSecretInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 86 | cmd, err := self.Self() 87 | if err != nil { 88 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 89 | } 90 | return &applyinator.Instruction{ 91 | Name: "update-client-secret", 92 | SaveOutput: true, 93 | Args: []string{"update-client-secret"}, 94 | Env: kubectl.Env(k8sVersion), 95 | Command: cmd, 96 | }, nil 97 | } 98 | 99 | func ToScaleDownFleetControllerInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 100 | cmd, err := self.Self() 101 | if err != nil { 102 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 103 | } 104 | return &applyinator.Instruction{ 105 | Name: "scale-down-fleet-controller", 106 | SaveOutput: true, 107 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-fleet-system", "scale", "--replicas", "0", "deploy/fleet-controller"}, 108 | Env: kubectl.Env(k8sVersion), 109 | Command: cmd, 110 | }, nil 111 | } 112 | 113 | func ToScaleUpFleetControllerInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 114 | cmd, err := self.Self() 115 | if err != nil { 116 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 117 | } 118 | return &applyinator.Instruction{ 119 | Name: "scale-up-fleet-controller", 120 | SaveOutput: true, 121 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-fleet-system", "scale", "--replicas", "1", "deploy/fleet-controller"}, 122 | Env: kubectl.Env(k8sVersion), 123 | Command: cmd, 124 
| }, nil 125 | } 126 | 127 | func ToDeleteRancherWebhookValidationConfiguration(k8sVersion string) (*applyinator.Instruction, error) { 128 | cmd, err := self.Self() 129 | if err != nil { 130 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 131 | } 132 | return &applyinator.Instruction{ 133 | Name: "delete-rancher-webhook-validation-configuration", 134 | SaveOutput: true, 135 | Args: []string{"retry", kubectl.Command(k8sVersion), "delete", "validatingwebhookconfiguration", "rancher.cattle.io"}, 136 | Env: kubectl.Env(k8sVersion), 137 | Command: cmd, 138 | }, nil 139 | } 140 | 141 | func ToRestartRancherWebhookInstruction(k8sVersion string) (*applyinator.Instruction, error) { 142 | cmd, err := self.Self() 143 | if err != nil { 144 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 145 | } 146 | return &applyinator.Instruction{ 147 | Name: "wait-rancher-webhook", 148 | SaveOutput: true, 149 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-system", "rollout", "restart", "deploy/rancher-webhook"}, 150 | Env: kubectl.Env(k8sVersion), 151 | Command: cmd, 152 | }, nil 153 | } 154 | 155 | func ToCreateStvAggregationSecret(k8sVersion string) (*applyinator.Instruction, error) { 156 | cmd, err := self.Self() 157 | if err != nil { 158 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 159 | } 160 | return &applyinator.Instruction{ 161 | Name: "create-stv-aggregation-secret", 162 | SaveOutput: true, 163 | Args: []string{"retry", kubectl.Command(k8sVersion), "create", "secret", "generic", "-n", "cattle-system", "stv-aggregation"}, 164 | Env: kubectl.Env(k8sVersion), 165 | Command: cmd, 166 | }, nil 167 | } 168 | 169 | // Needs to patch status subresource 170 | // k patch cluster.provisioning local -n fleet-local --subresource=status --type=merge --patch '{"status":{"fleetWorkspaceName": "fleet-local"}}' 171 | func PatchLocalProvisioningClusterStatus(_, _, k8sVersion string) (*applyinator.Instruction, error) { 172 | cmd, err := self.Self() 173 | if err != nil { 174 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 175 | } 176 | return &applyinator.Instruction{ 177 | Name: "patch-provisioning-cluster-status", 178 | SaveOutput: true, 179 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "fleet-local", "patch", "cluster.provisioning", "local", "--subresource=status", "--type=merge", "--patch", "{\"status\":{\"fleetWorkspaceName\": \"fleet-local\"}}"}, 180 | Env: kubectl.Env(k8sVersion), 181 | Command: cmd, 182 | }, nil 183 | } 184 | -------------------------------------------------------------------------------- /pkg/rancherd/rancher.go: -------------------------------------------------------------------------------- 1 | package rancherd 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "time" 9 | 10 | "github.com/rancher/rancherd/pkg/config" 11 | "github.com/rancher/rancherd/pkg/plan" 12 | "github.com/rancher/rancherd/pkg/version" 13 | "github.com/rancher/rancherd/pkg/versions" 14 | "github.com/sirupsen/logrus" 15 | "sigs.k8s.io/yaml" 16 | ) 17 | 18 | const ( 19 | // DefaultDataDir is the location of all state for rancherd 20 | DefaultDataDir = "/var/lib/rancher/rancherd" 21 | // DefaultConfigFile is the location of the rancherd config 22 | DefaultConfigFile = "/etc/rancher/rancherd/config.yaml" 23 | ) 24 | 25 | type Config struct { 26 | Force bool 27 | DataDir string 28 | ConfigPath string 29 | } 30 | 31 | type UpgradeConfig struct { 32 | RancherVersion 
string 33 | KubernetesVersion string 34 | RancherOSVersion string 35 | Force bool 36 | } 37 | 38 | type Rancherd struct { 39 | cfg Config 40 | } 41 | 42 | func New(cfg Config) *Rancherd { 43 | return &Rancherd{ 44 | cfg: cfg, 45 | } 46 | } 47 | 48 | func (r *Rancherd) Info(ctx context.Context) error { 49 | rancherVersion, k8sVersion, rancherOSVersion := r.getExistingVersions(ctx) 50 | 51 | fmt.Printf(" Rancher: %s\n", rancherVersion) 52 | fmt.Printf(" Kubernetes: %s\n", k8sVersion) 53 | if rancherOSVersion != "" { 54 | fmt.Printf(" RancherOS: %s\n", rancherOSVersion) 55 | } 56 | fmt.Printf(" Rancherd: %s\n\n", version.FriendlyVersion()) 57 | return nil 58 | } 59 | 60 | func (r *Rancherd) Upgrade(ctx context.Context, upgradeConfig UpgradeConfig) error { 61 | cfg, err := config.Load(r.cfg.ConfigPath) 62 | if err != nil { 63 | return fmt.Errorf("loading config: %w", err) 64 | } 65 | 66 | rancherVersion, err := versions.RancherVersion(upgradeConfig.RancherVersion) 67 | if err != nil { 68 | return err 69 | } 70 | 71 | k8sVersion, err := versions.K8sVersion(upgradeConfig.KubernetesVersion) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | rancherOSVersion, err := versions.RancherOSVersion(upgradeConfig.RancherOSVersion) 77 | if err != nil { 78 | return err 79 | } 80 | 81 | existingRancherVersion, existingK8sVersion, existingRancherOSVersion := r.getExistingVersions(ctx) 82 | if existingRancherVersion == rancherVersion && 83 | existingK8sVersion == k8sVersion && 84 | (existingRancherOSVersion == "" || existingRancherOSVersion == rancherOSVersion) { 85 | fmt.Printf("\nNothing to upgrade:\n\n") 86 | fmt.Printf(" Rancher: %s\n", rancherVersion) 87 | if existingRancherOSVersion != "" { 88 | fmt.Printf(" RancherOS: %s\n", rancherOSVersion) 89 | } 90 | fmt.Printf(" Kubernetes: %s\n\n", k8sVersion) 91 | return nil 92 | } 93 | 94 | if existingRancherVersion == rancherVersion { 95 | rancherVersion = "" 96 | } 97 | if existingK8sVersion == k8sVersion { 98 | k8sVersion = "" 99 | } 100 | if existingRancherOSVersion == "" || existingRancherOSVersion == rancherOSVersion { 101 | rancherOSVersion = "" 102 | } 103 | 104 | if k8sVersion != "" && existingK8sVersion != "" { 105 | existingRuntime := config.GetRuntime(existingK8sVersion) 106 | newRuntime := config.GetRuntime(k8sVersion) 107 | if existingRuntime != newRuntime { 108 | return fmt.Errorf("existing %s version %s is not compatible with %s version %s", 109 | existingRuntime, existingK8sVersion, newRuntime, k8sVersion) 110 | } 111 | } 112 | 113 | fmt.Printf("\nUpgrading to:\n\n") 114 | if rancherVersion != "" { 115 | fmt.Printf(" Rancher: %s => %s\n", existingRancherVersion, rancherVersion) 116 | } 117 | if k8sVersion != "" { 118 | fmt.Printf(" Kubernetes: %s => %s\n", existingK8sVersion, k8sVersion) 119 | } 120 | if rancherOSVersion != "" { 121 | fmt.Printf(" RancherOS: %s => %s\n", existingRancherOSVersion, rancherOSVersion) 122 | } 123 | 124 | if !r.cfg.Force { 125 | go func() { 126 | <-ctx.Done() 127 | logrus.Fatalf("Aborting") 128 | }() 129 | 130 | fmt.Printf("\nPress any key to continue, or CTRL+C to cancel\n") 131 | _, err := os.Stdin.Read(make([]byte, 1)) 132 | if err != nil { 133 | return err 134 | } 135 | } 136 | 137 | nodePlan, err := plan.Upgrade(&cfg, k8sVersion, rancherVersion, rancherOSVersion, DefaultDataDir) 138 | if err != nil { 139 | return err 140 | } 141 | 142 | return plan.RunWithKubernetesVersion(ctx, k8sVersion, nodePlan, DefaultDataDir) 143 | } 144 | 145 | func (r *Rancherd) execute(ctx context.Context) error { 146 | cfg, err := 
config.Load(r.cfg.ConfigPath) 147 | if err != nil { 148 | return fmt.Errorf("loading config: %w", err) 149 | } 150 | 151 | if err := r.setWorking(cfg); err != nil { 152 | return fmt.Errorf("saving working config to %s: %w", r.WorkingStamp(), err) 153 | } 154 | 155 | if cfg.Role == "" { 156 | logrus.Infof("No role defined, skipping bootstrap") 157 | return nil 158 | } 159 | 160 | k8sVersion, err := versions.K8sVersion(cfg.KubernetesVersion) 161 | if err != nil { 162 | return err 163 | } 164 | 165 | rancherVersion, err := versions.RancherVersion(cfg.RancherVersion) 166 | if err != nil { 167 | return err 168 | } 169 | 170 | logrus.Infof("Bootstrapping Rancher (%s/%s)", rancherVersion, k8sVersion) 171 | 172 | nodePlan, err := plan.ToPlan(ctx, &cfg, r.cfg.DataDir) 173 | if err != nil { 174 | return fmt.Errorf("generating plan: %w", err) 175 | } 176 | 177 | if err := plan.Run(ctx, &cfg, nodePlan, r.cfg.DataDir); err != nil { 178 | return fmt.Errorf("running plan: %w", err) 179 | } 180 | 181 | if err := r.setDone(cfg); err != nil { 182 | return err 183 | } 184 | 185 | logrus.Infof("Successfully Bootstrapped Rancher (%s/%s)", rancherVersion, k8sVersion) 186 | return nil 187 | } 188 | 189 | func (r *Rancherd) Run(ctx context.Context) error { 190 | if done, err := r.done(); err != nil { 191 | return fmt.Errorf("checking done stamp [%s]: %w", r.DoneStamp(), err) 192 | } else if done { 193 | logrus.Infof("System is already bootstrapped. To force the system to be bootstrapped again run with the --force flag") 194 | return nil 195 | } 196 | 197 | for { 198 | err := r.execute(ctx) 199 | if err == nil { 200 | return nil 201 | } 202 | logrus.Infof("failed to bootstrap system, will retry: %v", err) 203 | select { 204 | case <-ctx.Done(): 205 | return ctx.Err() 206 | case <-time.After(15 * time.Second): 207 | } 208 | } 209 | } 210 | 211 | func (r *Rancherd) writeConfig(path string, cfg config.Config) error { 212 | if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil { 213 | return fmt.Errorf("mkdir %s: %w", filepath.Dir(path), err) 214 | } 215 | f, err := os.Create(path) 216 | if err != nil { 217 | return err 218 | } 219 | defer f.Close() 220 | data, err := yaml.Marshal(cfg) 221 | if err != nil { 222 | return err 223 | } 224 | _, err = f.Write(data) 225 | return err 226 | } 227 | 228 | func (r *Rancherd) setWorking(cfg config.Config) error { 229 | return r.writeConfig(r.WorkingStamp(), cfg) 230 | } 231 | 232 | func (r *Rancherd) setDone(cfg config.Config) error { 233 | return r.writeConfig(r.DoneStamp(), cfg) 234 | } 235 | 236 | func (r *Rancherd) done() (bool, error) { 237 | if r.cfg.Force { 238 | _ = os.Remove(r.DoneStamp()) 239 | return false, nil 240 | } 241 | _, err := os.Stat(r.DoneStamp()) 242 | if err == nil { 243 | return true, nil 244 | } else if os.IsNotExist(err) { 245 | return false, nil 246 | } 247 | return false, err 248 | } 249 | 250 | func (r *Rancherd) DoneStamp() string { 251 | return filepath.Join(r.cfg.DataDir, "bootstrapped") 252 | } 253 | 254 | func (r *Rancherd) WorkingStamp() string { 255 | return filepath.Join(r.cfg.DataDir, "working") 256 | } 257 | -------------------------------------------------------------------------------- /pkg/rancherd/versions.go: -------------------------------------------------------------------------------- 1 | package rancherd 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "compress/gzip" 7 | "context" 8 | "encoding/base64" 9 | "encoding/json" 10 | "io/ioutil" 11 | "runtime" 12 | "strings" 13 | 14 | "github.com/rancher/rancherd/pkg/kubectl" 15 | 
data2 "github.com/rancher/wrangler/pkg/data" 16 | "github.com/rancher/wrangler/pkg/data/convert" 17 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 18 | "k8s.io/client-go/kubernetes" 19 | "k8s.io/client-go/tools/clientcmd" 20 | ) 21 | 22 | func (r *Rancherd) getExistingVersions(ctx context.Context) (rancherVersion, k8sVersion, rancherOSVersion string) { 23 | kubeConfig, err := kubectl.GetKubeconfig("") 24 | if err != nil { 25 | return "", "", "" 26 | } 27 | 28 | data, err := ioutil.ReadFile(kubeConfig) 29 | if err != nil { 30 | return "", "", "" 31 | } 32 | 33 | restConfig, err := clientcmd.RESTConfigFromKubeConfig(data) 34 | if err != nil { 35 | return "", "", "" 36 | } 37 | 38 | k8s, err := kubernetes.NewForConfig(restConfig) 39 | if err != nil { 40 | return "", "", "" 41 | } 42 | 43 | return getRancherVersion(ctx, k8s), getK8sVersion(ctx, k8s), getRancherOSVersion() 44 | } 45 | 46 | func getRancherVersion(ctx context.Context, k8s kubernetes.Interface) string { 47 | secrets, err := k8s.CoreV1().Secrets("cattle-system").List(ctx, metav1.ListOptions{ 48 | LabelSelector: "name=rancher,status=deployed", 49 | }) 50 | if err != nil || len(secrets.Items) == 0 { 51 | return "" 52 | } 53 | 54 | data, err := base64.StdEncoding.DecodeString(string(secrets.Items[0].Data["release"])) 55 | if err != nil { 56 | return "" 57 | } 58 | 59 | gz, err := gzip.NewReader(bytes.NewReader(data)) 60 | if err != nil { 61 | return "" 62 | } 63 | 64 | release := map[string]interface{}{} 65 | if err := json.NewDecoder(gz).Decode(&release); err != nil { 66 | return "" 67 | } 68 | 69 | version := convert.ToString(data2.GetValueN(release, "chart", "metadata", "version")) 70 | if version == "" { 71 | return "" 72 | } 73 | 74 | return "v" + version 75 | } 76 | 77 | func getK8sVersion(ctx context.Context, k8s kubernetes.Interface) string { 78 | nodes, err := k8s.CoreV1().Nodes().List(ctx, metav1.ListOptions{ 79 | LabelSelector: "node-role.kubernetes.io/control-plane=true", 80 | }) 81 | if err != nil || len(nodes.Items) == 0 { 82 | return "" 83 | } 84 | return nodes.Items[0].Status.NodeInfo.KubeletVersion 85 | } 86 | 87 | func getRancherOSVersion() string { 88 | data, err := ioutil.ReadFile("/usr/lib/rancheros-release") 89 | if err != nil { 90 | return "" 91 | } 92 | 93 | scan := bufio.NewScanner(bytes.NewBuffer(data)) 94 | for scan.Scan() { 95 | if strings.HasPrefix(scan.Text(), "IMAGE=") { 96 | return strings.TrimSuffix(strings.TrimPrefix(scan.Text(), "IMAGE="), "-"+runtime.GOARCH) 97 | } 98 | } 99 | return "" 100 | } 101 | -------------------------------------------------------------------------------- /pkg/registry/registry.go: -------------------------------------------------------------------------------- 1 | package registry 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | 7 | "github.com/rancher/rancherd/pkg/config" 8 | "github.com/rancher/system-agent/pkg/applyinator" 9 | "github.com/rancher/wharfie/pkg/registries" 10 | "sigs.k8s.io/yaml" 11 | ) 12 | 13 | func ToFile(registry *registries.Registry, runtime config.Runtime) (*applyinator.File, error) { 14 | if registry == nil { 15 | return nil, nil 16 | } 17 | 18 | data, err := yaml.Marshal(registry) 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | return &applyinator.File{ 24 | Content: base64.StdEncoding.EncodeToString(data), 25 | Path: GetConfigFile(runtime), 26 | Permissions: "0400", 27 | }, nil 28 | 29 | } 30 | 31 | func GetConfigFile(runtime config.Runtime) string { 32 | return fmt.Sprintf("/etc/rancher/%s/registries.yaml", runtime) 33 | } 34 | 
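A rough sketch of how the registry helpers above fit together; the Mirrors/Endpoints field names are an assumption about the wharfie registries schema (it mirrors the k3s registries.yaml layout) rather than something defined in this repository, and the endpoint URL is a placeholder.

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/rancher/rancherd/pkg/config"
	"github.com/rancher/rancherd/pkg/registry"
	"github.com/rancher/wharfie/pkg/registries"
)

func main() {
	// Assumed schema: mirror docker.io through a private registry endpoint.
	reg := &registries.Registry{
		Mirrors: map[string]registries.Mirror{
			"docker.io": {Endpoints: []string{"https://registry.example.internal:5000"}},
		},
	}

	// For the k3s runtime, ToFile targets /etc/rancher/k3s/registries.yaml and
	// base64-encodes the marshalled YAML into the applyinator file content.
	file, err := registry.ToFile(reg, config.RuntimeK3S)
	if err != nil {
		panic(err)
	}
	raw, _ := base64.StdEncoding.DecodeString(file.Content)
	fmt.Println(file.Path)
	fmt.Println(string(raw))
}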
-------------------------------------------------------------------------------- /pkg/resources/resources.go: -------------------------------------------------------------------------------- 1 | package resources 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "strings" 9 | 10 | v1 "github.com/rancher/rancher/pkg/apis/rke.cattle.io/v1" 11 | "github.com/rancher/system-agent/pkg/applyinator" 12 | "github.com/rancher/wrangler/pkg/data/convert" 13 | "github.com/rancher/wrangler/pkg/randomtoken" 14 | "github.com/rancher/wrangler/pkg/yaml" 15 | "github.com/sirupsen/logrus" 16 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 17 | "k8s.io/apimachinery/pkg/runtime" 18 | 19 | "github.com/rancher/rancherd/pkg/config" 20 | "github.com/rancher/rancherd/pkg/images" 21 | "github.com/rancher/rancherd/pkg/kubectl" 22 | "github.com/rancher/rancherd/pkg/self" 23 | "github.com/rancher/rancherd/pkg/versions" 24 | ) 25 | 26 | const ( 27 | localRKEStateSecretType = "rke.cattle.io/cluster-state" 28 | ) 29 | 30 | func writeCattleID(id string) error { 31 | if err := os.MkdirAll("/etc/rancher", 0755); err != nil { 32 | return fmt.Errorf("mkdir /etc/rancher: %w", err) 33 | } 34 | if err := os.MkdirAll("/etc/rancher/agent", 0700); err != nil { 35 | return fmt.Errorf("mkdir /etc/rancher/agent: %w", err) 36 | } 37 | return ioutil.WriteFile("/etc/rancher/agent/cattle-id", []byte(id), 0400) 38 | } 39 | 40 | func getCattleID() (string, error) { 41 | data, err := ioutil.ReadFile("/etc/rancher/agent/cattle-id") 42 | if err != nil && !os.IsNotExist(err) { 43 | return "", err 44 | } 45 | id := strings.TrimSpace(string(data)) 46 | if id == "" { 47 | id, err = randomtoken.Generate() 48 | if err != nil { 49 | return "", err 50 | } 51 | return id, writeCattleID(id) 52 | } 53 | return id, nil 54 | } 55 | 56 | func ToBootstrapFile(config *config.Config, path string) (*applyinator.File, error) { 57 | nodeName := config.NodeName 58 | if nodeName == "" { 59 | hostname, err := os.Hostname() 60 | if err != nil { 61 | return nil, fmt.Errorf("looking up hostname: %w", err) 62 | } 63 | nodeName = strings.Split(hostname, ".")[0] 64 | } 65 | 66 | k8sVersion, err := versions.K8sVersion(config.KubernetesVersion) 67 | if err != nil { 68 | return nil, err 69 | } 70 | 71 | token := config.Token 72 | if token == "" { 73 | token, err = randomtoken.Generate() 74 | if err != nil { 75 | return nil, err 76 | } 77 | } 78 | 79 | resources := config.Resources 80 | return ToFile(append(resources, v1.GenericMap{ 81 | Data: map[string]interface{}{ 82 | "kind": "Node", 83 | "apiVersion": "v1", 84 | "metadata": map[string]interface{}{ 85 | "name": nodeName, 86 | "labels": map[string]interface{}{ 87 | "node-role.kubernetes.io/etcd": "true", 88 | }, 89 | }, 90 | }, 91 | }, v1.GenericMap{ 92 | Data: map[string]interface{}{ 93 | "kind": "Namespace", 94 | "apiVersion": "v1", 95 | "metadata": map[string]interface{}{ 96 | "name": "fleet-local", 97 | }, 98 | }, 99 | }, v1.GenericMap{ 100 | Data: map[string]interface{}{ 101 | "kind": "Cluster", 102 | "apiVersion": "provisioning.cattle.io/v1", 103 | "metadata": map[string]interface{}{ 104 | "name": "local", 105 | "namespace": "fleet-local", 106 | "labels": map[string]interface{}{ 107 | "provisioning.cattle.io/management-cluster-name": "local", 108 | }, 109 | }, 110 | "spec": map[string]interface{}{ 111 | "kubernetesVersion": k8sVersion, 112 | // Rancher needs a non-null rkeConfig to apply system-upgrade-controller managed chart. 
113 | "rkeConfig": map[string]interface{}{}, 114 | }, 115 | }, 116 | }, v1.GenericMap{ 117 | Data: map[string]interface{}{ 118 | "kind": "Secret", 119 | "apiVersion": "v1", 120 | "metadata": map[string]interface{}{ 121 | "name": "local-rke-state", 122 | "namespace": "fleet-local", 123 | }, 124 | "type": localRKEStateSecretType, 125 | "data": map[string]interface{}{ 126 | "serverToken": []byte(token), 127 | "agentToken": []byte(token), 128 | }, 129 | }, 130 | }, v1.GenericMap{ 131 | Data: map[string]interface{}{ 132 | "kind": "ClusterRegistrationToken", 133 | "apiVersion": "management.cattle.io/v3", 134 | "metadata": map[string]interface{}{ 135 | "name": "default-token", 136 | "namespace": "local", 137 | }, 138 | "spec": map[string]interface{}{ 139 | "clusterName": "local", 140 | }, 141 | "status": map[string]interface{}{ 142 | "token": token, 143 | }, 144 | }, 145 | }, v1.GenericMap{ 146 | Data: map[string]interface{}{ 147 | "apiVersion": "catalog.cattle.io/v1", 148 | "kind": "ClusterRepo", 149 | "metadata": map[string]interface{}{ 150 | "name": "rancher-stable", 151 | }, 152 | "spec": map[string]interface{}{ 153 | "url": "https://releases.rancher.com/server-charts/stable", 154 | }, 155 | }, 156 | }), path) 157 | } 158 | 159 | func ToHarvesterClusterRepoFile(path string) (*applyinator.File, error) { 160 | file := "/usr/share/rancher/rancherd/config.yaml.d/91-harvester-bootstrap-repo.yaml" 161 | bytes, err := os.ReadFile(file) 162 | if err != nil && !os.IsNotExist(err) { 163 | return nil, err 164 | } 165 | 166 | logrus.Infof("Loading config file [%s]", file) 167 | values := map[string]interface{}{} 168 | if err := yaml.Unmarshal(bytes, &values); err != nil { 169 | return nil, err 170 | } 171 | 172 | result := config.Config{} 173 | convert.ToObj(values, &result) 174 | 175 | resources := []v1.GenericMap{} 176 | for _, resource := range result.Resources { 177 | if resource.Data["kind"] == "Deployment" || resource.Data["kind"] == "Service" { 178 | resources = append(resources, resource) 179 | } 180 | } 181 | return ToFile(resources, path) 182 | } 183 | 184 | func ToFile(resources []v1.GenericMap, path string) (*applyinator.File, error) { 185 | if len(resources) == 0 { 186 | return nil, nil 187 | } 188 | 189 | var objs []runtime.Object 190 | for _, resource := range resources { 191 | objs = append(objs, &unstructured.Unstructured{ 192 | Object: resource.Data, 193 | }) 194 | } 195 | 196 | data, err := yaml.ToBytes(objs) 197 | if err != nil { 198 | return nil, err 199 | } 200 | 201 | return &applyinator.File{ 202 | Content: base64.StdEncoding.EncodeToString(data), 203 | Path: path, 204 | }, nil 205 | } 206 | 207 | func GetBootstrapManifests(dataDir string) string { 208 | return fmt.Sprintf("%s/bootstrapmanifests/rancherd.yaml", dataDir) 209 | } 210 | 211 | func ToInstruction(imageOverride, systemDefaultRegistry, k8sVersion, dataDir string) (*applyinator.Instruction, error) { 212 | bootstrap := GetBootstrapManifests(dataDir) 213 | cmd, err := self.Self() 214 | if err != nil { 215 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 216 | } 217 | return &applyinator.Instruction{ 218 | Name: "bootstrap", 219 | SaveOutput: true, 220 | Image: images.GetInstallerImage(imageOverride, systemDefaultRegistry, k8sVersion), 221 | Args: []string{"retry", kubectl.Command(k8sVersion), "apply", "--validate=false", "-f", bootstrap}, 222 | Command: cmd, 223 | Env: kubectl.Env(k8sVersion), 224 | }, nil 225 | } 226 | 227 | func GetHarvesterClusterRepoManifests(dataDir string) string { 228 | 
return fmt.Sprintf("%s/bootstrapmanifests/harvester-cluster-repo.yaml", dataDir) 229 | } 230 | 231 | func ToHarvesterClusterRepoInstruction(imageOverride, systemDefaultRegistry, k8sVersion, dataDir string) (*applyinator.Instruction, error) { 232 | bootstrap := GetHarvesterClusterRepoManifests(dataDir) 233 | cmd, err := self.Self() 234 | if err != nil { 235 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 236 | } 237 | return &applyinator.Instruction{ 238 | Name: "harvester-cluster-repo", 239 | SaveOutput: true, 240 | Image: images.GetInstallerImage(imageOverride, systemDefaultRegistry, k8sVersion), 241 | Args: []string{"retry", kubectl.Command(k8sVersion), "apply", "--validate=false", "-f", bootstrap}, 242 | Command: cmd, 243 | Env: kubectl.Env(k8sVersion), 244 | }, nil 245 | } 246 | 247 | func ToWaitHarvesterClusterRepoInstruction(k8sVersion string) (*applyinator.Instruction, error) { 248 | cmd, err := self.Self() 249 | if err != nil { 250 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 251 | } 252 | return &applyinator.Instruction{ 253 | Name: "wait-harvester-cluster-repo", 254 | SaveOutput: true, 255 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "cattle-system", "rollout", "status", "-w", "deploy/harvester-cluster-repo"}, 256 | Env: kubectl.Env(k8sVersion), 257 | Command: cmd, 258 | }, nil 259 | } 260 | -------------------------------------------------------------------------------- /pkg/retry/apply.go: -------------------------------------------------------------------------------- 1 | package retry 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/exec" 7 | "time" 8 | 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | func Retry(ctx context.Context, interval time.Duration, args []string) error { 13 | for { 14 | cmd := exec.Command(args[0], args[1:]...) 
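// Inherit the parent's stdio so output from the wrapped command (typically a kubectl
// invocation built by the instruction helpers) lands in rancherd's own logs and in the
// instruction output captured by the applyinator.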
15 | cmd.Stdout = os.Stdout 16 | cmd.Stdin = os.Stdin 17 | cmd.Stderr = os.Stderr 18 | err := cmd.Run() 19 | if err != nil { 20 | logrus.Errorf("will retry failed command %v: %v", args, err) 21 | select { 22 | case <-time.After(interval): 23 | continue 24 | case <-ctx.Done(): 25 | return ctx.Err() 26 | } 27 | } 28 | return nil 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /pkg/roles/role.go: -------------------------------------------------------------------------------- 1 | package roles 2 | 3 | import "strings" 4 | 5 | func IsEtcd(role string) bool { 6 | return strings.Contains(role, "server") || 7 | strings.Contains(role, "etcd") || 8 | strings.Contains(role, "cluster-init") 9 | } 10 | 11 | func IsControlPlane(role string) bool { 12 | return strings.Contains(role, "server") || 13 | strings.Contains(role, "cluster-init") || 14 | strings.Contains(role, "control-plane") || 15 | strings.Contains(role, "controlplane") 16 | } 17 | 18 | func IsWorker(role string) bool { 19 | return strings.Contains(role, "worker") || 20 | strings.Contains(role, "cluster-init") || 21 | strings.Contains(role, "agent") || 22 | strings.Contains(role, "server") 23 | } 24 | -------------------------------------------------------------------------------- /pkg/runtime/instruction.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/rancher/rancherd/pkg/config" 9 | "github.com/rancher/rancherd/pkg/images" 10 | "github.com/rancher/rancherd/pkg/kubectl" 11 | "github.com/rancher/rancherd/pkg/self" 12 | "github.com/rancher/system-agent/pkg/applyinator" 13 | ) 14 | 15 | func ToInstruction(imageOverride string, systemDefaultRegistry string, k8sVersion string) (*applyinator.Instruction, error) { 16 | runtime := config.GetRuntime(k8sVersion) 17 | return &applyinator.Instruction{ 18 | Name: string(runtime), 19 | Env: []string{ 20 | "RESTART_STAMP=" + images.GetInstallerImage(imageOverride, systemDefaultRegistry, k8sVersion), 21 | }, 22 | Image: images.GetInstallerImage(imageOverride, systemDefaultRegistry, k8sVersion), 23 | SaveOutput: true, 24 | }, nil 25 | } 26 | 27 | func ToUpgradeInstruction(k8sVersion string) (*applyinator.Instruction, error) { 28 | cmd, err := self.Self() 29 | if err != nil { 30 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 31 | } 32 | patch, err := json.Marshal(map[string]interface{}{ 33 | "spec": map[string]interface{}{ 34 | "kubernetesVersion": k8sVersion, 35 | }, 36 | }) 37 | if err != nil { 38 | return nil, err 39 | } 40 | return &applyinator.Instruction{ 41 | Name: "patch-kubernetes-version", 42 | SaveOutput: true, 43 | Args: []string{"retry", kubectl.Command(k8sVersion), "--type=merge", "-n", "fleet-local", "patch", "clusters.provisioning.cattle.io", "local", "-p", string(patch)}, 44 | Env: kubectl.Env(k8sVersion), 45 | Command: cmd, 46 | }, nil 47 | } 48 | -------------------------------------------------------------------------------- /pkg/runtime/role.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/json" 6 | "fmt" 7 | "strings" 8 | 9 | "github.com/rancher/rancherd/pkg/config" 10 | "github.com/rancher/system-agent/pkg/applyinator" 11 | "github.com/rancher/wrangler/pkg/data/convert" 12 | "sigs.k8s.io/yaml" 13 | ) 14 | 15 | var ( 16 | normalizeNames = map[string]string{ 17 | 
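// camelCase keys from the rancherd RuntimeConfig are renamed below to the dashed
// option names that k3s/RKE2 expect in their config.yaml.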
"tlsSans": "tls-san", 18 | "nodeName": "node-name", 19 | "internalAddress": "internal-address", 20 | "taints": "node-taint", 21 | "labels": "node-label", 22 | } 23 | ) 24 | 25 | func ToBootstrapFile(runtime config.Runtime) (*applyinator.File, error) { 26 | if runtime != config.RuntimeK3S { 27 | return nil, nil 28 | } 29 | data, err := json.Marshal(map[string]interface{}{ 30 | "cluster-init": "true", 31 | }) 32 | if err != nil { 33 | return nil, err 34 | } 35 | return &applyinator.File{ 36 | Content: base64.StdEncoding.EncodeToString(data), 37 | Path: GetRancherConfigLocation(runtime), 38 | }, nil 39 | } 40 | 41 | func ToFile(config *config.RuntimeConfig, runtime config.Runtime, clusterInit bool) (*applyinator.File, error) { 42 | data, err := ToConfig(config, clusterInit) 43 | if err != nil { 44 | return nil, err 45 | } 46 | return &applyinator.File{ 47 | Content: base64.StdEncoding.EncodeToString(data), 48 | Path: GetConfigLocation(runtime), 49 | }, nil 50 | } 51 | 52 | func ToConfig(config *config.RuntimeConfig, clusterInit bool) ([]byte, error) { 53 | configObjects := []interface{}{ 54 | config.ConfigValues, 55 | } 56 | 57 | if clusterInit { 58 | configObjects = append(configObjects, config) 59 | } 60 | 61 | result := map[string]interface{}{} 62 | for _, data := range configObjects { 63 | data, err := convert.EncodeToMap(data) 64 | if err != nil { 65 | return nil, err 66 | } 67 | delete(data, "extraConfig") 68 | delete(data, "role") 69 | for oldKey, newKey := range normalizeNames { 70 | value, ok := data[oldKey] 71 | if !ok { 72 | continue 73 | } 74 | delete(data, oldKey) 75 | data[newKey] = value 76 | } 77 | for k, v := range data { 78 | newKey := strings.ReplaceAll(convert.ToYAMLKey(k), "_", "-") 79 | result[newKey] = v 80 | } 81 | } 82 | 83 | return yaml.Marshal(result) 84 | } 85 | 86 | func GetConfigLocation(runtime config.Runtime) string { 87 | return fmt.Sprintf("/etc/rancher/%s/config.yaml.d/40-rancherd.yaml", runtime) 88 | } 89 | 90 | func GetRancherConfigLocation(runtime config.Runtime) string { 91 | return fmt.Sprintf("/etc/rancher/%s/config.yaml.d/50-rancher.yaml", runtime) 92 | } 93 | -------------------------------------------------------------------------------- /pkg/runtime/wait.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/rancher/rancherd/pkg/kubectl" 8 | "github.com/rancher/rancherd/pkg/self" 9 | "github.com/rancher/system-agent/pkg/applyinator" 10 | ) 11 | 12 | func ToWaitKubernetesInstruction(_, _, k8sVersion string) (*applyinator.Instruction, error) { 13 | cmd, err := self.Self() 14 | if err != nil { 15 | return nil, fmt.Errorf("resolving location of %s: %w", os.Args[0], err) 16 | } 17 | return &applyinator.Instruction{ 18 | Name: "wait-kubernetes-provisioned", 19 | SaveOutput: true, 20 | Args: []string{"retry", kubectl.Command(k8sVersion), "-n", "fleet-local", "wait", 21 | "--for=condition=Provisioned=true", "clusters.provisioning.cattle.io", "local"}, 22 | Env: kubectl.Env(k8sVersion), 23 | Command: cmd, 24 | }, nil 25 | } 26 | -------------------------------------------------------------------------------- /pkg/self/self.go: -------------------------------------------------------------------------------- 1 | package self 2 | 3 | import ( 4 | "os" 5 | "os/exec" 6 | "path/filepath" 7 | ) 8 | 9 | func Self() (string, error) { 10 | cmd := os.Args[0] 11 | if _, err := os.Stat(cmd); err == nil { 12 | return filepath.Abs(cmd) 13 | } 14 | cmd, err := 
exec.LookPath(cmd) 15 | if err != nil { 16 | return "", err 17 | } 18 | return filepath.Abs(cmd) 19 | } 20 | -------------------------------------------------------------------------------- /pkg/token/token.go: -------------------------------------------------------------------------------- 1 | package token 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/rancherd/pkg/kubectl" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "k8s.io/client-go/dynamic" 11 | "k8s.io/client-go/tools/clientcmd" 12 | ) 13 | 14 | func GetToken(ctx context.Context, kubeconfig string) (string, error) { 15 | kubeconfig, err := kubectl.GetKubeconfig(kubeconfig) 16 | if err != nil { 17 | return "", err 18 | } 19 | 20 | conf, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 21 | if err != nil { 22 | return "", err 23 | } 24 | 25 | client, err := dynamic.NewForConfig(conf) 26 | if err != nil { 27 | return "", err 28 | } 29 | 30 | resource, err := client.Resource(schema.GroupVersionResource{ 31 | Group: "management.cattle.io", 32 | Version: "v3", 33 | Resource: "clusterregistrationtokens", 34 | }).Namespace("local").Get(ctx, "default-token", metav1.GetOptions{}) 35 | if err != nil { 36 | return "", err 37 | } 38 | 39 | str, _, err := unstructured.NestedString(resource.Object, "status", "token") 40 | return str, err 41 | } 42 | -------------------------------------------------------------------------------- /pkg/tpm/get.go: -------------------------------------------------------------------------------- 1 | package tpm 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "encoding/json" 7 | "fmt" 8 | "io/ioutil" 9 | "net/http" 10 | "strings" 11 | "time" 12 | 13 | "github.com/google/go-attestation/attest" 14 | "github.com/gorilla/websocket" 15 | "github.com/pkg/errors" 16 | "github.com/sirupsen/logrus" 17 | ) 18 | 19 | func Get(cacerts []byte, url string, header http.Header) ([]byte, error) { 20 | dialer := websocket.DefaultDialer 21 | if len(cacerts) > 0 { 22 | pool := x509.NewCertPool() 23 | pool.AppendCertsFromPEM(cacerts) 24 | dialer = &websocket.Dialer{ 25 | Proxy: http.ProxyFromEnvironment, 26 | HandshakeTimeout: 45 * time.Second, 27 | TLSClientConfig: &tls.Config{ 28 | RootCAs: pool, 29 | }, 30 | } 31 | } 32 | 33 | attestationData, aikBytes, err := getAttestationData() 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | hash, err := GetPubHash() 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | token, err := getToken(attestationData) 44 | if err != nil { 45 | return nil, err 46 | } 47 | 48 | if header == nil { 49 | header = http.Header{} 50 | } 51 | header.Add("Authorization", token) 52 | wsURL := strings.Replace(url, "http", "ws", 1) 53 | logrus.Infof("Using TPMHash %s to dial %s", hash, wsURL) 54 | conn, resp, err := dialer.Dial(wsURL, header) 55 | if err != nil { 56 | if resp != nil && resp.StatusCode == http.StatusUnauthorized { 57 | data, err := ioutil.ReadAll(resp.Body) 58 | if err == nil { 59 | return nil, errors.New(string(data)) 60 | } 61 | } 62 | return nil, err 63 | } 64 | defer conn.Close() 65 | 66 | _, msg, err := conn.NextReader() 67 | if err != nil { 68 | return nil, fmt.Errorf("reading challenge: %w", err) 69 | } 70 | 71 | var challenge Challenge 72 | if err := json.NewDecoder(msg).Decode(&challenge); err != nil { 73 | return nil, fmt.Errorf("unmarshaling Challenge: %w", err) 74 | } 75 | 76 | challengeResp, err := getChallengeResponse(challenge.EC, aikBytes) 
77 | if err != nil { 78 | return nil, err 79 | } 80 | 81 | writer, err := conn.NextWriter(websocket.BinaryMessage) 82 | if err != nil { 83 | return nil, err 84 | } 85 | defer writer.Close() 86 | 87 | if err := json.NewEncoder(writer).Encode(challengeResp); err != nil { 88 | return nil, fmt.Errorf("encoding ChallengeResponse: %w", err) 89 | } 90 | 91 | if err := writer.Close(); err != nil { 92 | return nil, fmt.Errorf("closing websocket writer: %w", err) 93 | } 94 | 95 | _, msg, err = conn.NextReader() 96 | if err != nil { 97 | return nil, fmt.Errorf("reading payload from tpm get: %w", err) 98 | } 99 | 100 | return ioutil.ReadAll(msg) 101 | } 102 | 103 | func getChallengeResponse(ec *attest.EncryptedCredential, aikBytes []byte) (*ChallengeResponse, error) { 104 | tpm, err := attest.OpenTPM(&attest.OpenConfig{ 105 | TPMVersion: attest.TPMVersion20, 106 | }) 107 | if err != nil { 108 | return nil, fmt.Errorf("opening tpm: %w", err) 109 | } 110 | defer tpm.Close() 111 | 112 | aik, err := tpm.LoadAK(aikBytes) 113 | if err != nil { 114 | return nil, err 115 | } 116 | defer aik.Close(tpm) 117 | 118 | secret, err := aik.ActivateCredential(tpm, *ec) 119 | if err != nil { 120 | return nil, fmt.Errorf("failed to activate credential: %w", err) 121 | } 122 | return &ChallengeResponse{ 123 | Secret: secret, 124 | }, nil 125 | } 126 | -------------------------------------------------------------------------------- /pkg/tpm/tpm.go: -------------------------------------------------------------------------------- 1 | package tpm 2 | 3 | import ( 4 | "encoding/base64" 5 | "encoding/json" 6 | "fmt" 7 | "strings" 8 | 9 | "github.com/google/go-attestation/attest" 10 | ) 11 | 12 | func ResolveToken(token string) (bool, string, error) { 13 | if !strings.HasPrefix(token, "tpm://") { 14 | return false, token, nil 15 | } 16 | 17 | hash, err := GetPubHash() 18 | return true, hash, err 19 | } 20 | 21 | func GetPubHash() (string, error) { 22 | ek, err := getEK() 23 | if err != nil { 24 | return "", fmt.Errorf("getting EK: %w", err) 25 | } 26 | 27 | hash, err := getPubHash(ek) 28 | if err != nil { 29 | return "", fmt.Errorf("hashing EK: %w", err) 30 | } 31 | 32 | return hash, nil 33 | } 34 | 35 | func getEK() (*attest.EK, error) { 36 | var err error 37 | tpm, err := attest.OpenTPM(&attest.OpenConfig{ 38 | TPMVersion: attest.TPMVersion20, 39 | }) 40 | if err != nil { 41 | return nil, fmt.Errorf("opening tpm: %w", err) 42 | } 43 | defer tpm.Close() 44 | 45 | eks, err := tpm.EKs() 46 | if err != nil { 47 | return nil, err 48 | } 49 | 50 | if len(eks) == 0 { 51 | return nil, fmt.Errorf("failed to find EK") 52 | } 53 | 54 | return &eks[0], nil 55 | } 56 | 57 | func getToken(data *AttestationData) (string, error) { 58 | bytes, err := json.Marshal(data) 59 | if err != nil { 60 | return "", fmt.Errorf("marshalling attestation data: %w", err) 61 | } 62 | 63 | return "Bearer TPM" + base64.StdEncoding.EncodeToString(bytes), nil 64 | } 65 | 66 | func getAttestationData() (*AttestationData, []byte, error) { 67 | var err error 68 | tpm, err := attest.OpenTPM(&attest.OpenConfig{ 69 | TPMVersion: attest.TPMVersion20, 70 | }) 71 | if err != nil { 72 | return nil, nil, fmt.Errorf("opening tpm: %w", err) 73 | } 74 | defer tpm.Close() 75 | 76 | eks, err := tpm.EKs() 77 | if err != nil { 78 | return nil, nil, err 79 | } 80 | ak, err := tpm.NewAK(nil) 81 | if err != nil { 82 | return nil, nil, err 83 | } 84 | defer ak.Close(tpm) 85 | params := ak.AttestationParameters() 86 | 87 | if len(eks) == 0 { 88 | return nil, nil, fmt.Errorf("failed to 
find EK") 89 | } 90 | 91 | ek := &eks[0] 92 | ekBytes, err := EncodeEK(ek) 93 | if err != nil { 94 | return nil, nil, err 95 | } 96 | 97 | aikBytes, err := ak.Marshal() 98 | if err != nil { 99 | return nil, nil, fmt.Errorf("marshaling AK: %w", err) 100 | } 101 | 102 | return &AttestationData{ 103 | EK: ekBytes, 104 | AK: ¶ms, 105 | }, aikBytes, nil 106 | } 107 | -------------------------------------------------------------------------------- /pkg/tpm/tpm_attestor.go: -------------------------------------------------------------------------------- 1 | /* 2 | ** Copyright 2019 Bloomberg Finance L.P. 3 | ** 4 | ** Licensed under the Apache License, Version 2.0 (the "License"); 5 | ** you may not use this file except in compliance with the License. 6 | ** You may obtain a copy of the License at 7 | ** 8 | ** http://www.apache.org/licenses/LICENSE-2.0 9 | ** 10 | ** Unless required by applicable law or agreed to in writing, software 11 | ** distributed under the License is distributed on an "AS IS" BASIS, 12 | ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | ** See the License for the specific language governing permissions and 14 | ** limitations under the License. 15 | */ 16 | 17 | package tpm 18 | 19 | import ( 20 | "crypto/sha256" 21 | "encoding/pem" 22 | "fmt" 23 | 24 | "github.com/google/certificate-transparency-go/x509" 25 | "github.com/google/go-attestation/attest" 26 | ) 27 | 28 | type AttestationData struct { 29 | EK []byte 30 | AK *attest.AttestationParameters 31 | } 32 | 33 | type Challenge struct { 34 | EC *attest.EncryptedCredential 35 | } 36 | 37 | type KeyData struct { 38 | Keys []string `json:"keys"` 39 | } 40 | 41 | type ChallengeResponse struct { 42 | Secret []byte 43 | } 44 | 45 | func getPubHash(ek *attest.EK) (string, error) { 46 | data, err := pubBytes(ek) 47 | if err != nil { 48 | return "", err 49 | } 50 | pubHash := sha256.Sum256(data) 51 | hashEncoded := fmt.Sprintf("%x", pubHash) 52 | return hashEncoded, nil 53 | } 54 | 55 | func EncodeEK(ek *attest.EK) ([]byte, error) { 56 | if ek.Certificate != nil { 57 | return pem.EncodeToMemory(&pem.Block{ 58 | Type: "CERTIFICATE", 59 | Bytes: ek.Certificate.Raw, 60 | }), nil 61 | } 62 | 63 | data, err := pubBytes(ek) 64 | if err != nil { 65 | return nil, err 66 | } 67 | 68 | return pem.EncodeToMemory(&pem.Block{ 69 | Type: "PUBLIC KEY", 70 | Bytes: data, 71 | }), nil 72 | } 73 | 74 | func pubBytes(ek *attest.EK) ([]byte, error) { 75 | data, err := x509.MarshalPKIXPublicKey(ek.Public) 76 | if err != nil { 77 | return nil, fmt.Errorf("error marshaling ec public key: %v", err) 78 | } 79 | return data, nil 80 | } 81 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | var ( 8 | Version = "dev" 9 | GitCommit = "Head" 10 | ) 11 | 12 | func FriendlyVersion() string { 13 | return fmt.Sprintf("%s (%s)", Version, GitCommit) 14 | } 15 | -------------------------------------------------------------------------------- /pkg/versions/versions.go: -------------------------------------------------------------------------------- 1 | package versions 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "path" 7 | "strings" 8 | "sync" 9 | 10 | "github.com/sirupsen/logrus" 11 | "gopkg.in/yaml.v3" 12 | ) 13 | 14 | var ( 15 | cachedK8sVersion = map[string]string{} 16 | cachedOSVersion = map[string]string{} 17 | cachedRancherVersion 
= map[string]string{} 18 | cachedLock sync.Mutex 19 | redirectClient = &http.Client{ 20 | CheckRedirect: func(*http.Request, []*http.Request) error { 21 | return http.ErrUseLastResponse 22 | }, 23 | } 24 | ) 25 | 26 | func getVersionOrURL(urlFormat, def, version string) (_ string, isURL bool) { 27 | if version == "" { 28 | version = def 29 | } 30 | 31 | if strings.HasPrefix(version, "v") && len(strings.Split(version, ".")) > 2 { 32 | return version, false 33 | } 34 | 35 | channelURL := version 36 | if !strings.HasPrefix(channelURL, "https://") && 37 | !strings.HasPrefix(channelURL, "http://") { 38 | if strings.HasSuffix(channelURL, "-head") || strings.Contains(channelURL, "/") { 39 | return channelURL, false 40 | } 41 | channelURL = fmt.Sprintf(urlFormat, version) 42 | } 43 | 44 | return channelURL, true 45 | } 46 | 47 | func K8sVersion(kubernetesVersion string) (string, error) { 48 | cachedLock.Lock() 49 | defer cachedLock.Unlock() 50 | 51 | cached, ok := cachedK8sVersion[kubernetesVersion] 52 | if ok { 53 | return cached, nil 54 | } 55 | 56 | urlFormat := "https://update.k3s.io/v1-release/channels/%s" 57 | if strings.HasSuffix(kubernetesVersion, ":k3s") { 58 | kubernetesVersion = strings.TrimSuffix(kubernetesVersion, ":k3s") 59 | } else if strings.HasSuffix(kubernetesVersion, ":rke2") { 60 | urlFormat = "https://update.rke2.io/v1-release/channels/%s" 61 | kubernetesVersion = strings.TrimSuffix(kubernetesVersion, ":rke2") 62 | } 63 | 64 | versionOrURL, isURL := getVersionOrURL(urlFormat, "stable", kubernetesVersion) 65 | if !isURL { 66 | return versionOrURL, nil 67 | } 68 | 69 | resp, err := redirectClient.Get(versionOrURL) 70 | if err != nil { 71 | return "", fmt.Errorf("getting channel version from (%s): %w", versionOrURL, err) 72 | } 73 | defer resp.Body.Close() 74 | 75 | url, err := resp.Location() 76 | if err != nil { 77 | return "", fmt.Errorf("getting channel version URL from (%s): %w", versionOrURL, err) 78 | } 79 | 80 | resolved := path.Base(url.Path) 81 | cachedK8sVersion[kubernetesVersion] = resolved 82 | logrus.Infof("Resolving Kubernetes version [%s] to %s from %s ", kubernetesVersion, resolved, versionOrURL) 83 | return resolved, nil 84 | } 85 | 86 | func RancherVersion(rancherVersion string) (string, error) { 87 | cachedLock.Lock() 88 | defer cachedLock.Unlock() 89 | 90 | cached, ok := cachedRancherVersion[rancherVersion] 91 | if ok { 92 | return cached, nil 93 | } 94 | 95 | versionOrURL, isURL := getVersionOrURL("https://releases.rancher.com/server-charts/%s/index.yaml", "stable", rancherVersion) 96 | if !isURL { 97 | return versionOrURL, nil 98 | } 99 | 100 | resp, err := http.Get(versionOrURL) 101 | if err != nil { 102 | return "", fmt.Errorf("getting rancher channel version from (%s): %w", versionOrURL, err) 103 | } 104 | defer resp.Body.Close() 105 | 106 | index := &chartIndex{} 107 | if err := yaml.NewDecoder(resp.Body).Decode(index); err != nil { 108 | return "", fmt.Errorf("unmarshalling rancher channel version from (%s): %w", versionOrURL, err) 109 | } 110 | 111 | versions := index.Entries["rancher"] 112 | if len(versions) == 0 { 113 | return "", fmt.Errorf("failed to find version for rancher chart at (%s)", versionOrURL) 114 | } 115 | 116 | version := "v" + versions[0].Version 117 | 118 | logrus.Infof("Resolving RancherVersion version [%s] to %s from %s ", rancherVersion, version, versionOrURL) 119 | cachedRancherVersion[rancherVersion] = version 120 | return version, nil 121 | } 122 | 123 | func RancherOSVersion(rancherOSVersion string) (string, error) { 124 | 
cachedLock.Lock() 125 | defer cachedLock.Unlock() 126 | 127 | cached, ok := cachedOSVersion[rancherOSVersion] 128 | if ok { 129 | return cached, nil 130 | } 131 | 132 | urlFormat := "https://github.com/rancher/os2/releases/%s" 133 | versionOrURL, isURL := getVersionOrURL(urlFormat, "latest", rancherOSVersion) 134 | if !isURL { 135 | return versionOrURL, nil 136 | } 137 | 138 | resp, err := redirectClient.Get(versionOrURL) 139 | if err != nil { 140 | return "", fmt.Errorf("getting channel version from (%s): %w", versionOrURL, err) 141 | } 142 | defer resp.Body.Close() 143 | 144 | url, err := resp.Location() 145 | if err != nil { 146 | return "", fmt.Errorf("getting channel version URL from (%s): %w", versionOrURL, err) 147 | } 148 | 149 | resolved := "rancher/os2:" + path.Base(url.Path) 150 | cachedOSVersion[rancherOSVersion] = resolved 151 | logrus.Infof("Resolving RancherOS version [%s] to %s from %s ", rancherOSVersion, resolved, versionOrURL) 152 | return resolved, nil 153 | } 154 | 155 | type chartIndex struct { 156 | Entries map[string][]struct { 157 | Version string `yaml:"version"` 158 | } `yaml:"entries"` 159 | } 160 | -------------------------------------------------------------------------------- /rancherd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Rancher Bootstrap 3 | Documentation=https://github.com/rancher/rancherd 4 | Wants=network-online.target 5 | After=network-online.target 6 | 7 | [Install] 8 | WantedBy=multi-user.target 9 | 10 | [Service] 11 | Type=oneshot 12 | EnvironmentFile=-/etc/default/%N 13 | EnvironmentFile=-/etc/sysconfig/%N 14 | KillMode=process 15 | # Having non-zero Limit*s causes performance problems due to accounting overhead 16 | # in the kernel. We recommend using cgroups to do container-local accounting. 17 | LimitNOFILE=1048576 18 | LimitNPROC=infinity 19 | LimitCORE=infinity 20 | TasksMax=infinity 21 | TimeoutStartSec=0 22 | ExecStart=/usr/local/bin/rancherd bootstrap 23 | -------------------------------------------------------------------------------- /scripts/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright YEAR Rancher Labs, Inc. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | -------------------------------------------------------------------------------- /scripts/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | source $(dirname $0)/version 5 | 6 | cd $(dirname $0)/.. 
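# Cross-compile static rancherd binaries for amd64 and arm64, stamping the version and git commit into pkg/version via -ldflags.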
7 | 8 | mkdir -p bin 9 | if [ "$(uname)" = "Linux" ]; then 10 | OTHER_LINKFLAGS="-extldflags -static -s" 11 | fi 12 | LINKFLAGS="-X github.com/rancher/rancherd/pkg/version.Version=$VERSION" 13 | LINKFLAGS="-X github.com/rancher/rancherd/pkg/version.GitCommit=$COMMIT $LINKFLAGS" 14 | CGO_ENABLED=0 GOARCH=amd64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/rancherd-amd64 ./cmd/rancherd 15 | CGO_ENABLED=0 GOARCH=arm64 go build -ldflags "$LINKFLAGS $OTHER_LINKFLAGS" -o bin/rancherd-arm64 ./cmd/rancherd 16 | -------------------------------------------------------------------------------- /scripts/build-sha-file: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | source $(dirname $0)/version 5 | 6 | cd $(dirname $0)/.. 7 | 8 | mkdir -p dist/artifacts/ 9 | pushd bin/ 10 | sha256sum rancherd-amd64 > ../dist/artifacts/sha256sum-amd64.txt 11 | sha256sum rancherd-arm64 > ../dist/artifacts/sha256sum-arm64.txt 12 | popd 13 | -------------------------------------------------------------------------------- /scripts/ci: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cd $(dirname $0) 5 | 6 | ./build 7 | ./test 8 | ./validate 9 | ./package 10 | ./build-sha-file 11 | -------------------------------------------------------------------------------- /scripts/entry: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | mkdir -p bin dist 5 | if [ -e ./scripts/$1 ]; then 6 | ./scripts/"$@" 7 | else 8 | exec "$@" 9 | fi 10 | 11 | chown -R $DAPPER_UID:$DAPPER_GID . 12 | -------------------------------------------------------------------------------- /scripts/package: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | source $(dirname $0)/version 5 | 6 | cd $(dirname $0)/.. 7 | 8 | mkdir -p dist/artifacts 9 | cp bin/rancherd-* dist/artifacts/ 10 | -------------------------------------------------------------------------------- /scripts/release: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exec $(dirname $0)/ci 4 | -------------------------------------------------------------------------------- /scripts/test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cd $(dirname $0)/.. 5 | 6 | echo Running tests 7 | go test -cover -tags=test ./... 8 | -------------------------------------------------------------------------------- /scripts/validate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cd $(dirname $0)/.. 5 | 6 | echo Running validation 7 | 8 | PACKAGES="$(go list ./...)" 9 | 10 | if ! 
command -v golangci-lint; then 11 | echo Skipping validation: no golangci-lint available 12 | exit 13 | fi 14 | 15 | echo Running validation 16 | 17 | echo Running: golangci-lint 18 | golangci-lint run 19 | 20 | echo Running: go fmt 21 | test -z "$(go fmt ${PACKAGES} | tee /dev/stderr)" 22 | -------------------------------------------------------------------------------- /scripts/version: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 4 | DIRTY="-dirty" 5 | fi 6 | 7 | COMMIT=$(git rev-parse --short HEAD) 8 | GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)} 9 | 10 | if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then 11 | VERSION=$GIT_TAG 12 | else 13 | VERSION="${COMMIT}${DIRTY}" 14 | fi 15 | 16 | if [ -z "$ARCH" ]; then 17 | ARCH=$(go env GOHOSTARCH) 18 | fi 19 | 20 | SUFFIX="-${ARCH}" 21 | 22 | TAG=${TAG:-${VERSION}${SUFFIX}} 23 | REPO=${REPO:-rancher} 24 | 25 | if echo $TAG | grep -q dirty; then 26 | TAG=dev 27 | fi 28 | --------------------------------------------------------------------------------