├── .dockerignore ├── .gitignore ├── .tool-versions ├── Dockerfile ├── Gemfile ├── Gemfile.lock ├── README.md ├── Rakefile ├── bin └── kube_backup ├── deploy ├── 0_namespace.yaml ├── 1_service_account.yaml ├── 2_config_map.yaml └── 3_cronjob.yaml ├── lib ├── kube_backup.rb └── kube_backup │ ├── cli.rb │ ├── cmd_utils.rb │ ├── log_util.rb │ ├── logger.rb │ ├── plugins │ └── grafana.rb │ ├── version.rb │ └── writter.rb └── test ├── cleanup_test.rb ├── samples ├── config_map.yaml ├── deployment.yaml └── single_pod.yaml ├── sort_keys_test.rb └── test_helper.rb /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | node_modules 3 | kube_state 4 | deploy 5 | .DS_Store 6 | Dockerfile 7 | .dockerignore 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .ruby-version 2 | .DS_Store 3 | TODO.md 4 | try_* 5 | kube_state/ -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | kubectl 1.11.0 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM lachlanevenson/k8s-kubectl 2 | 3 | MAINTAINER Pavel Evstigneev 4 | 5 | RUN apk upgrade --update-cache --available && \ 6 | apk add curl openssl openssh git bash ruby ruby-json && \ 7 | update-ca-certificates && \ 8 | rm -rf /var/cache/apk/* 9 | 10 | # https://git.wiki.kernel.org/index.php/GitHosting 11 | RUN mkdir -p ~/.ssh && \ 12 | ssh-keyscan -t rsa,dsa github.com gitlab.com bitbucket.org codebasehq.com >> /root/.ssh/known_hosts 13 | 14 | RUN gem install bundler -v 2.0.1 --no-doc 15 | 16 | RUN mkdir -p /opt/app 17 | WORKDIR /opt/app 18 | 19 | ADD . 
/opt/app 20 | 21 | RUN bundle install --retry 10 --system 22 | 23 | ENV PATH $PATH:/opt/app/bin 24 | 25 | 26 | ENTRYPOINT ["sh", "-c"] 27 | CMD ["kube_backup backup && kube_backup push"] 28 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | 3 | gem 'commander' 4 | gem 'colorize' 5 | gem 'excon' 6 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | colorize (0.8.1) 5 | commander (4.4.7) 6 | highline (~> 2.0.0) 7 | excon (0.71.0) 8 | highline (2.0.3) 9 | 10 | PLATFORMS 11 | ruby 12 | 13 | DEPENDENCIES 14 | colorize 15 | commander 16 | excon 17 | 18 | BUNDLED WITH 19 | 2.0.1 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kube-backup 2 | 3 | Kubernetes resource state backup to git 4 | 5 | ### Git structure 6 | 7 | ``` 8 | _global_ - global resources such as Node, ClusterRole, StorageClass 9 | _grafana_ - grafana configs (when grafana enabled) 10 | - such as kube-system, default, etc... 11 | - folder for each resource type 12 | - file for each resource 13 | ``` 14 | 15 | ### Screenshots 16 | 17 | 18 | ### Deployment 19 | 20 | Yaml manifests are in [deploy folder](https://github.com/kuberhost/kube-backup/tree/master/deploy). 
21 | 22 | #### Create Deployment Key 23 | 24 | GitHub and GitLab support adding a key for only one repository 25 | 26 | * Create repo 27 | * Generate ssh key `ssh-keygen -f ./new_key` 28 | * Add new ssh key to repo with write access 29 | * Save key to [2_config_map.yaml](https://github.com/kuberhost/kube-backup/blob/master/deploy/2_config_map.yaml) (see comments in file) 30 | 31 | #### Testing Deployment 32 | 33 | I recommend running it periodically with Kubernetes' CronJob resource. If you want to test how it works without waiting, you can change the running schedule or create a pod with the same parameters 34 | 35 | ### Commands 36 | 37 | * `kube_backup backup` - pull remote git repository, save kubernetes state, make git commit in local repository 38 | * `kube_backup push` - push changes to remote repository 39 | * `kube_backup help` - shows help 40 | 41 | Docker image by default runs `kube_backup backup && kube_backup push` 42 | 43 | ### Config 44 | 45 | * `GIT_REPO_URL` - remote git URL like `git@github.com:kuberhost/kube-backup.git` (required) 46 | * `BACKUP_VERBOSE` - use 1 to enable verbose logging 47 | * `TARGET_PATH` - local git repository folder, default `./kube_state` 48 | * `SKIP_NAMESPACES` - namespaces to exclude, separated by comma (,) 49 | * `ONLY_NAMESPACES` - whitelist namespaces 50 | * `GLOBAL_RESOURCES` - override global resources list, default is `node, apiservice, clusterrole, clusterrolebinding, podsecuritypolicy, storageclass, persistentvolume, customresourcedefinition, mutatingwebhookconfiguration, validatingwebhookconfiguration, priorityclass` 51 | * `EXTRA_GLOBAL_RESOURCES` - use it to add resources to `GLOBAL_RESOURCES` list 52 | * `SKIP_GLOBAL_RESOURCES` - blacklist global resources 53 | * `RESOURCES` - default list of namespaced resources, see `KubeBackup::TYPES` 54 | * `EXTRA_RESOURCES` - use it to add resources to `RESOURCES` list 55 | * `SKIP_RESOURCES` - exclude resources 56 | * `SKIP_OBJECTS` - use it to skip individual objects, such as 
`kube-backup/ConfigMap/kube-backup-ssh-config` (separated by comma, spaces around commas are ignored) 57 | * `GIT_USER` - default is `kube-backup` 58 | * `GIT_EMAIL` - default is `kube-backup@$(HOSTNAME)` 59 | * `GIT_BRANCH` - Git branch, default is `master` 60 | * `GIT_PREFIX` - Path to the subdirectory in your repository 61 | * `GRAFANA_URL` - grafana API URL, e.g. `https://grafana.my-cluster.com` 62 | * `GRAFANA_TOKEN` - grafana API token, create at https://your-grafana/org/apikeys 63 | * `TZ` - timezone of commit times, e.g. `:Europe/Berlin` 64 | 65 | ### Security 66 | 67 | To avoid a man-in-the-middle attack it's recommended to provide a `known_hosts` file. The default `known_hosts` contains keys for github.com, gitlab.com and bitbucket.org 68 | 69 | #### Custom Resources 70 | 71 | Let's say we have a cluster with prometheus and certmanager; they register custom resources and we want to add them to the backup. 72 | 73 | Get the list of custom resource definitions: 74 | ``` 75 | $ kubectl get crd 76 | 77 | NAME CREATED AT 78 | alertmanagers.monitoring.coreos.com 2018-06-27T10:33:00Z 79 | certificates.certmanager.k8s.io 2018-06-27T09:39:43Z 80 | clusterissuers.certmanager.k8s.io 2018-06-27T09:39:43Z 81 | issuers.certmanager.k8s.io 2018-06-27T09:39:44Z 82 | prometheuses.monitoring.coreos.com 2018-06-27T10:33:00Z 83 | prometheusrules.monitoring.coreos.com 2018-06-27T10:33:00Z 84 | servicemonitors.monitoring.coreos.com 2018-06-27T10:33:00Z 85 | ``` 86 | 87 | Or get more useful output: 88 | ``` 89 | $ kubectl get crd -o json | jq -r '.items | (.[] | [.spec.names.singular, .spec.group, .spec.scope]) | @tsv' 90 | alertmanager monitoring.coreos.com Namespaced 91 | certificate certmanager.k8s.io Namespaced 92 | clusterissuer certmanager.k8s.io Cluster 93 | issuer certmanager.k8s.io Namespaced 94 | prometheus monitoring.coreos.com Namespaced 95 | prometheusrule monitoring.coreos.com Namespaced 96 | servicemonitor monitoring.coreos.com Namespaced 97 | ``` 98 | 99 | Set env variables in the container 
spec: 100 | ```yaml 101 | env: 102 | - name: EXTRA_GLOBAL_RESOURCES 103 | value: clusterissuer 104 | - name: EXTRA_RESOURCES 105 | value: alertmanager, prometheus, prometheusrule, servicemonitor, certificate, issuer 106 | ``` 107 | 108 | --- 109 | 110 | Special thanks to Pieter Lange for [original idea](https://github.com/pieterlange/kube-backup/) 111 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require 'rake/testtask' 2 | 3 | Rake::TestTask.new do |test| 4 | test.test_files = Dir.glob('test/**/*_test.rb') 5 | end 6 | -------------------------------------------------------------------------------- /bin/kube_backup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require_relative '../lib/kube_backup' 4 | require_relative '../lib/kube_backup/cli' 5 | 6 | KubeBackup::CLI.new.run 7 | -------------------------------------------------------------------------------- /deploy/0_namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: kube-backup 5 | -------------------------------------------------------------------------------- /deploy/1_service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-backup-user 5 | namespace: kube-backup 6 | 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRole 10 | metadata: 11 | name: kube-backup-view-all 12 | rules: 13 | - apiGroups: ["*"] 14 | resources: ["*"] 15 | verbs: ["get", "list", "watch"] 16 | - nonResourceURLs: ["*"] 17 | verbs: ["get", "list", "watch"] 18 | 19 | --- 20 | kind: ClusterRoleBinding 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | metadata: 23 | name: kube-backup-user 24 | subjects: 
25 | - kind: ServiceAccount 26 | name: kube-backup-user 27 | namespace: kube-backup 28 | roleRef: 29 | kind: ClusterRole 30 | name: kube-backup-view-all 31 | apiGroup: rbac.authorization.k8s.io 32 | 33 | --- 34 | apiVersion: rbac.authorization.k8s.io/v1 35 | kind: RoleBinding 36 | metadata: 37 | name: psp:unprivileged 38 | namespace: kube-backup 39 | roleRef: 40 | apiGroup: rbac.authorization.k8s.io 41 | kind: ClusterRole 42 | name: podsecuritypolicy:unprivileged 43 | subjects: 44 | - kind: Group 45 | name: system:serviceaccounts:kube-backup 46 | apiGroup: rbac.authorization.k8s.io 47 | -------------------------------------------------------------------------------- /deploy/2_config_map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kube-backup-ssh-config 5 | namespace: kube-backup 6 | data: 7 | # generate new key with "ssh-keygen -f ./new_key" (press enter for all questions) 8 | id_rsa: | 9 | -----BEGIN RSA PRIVATE KEY----- 10 | << new_key content goes here >> 11 | -----END RSA PRIVATE KEY----- 12 | id_rsa.pub: | 13 | << new_key.pub content goes here >> 14 | -------------------------------------------------------------------------------- /deploy/3_cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: kube-system-backup 5 | namespace: kube-backup 6 | spec: 7 | successfulJobsHistoryLimit: 2 8 | failedJobsHistoryLimit: 2 9 | concurrencyPolicy: Forbid 10 | schedule: "05,57 * * * *" 11 | jobTemplate: 12 | spec: 13 | template: 14 | spec: 15 | restartPolicy: OnFailure 16 | serviceAccount: kube-backup-user 17 | containers: 18 | - name: backup 19 | image: kuberhost/kube-backup 20 | imagePullPolicy: Always 21 | env: 22 | - name: BACKUP_VERBOSE 23 | value: "1" 24 | - name: GIT_REPO_URL 25 | value: git@gitlab.com:user/repo.git # <-- fill this in 26 | - name: 
EXTRA_RESOURCES 27 | # just example, use `kubectl get crd` to see custom resources in your cluster 28 | value: certificate,issuer,alertmanager,prometheus,prometheusrule,servicemonitor 29 | - name: TZ 30 | value: :Europe/Berlin 31 | #- name: GRAFANA_URL 32 | # value: https://grafana.your-cluster.com 33 | #- name: GRAFANA_TOKEN 34 | # value: aaabbbbcccDDDeee 35 | volumeMounts: 36 | - name: ssh-config 37 | mountPath: /root/.ssh/id_rsa 38 | subPath: id_rsa 39 | - name: ssh-config 40 | mountPath: /root/.ssh/id_rsa.pub 41 | subPath: id_rsa.pub 42 | volumes: 43 | - name: ssh-config 44 | configMap: 45 | name: kube-backup-ssh-config 46 | defaultMode: 256 47 | -------------------------------------------------------------------------------- /lib/kube_backup.rb: -------------------------------------------------------------------------------- 1 | require_relative './kube_backup/version' 2 | require_relative './kube_backup/cmd_utils' 3 | require_relative './kube_backup/log_util' 4 | require_relative './kube_backup/writter' 5 | require_relative './kube_backup/logger' 6 | require_relative './kube_backup/plugins/grafana' 7 | 8 | require 'json' 9 | require 'yaml' 10 | require 'colorize' 11 | 12 | module KubeBackup 13 | module Plugins; end 14 | 15 | extend KubeBackup::CmdUtils 16 | extend KubeBackup::Logger 17 | 18 | GLOBAL_TYPES = [ 19 | :node, 20 | :apiservice, 21 | :clusterrole, 22 | :clusterrolebinding, 23 | :podsecuritypolicy, 24 | :storageclass, 25 | :persistentvolume, 26 | :customresourcedefinition, 27 | :mutatingwebhookconfiguration, 28 | :validatingwebhookconfiguration, 29 | :priorityclass 30 | ].freeze 31 | 32 | TYPES = [ 33 | :serviceaccount, 34 | :secret, 35 | :deployment, 36 | :daemonset, 37 | :statefulset, 38 | :configmap, 39 | :cronjob, 40 | :job, 41 | :ingress, 42 | :networkpolicy, 43 | :persistentvolumeclaim, 44 | :role, 45 | :rolebinding, 46 | :service, 47 | :pod, 48 | :endpoints, 49 | :resourcequota, 50 | :horizontalpodautoscaler, 51 | :limitrange, 52 | 
:podtemplate, 53 | :poddisruptionbudget 54 | ].freeze 55 | 56 | SKIP_POD_OWNERS = [ 57 | "DaemonSet", 58 | "ReplicaSet", 59 | "Job", 60 | "StatefulSet" 61 | ].freeze 62 | 63 | SKIP_JOB_OWNERS = [ 64 | "CronJob", 65 | ].freeze 66 | 67 | 68 | def self.perform_backup!(options = {}) 69 | logger.info "Args: #{LogUtil.hash(options)}" 70 | 71 | if !options[:repo_url] || options[:repo_url] == '' 72 | raise OptionParser::MissingArgument, "Git repo-url is required, please specify --repo-url or GIT_REPO_URL" 73 | end 74 | 75 | global_types = combine_types(GLOBAL_TYPES.dup, 76 | extras: options[:extra_global_resources], 77 | exclude: options[:skip_global_resources], 78 | only: options[:global_resources] 79 | ) 80 | if global_types != GLOBAL_TYPES 81 | logger.info "Global Types: #{LogUtil.dump(global_types)}" 82 | end 83 | 84 | types = combine_types(TYPES.dup, 85 | extras: options[:extra_resources], 86 | exclude: options[:skip_resources], 87 | only: options[:resources] 88 | ) 89 | if types != TYPES 90 | logger.info "Types: #{LogUtil.dump(types)}" 91 | end 92 | 93 | skip_patterns = (options[:skip_objects] || "").split(",").map(&:strip) 94 | global_skip_patterns = skip_patterns.select {|pattern| pattern.scan("/").size == 1 } 95 | skip_patterns -= global_skip_patterns 96 | 97 | if global_skip_patterns.size > 0 98 | logger.info "Global Skip Patterns: #{LogUtil.dump(global_skip_patterns)}" 99 | end 100 | if skip_patterns.size > 0 101 | logger.info "Skip Patterns: #{LogUtil.dump(skip_patterns)}" 102 | end 103 | 104 | skip_namespaces = options[:skip_namespaces] ? options[:skip_namespaces].split(",") : [] 105 | only_namespaces = options[:only_namespaces] ? options[:only_namespaces].split(",") : nil 106 | 107 | writter = Writter.new(options) 108 | writter.init_repo! 
109 | 110 | global_types.each do |type| 111 | resources = kubectl(:get, type) 112 | puts "Got #{resources["items"].size} #{type}s" 113 | 114 | resources["items"].each do |item| 115 | 116 | if skip_object?(item, global_skip_patterns) 117 | name = item.dig("metadata", "name") 118 | logger.info "skip resource #{item["kind"]}/#{name}" 119 | next 120 | end 121 | 122 | clean_resource!(item) 123 | writter.write_res(item) 124 | end 125 | end 126 | 127 | types.each do |type| 128 | resources = kubectl(:get, type, "all-namespaces" => nil) 129 | puts "Got #{resources["items"].size} #{type}s" 130 | 131 | if !resources["items"] 132 | logger.error "Can not get resource #{type}" 133 | puts JSON.pretty_generate(resources) 134 | exit(1) 135 | end 136 | 137 | resources["items"].each do |item| 138 | 139 | if item["kind"] == "Secret" && item["type"] == "kubernetes.io/service-account-token" 140 | next 141 | end 142 | 143 | # skip pods with ownerReferences (means created by deployment, cronjob, daemonset) 144 | if item["kind"] == "Pod" && item.dig("metadata", "ownerReferences") 145 | if item["metadata"]["ownerReferences"].size > 1 146 | puts YAML.dump(item) 147 | raise "many ownerReferences" 148 | end 149 | 150 | ref = item["metadata"]["ownerReferences"].first 151 | next if SKIP_POD_OWNERS.include?(ref["kind"]) 152 | end 153 | 154 | if item["kind"] == "Job" && item.dig("metadata", "ownerReferences") 155 | if item["metadata"]["ownerReferences"].size > 1 156 | puts YAML.dump(item) 157 | raise "many ownerReferences" 158 | end 159 | ref = item["metadata"]["ownerReferences"].first 160 | next if SKIP_JOB_OWNERS.include?(ref["kind"]) 161 | end 162 | 163 | if item["kind"] == "Endpoints" 164 | if item["subsets"] && item["subsets"][0] 165 | if addresses = item["subsets"][0]["addresses"] || addresses = item["subsets"][0]["notReadyAddresses"] 166 | if addresses[0] && addresses[0]["targetRef"] && addresses[0]["targetRef"]["kind"] == "Pod" 167 | # skip endpoints created by services 168 | next 169 | 
end 170 | end 171 | end 172 | end 173 | 174 | namespace = item.dig("metadata", "namespace") 175 | 176 | if skip_namespaces.include?(namespace) 177 | name = item.dig("metadata", "name") 178 | logger.info "skip resource #{namespace}/#{item["kind"]}/#{name} by namespace filter" 179 | next 180 | end 181 | 182 | if only_namespaces && !only_namespaces.include?(namespace) 183 | name = item.dig("metadata", "name") 184 | logger.info "skip resource #{namespace}/#{item["kind"]}/#{name} by namespace filter" 185 | next 186 | end 187 | 188 | if skip_object?(item, skip_patterns) 189 | name = item.dig("metadata", "name") 190 | logger.info "skip resource #{namespace}/#{item["kind"]}/#{name}" 191 | next 192 | end 193 | 194 | clean_resource!(item) 195 | item = sort_keys!(item) 196 | writter.write_ns_res(item) 197 | end 198 | end 199 | 200 | Plugins::Grafana.new(writter).run 201 | 202 | writter.print_changed_files 203 | end 204 | 205 | def self.kubectl(command, resource, options = {}) 206 | options[:o] ||= 'json' 207 | 208 | args = options.to_a.map do |key, value| 209 | key = key.to_s 210 | key = "-#{key.size > 1 ? "-" : ""}#{key}" 211 | 212 | if value.nil? 
213 | [key] 214 | else 215 | [key, "#{value}"] 216 | end 217 | end.flatten 218 | 219 | res = cmd("kubectl", command, resource, *args, ENV.to_h) 220 | 221 | if !res[:success] 222 | logger.error res[:stderr] 223 | end 224 | 225 | if res[:stdout] && res[:stdout].size > 0 226 | JSON.parse(res[:stdout]) 227 | else 228 | {"items" => []} # dummy 229 | end 230 | end 231 | 232 | def self.clean_resource!(resource) 233 | resource.delete("status") 234 | 235 | if resource["metadata"] 236 | resource["metadata"].delete("creationTimestamp") 237 | resource["metadata"].delete("selfLink") 238 | resource["metadata"].delete("uid") 239 | resource["metadata"].delete("resourceVersion") 240 | resource["metadata"].delete("generation") 241 | 242 | if resource["metadata"]["annotations"] 243 | resource["metadata"]["annotations"].delete("kubectl.kubernetes.io/last-applied-configuration") 244 | resource["metadata"]["annotations"].delete("control-plane.alpha.kubernetes.io/leader") 245 | resource["metadata"]["annotations"].delete("deployment.kubernetes.io/revision") 246 | 247 | if resource["metadata"]["annotations"] == {} 248 | resource["metadata"].delete("annotations") 249 | end 250 | end 251 | 252 | if resource["metadata"]["namespace"] == '' 253 | resource["metadata"].delete("namespace") 254 | end 255 | 256 | if resource["metadata"] == {} 257 | resource.delete("metadata") 258 | end 259 | end 260 | 261 | if resource["kind"] == "Service" && resource["spec"] 262 | if resource["spec"]["clusterIP"] != "None" 263 | resource["spec"].delete("clusterIP") 264 | end 265 | if resource["spec"] == {} 266 | resource.delete("spec") 267 | end 268 | end 269 | 270 | if resource["kind"] == "Pod" 271 | resource["spec"].delete("nodeName") 272 | resource["spec"].delete("tolerations") 273 | 274 | _cleanup_pod_spec(resource["spec"]) 275 | end 276 | 277 | if resource["kind"] == "Deployment" || resource["kind"] == "DaemonSet" || resource["kind"] == "StatefulSet" 278 | meta = resource.dig('spec', 'template', 'metadata') 
279 | if meta.has_key?('creationTimestamp') && meta['creationTimestamp'].nil? 280 | meta.delete('creationTimestamp') 281 | if meta == {} 282 | resource['spec']['template'].delete('metadata') 283 | end 284 | end 285 | if resource['spec']['progressDeadlineSeconds'] == 600 286 | resource['spec'].delete('progressDeadlineSeconds') 287 | end 288 | _cleanup_pod_spec(resource.dig('spec', 'template', 'spec')) 289 | end 290 | 291 | if resource["kind"] == "CronJob" 292 | meta = resource.dig('spec', 'jobTemplate', 'metadata') 293 | if meta.has_key?('creationTimestamp') && meta['creationTimestamp'].nil? 294 | meta.delete('creationTimestamp') 295 | if meta == {} 296 | resource['spec']['jobTemplate'].delete('metadata') 297 | end 298 | end 299 | if resource['spec']['progressDeadlineSeconds'] == 600 300 | resource['spec'].delete('progressDeadlineSeconds') 301 | end 302 | _cleanup_pod_spec(resource.dig('spec', 'jobTemplate', 'spec')) 303 | end 304 | 305 | 306 | resource 307 | end 308 | 309 | def self._cleanup_pod_spec(pod_spec) 310 | if pod_spec['restartPolicy'] == "Always" 311 | pod_spec.delete('restartPolicy') 312 | end 313 | if pod_spec['schedulerName'] == "default-scheduler" 314 | pod_spec.delete('schedulerName') 315 | end 316 | if pod_spec['securityContext'] == {} 317 | pod_spec.delete('securityContext') 318 | end 319 | if pod_spec['terminationGracePeriodSeconds'] == 30 320 | pod_spec.delete('terminationGracePeriodSeconds') 321 | end 322 | if pod_spec['dnsPolicy'] == 'ClusterFirst' 323 | pod_spec.delete('dnsPolicy') 324 | end 325 | 326 | (pod_spec["containers"] || []).each do |container| 327 | _cleanup_container(container) 328 | end 329 | end 330 | 331 | def self._cleanup_container(container) 332 | if container['terminationMessagePath'] == "/dev/termination-log" 333 | container.delete('terminationMessagePath') 334 | end 335 | if container['terminationMessagePolicy'] == "File" 336 | container.delete('terminationMessagePolicy') 337 | end 338 | end 339 | 340 | def 
self.sort_keys!(resource) 341 | resource.sort_by do |k, v| 342 | if k == "apiVersion" 343 | "_0" 344 | elsif k == "kind" 345 | "_1" 346 | elsif k == "metadata" 347 | "_2" 348 | elsif k == "type" 349 | "_3" 350 | else 351 | k 352 | end 353 | end.to_h 354 | end 355 | 356 | def self.combine_types(types, extras:, exclude:, only:) 357 | if only 358 | return only.downcase.split(",").map(&:strip).map(&:to_sym) 359 | end 360 | 361 | if extras 362 | extras = extras.downcase.split(",").map(&:strip).map(&:to_sym) 363 | types.push(*extras) 364 | end 365 | 366 | if exclude 367 | exclude = exclude.downcase.split(",").map(&:strip).map(&:to_sym) 368 | types.delete_if {|r| exclude.include?(r) } 369 | end 370 | 371 | types 372 | end 373 | 374 | def self.skip_object?(item, patterns) 375 | return false if patterns.size == 0 376 | 377 | ns = item.dig("metadata", "namespace") 378 | ns = nil if ns == '' 379 | 380 | object_parts = [ns, item["kind"], item.dig("metadata", "name")].compact 381 | 382 | patterns.each do |pattern| 383 | pattern = pattern.downcase 384 | 385 | if pattern == object_parts.join("/").downcase 386 | return true 387 | end 388 | 389 | pattern_parts = pattern.split("/") 390 | mismatch = false 391 | object_parts.each_with_index do |part, index| 392 | if pattern_parts[index] == "*" || part.downcase == pattern_parts[index] 393 | # good 394 | else 395 | mismatch = true 396 | end 397 | end 398 | 399 | return true if !mismatch 400 | end 401 | 402 | return false 403 | end 404 | 405 | def self.push_changes!(options) 406 | writter = Writter.new(options) 407 | 408 | changes_list = writter.get_changes 409 | 410 | if changes_list 411 | changes_lines = changes_list.split("\n") 412 | namespaces = [] 413 | resources = [] 414 | 415 | prefix = options[:git_prefix] ? 
options[:git_prefix].sub(/\/$/, '') + "/" : false 416 | 417 | changes_lines.each do |line| 418 | line = line.strip.gsub('"', '') 419 | info = line.match(/^(?.+?)\s+(?.+)$/) 420 | info["file"].sub!(prefix, '') if prefix 421 | file_parts = info["file"].sub(/\.yaml$/, '').split("/") 422 | 423 | if file_parts[0] != "_global_" 424 | namespaces << file_parts[0] 425 | end 426 | resources << file_parts[1] 427 | end 428 | namespaces.uniq! 429 | resources.uniq! 430 | 431 | message = [ 432 | "Updated", 433 | resources.size > 0 ? "#{resources.join(", ")}" : nil, 434 | namespaces.size > 0 ? "in namespace#{namespaces.size > 1 ? "s" : ""} #{namespaces.join(", ")}." : nil, 435 | "#{changes_lines.size} item#{changes_lines.size > 1 ? "s" : ""}" 436 | ].compact.join(" ") 437 | 438 | writter.push_changes!(message) 439 | end 440 | end 441 | 442 | end 443 | -------------------------------------------------------------------------------- /lib/kube_backup/cli.rb: -------------------------------------------------------------------------------- 1 | require 'commander' 2 | 3 | class KubeBackup::CLI 4 | include Commander::Methods 5 | 6 | DEFAULT_VALUES = {target_path: "./kube_state"} 7 | 8 | def default_args_from_env(defaults = {}) 9 | args = defaults 10 | 11 | if ENV['GIT_REPO_URL'] 12 | args[:repo_url] = ENV['GIT_REPO_URL'] 13 | end 14 | 15 | vars = [ 16 | :target_path, :skip_namespaces, :only_namespaces, 17 | :global_resources, :extra_global_resources, :skip_global_resources, 18 | :resources, :extra_resources, :skip_resources, :skip_objects, 19 | :git_user, :git_email, :git_branch, :git_prefix 20 | ] 21 | 22 | vars.each do |var| 23 | env_value = ENV[var.to_s.upcase] 24 | if env_value 25 | args[var] = env_value 26 | end 27 | end 28 | 29 | if ENV['BACKUP_VERBOSE'] 30 | args[:verbose] = ["1", "true", "yes", "ya"].include?(ENV['BACKUP_VERBOSE'].downcase) 31 | if args[:verbose] 32 | KubeBackup.verbose_logger! 
33 | end 34 | end 35 | 36 | args 37 | end 38 | 39 | def run 40 | program :name, 'kube_backup' 41 | program :version, KubeBackup::VERSION 42 | program :description, 'Backup kubernetes resources to git' 43 | program :help_formatter, :compact 44 | program :help_paging, false 45 | 46 | global_option('--verbose', 'Verbose logging (env var BACKUP_VERBOSE)') { 47 | $verbose = true 48 | KubeBackup.verbose_logger! 49 | } 50 | 51 | command :backup do |c| 52 | c.syntax = 'kube_backup backup [options]' 53 | c.summary = 'Perform backup to local git repo' 54 | c.description = 'Create backup and save it in local folder' 55 | 56 | c.option '--repo-url VAL', 'Git repo URL (env var GIT_REPO_URL)' 57 | c.option '--target-path VAL', 'Local git path (env var TARGET_PATH)' 58 | 59 | c.option '--skip-namespaces VAL', 'Namespaces to skip, separated by coma' 60 | c.option '--only-namespaces VAL', 'Namespaces whitelist, separated by coma' 61 | 62 | c.option '--global-resources VAL', 'Override global resources list' 63 | c.option '--extra-global-resources VAL', 'Additional global resources' 64 | c.option '--skip-global-resources VAL', 'Global resources to exclude' 65 | 66 | c.option '--resources VAL', 'Override global resources list' 67 | c.option '--extra-resources VAL', 'Additional global resources' 68 | c.option '--skip-resources VAL', 'Resources to exclude' 69 | 70 | c.option '--skip-objects VAL', 'Skip objects, as namespaces/ObjectType/name. 
Also can use * for any segment as default/Secret/*,app/Pod/*' 71 | 72 | c.option '--git-branch VAL', 'Git branch, default is master (env var GIT_BRANCH)' 73 | c.option '--git-prefix VAL', 'Path to the subdirectory in your repository (env var GIT_PREFIX)' 74 | 75 | c.action do |args, options| 76 | options.default(default_args_from_env(DEFAULT_VALUES)) 77 | KubeBackup.perform_backup!(options.__hash__) 78 | end 79 | end 80 | 81 | command :push do |c| 82 | c.syntax = 'kube_backup push [options]' 83 | c.summary = 'Push changes to remote git repo' 84 | c.description = 'Commit latest changes and put to remove repository' 85 | 86 | c.option '--repo-url VAL', 'Git repo URL (env var GIT_REPO_URL)' 87 | c.option '--target VAL', 'Local git path (env var TARGET_PATH)' 88 | 89 | c.option '--git-user VAL', 'Git username for commit (env var GIT_USER)' 90 | c.option '--git-email VAL', 'Git email for commit (env var GIT_EMAIL)' 91 | c.option '--git-branch VAL', 'Git branch, default is master (env var GIT_BRANCH)' 92 | c.option '--git-prefix VAL', 'Path to the subdirectory in your repository (env var GIT_PREFIX)' 93 | 94 | c.action do |args, options| 95 | options.default(default_args_from_env(DEFAULT_VALUES)) 96 | 97 | KubeBackup.push_changes!(options.__hash__) 98 | end 99 | end 100 | 101 | default_command :backup 102 | 103 | run! 
104 | end 105 | end 106 | -------------------------------------------------------------------------------- /lib/kube_backup/cmd_utils.rb: -------------------------------------------------------------------------------- 1 | require 'shellwords' 2 | require 'open3' 3 | 4 | module KubeBackup 5 | module CmdUtils 6 | def pipe_stream(from, to, buffer: nil, skip_piping: false) 7 | thread = Thread.new do 8 | begin 9 | while char = from.getc 10 | to.write(char) if !skip_piping 11 | buffer << char if buffer 12 | end 13 | rescue IOError => error 14 | #p error 15 | end 16 | 17 | #remaining = from.read 18 | #to.write(remaining) if !skip_piping 19 | #buffer << remaining if buffer 20 | end 21 | 22 | #thread.abort_on_exception = true 23 | end 24 | 25 | def record_stream(from, buffer: nil) 26 | pipe_stream(from, nil, buffer: buffer, skip_piping: true) 27 | end 28 | 29 | def cmd(command, *args) 30 | args = args.flatten 31 | 32 | env_vars = args.last.is_a?(Hash) ? args.pop : {} 33 | env_vars = env_vars.dup 34 | modified_env_vars = env_vars.dup 35 | 36 | ENV.each do |key, value| 37 | env_vars[key] ||= value 38 | end 39 | 40 | # make it always use english 41 | env_vars["LC_ALL"] = "C" 42 | 43 | escaped_args = args.map do |arg| 44 | if arg && arg.to_s.start_with?("|", ">", "<", "&") 45 | arg.to_s 46 | else 47 | Shellwords.escape(arg) 48 | end 49 | end 50 | 51 | command = "#{command} #{escaped_args.join(" ")}".strip 52 | 53 | #if verbose_logging? 
54 | KubeBackup.logger.info "RUN #{command.colorize(:green)}" 55 | #end 56 | 57 | KubeBackup.logger.debug "ENV #{KubeBackup::LogUtil.hash(modified_env_vars)}" if modified_env_vars.size > 0 58 | 59 | stdout_str = "" 60 | stderr_str = "" 61 | exit_status = nil 62 | start_time = Time.now.to_f 63 | #process_error = nil 64 | #io_threads = [] 65 | 66 | stdout_str, stderr_str, exit_status = Open3.capture3(env_vars, command) 67 | 68 | if exit_status != 0 69 | KubeBackup.logger.warn "Process #{exit_status.pid} exit with code #{exit_status.exitstatus}" 70 | end 71 | 72 | # Open3.popen3(env_vars, command) do |stdin, stdout, stderr, wait_thr| 73 | # begin 74 | # #if KubeBackup.verbose_logging? 75 | # # io_threads << pipe_stream(stdout, STDOUT, buffer: stdout_str) 76 | # # io_threads << pipe_stream(stderr, STDERR, buffer: stderr_str) 77 | # #else 78 | # # io_threads << record_stream(stdout, buffer: stdout_str) 79 | # # io_threads << record_stream(stderr, buffer: stderr_str) 80 | # #end 81 | # 82 | # 83 | # puts "complet 0" 84 | # 85 | # exit_status = wait_thr.value 86 | # 87 | # puts "complete 1" 88 | # p exit_status 89 | # 90 | # stdout_str = stdout.read 91 | # stderr_str = stderr.read 92 | # 93 | # puts "complete 2" 94 | # 95 | # 96 | # rescue => error 97 | # p error 98 | # process_error = error 99 | # end 100 | # end 101 | 102 | #io_threads.each(&:value) 103 | 104 | # raise process_error if process_error 105 | 106 | { 107 | exit_code: exit_status.exitstatus, 108 | pid: exit_status.pid, 109 | stdout: stdout_str, 110 | stderr: stderr_str, 111 | success: exit_status.success?, 112 | time: Time.now.to_f - start_time 113 | } 114 | end 115 | end 116 | end 117 | -------------------------------------------------------------------------------- /lib/kube_backup/log_util.rb: -------------------------------------------------------------------------------- 1 | module KubeBackup 2 | module LogUtil 3 | extend self 4 | 5 | def dump(object) 6 | if object.is_a?(Hash) 7 | hash(object) 8 | elsif 
object.is_a?(Time) 9 | object.to_s 10 | #elsif object.is_a?(BigDecimal) 11 | # object.to_s("F") 12 | elsif object.is_a?(Range) && object.first.is_a?(Time) 13 | middle = object.exclude_end? ? "..." : ".." 14 | "#{object.first.to_s}#{middle}#{object.last.to_s}" 15 | else 16 | object.inspect 17 | end 18 | end 19 | 20 | def hash(object) 21 | values = [] 22 | object.each do |key, value| 23 | separator = ":" 24 | key_str = if key.is_a?(Symbol) 25 | key.to_s =~ /\s/ ? key.to_s.inspect : key.to_s 26 | else 27 | separator = " =>" unless key.is_a?(String) 28 | key.inspect 29 | end 30 | values << "#{key_str}#{separator} #{dump(value)}" 31 | end 32 | 33 | "{#{values.join(", ")}}" 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /lib/kube_backup/logger.rb: -------------------------------------------------------------------------------- 1 | module KubeBackup 2 | module Logger 3 | 4 | def logger 5 | @logger ||= begin 6 | require 'logger' 7 | logger = ::Logger.new(STDOUT) 8 | logger.level = ::Logger::INFO 9 | 10 | logger.formatter = proc { |severity, datetime, progname, msg| 11 | res = "#{datetime.strftime("%T.%L")}: #{msg}\n" 12 | if severity == "WARN" 13 | puts res.colorize(:yellow) 14 | elsif severity == "ERROR" 15 | puts res.colorize(:red) 16 | elsif severity == "DEBUG" 17 | puts res.colorize(:light_black) 18 | else 19 | puts res 20 | end 21 | } 22 | 23 | logger 24 | end 25 | end 26 | 27 | def verbose_logger! 28 | self.logger.level = ::Logger::DEBUG 29 | end 30 | 31 | def verbose_logging? 
module KubeBackup; end
module KubeBackup::Plugins

  # Backs up Grafana state (dashboards, datasources, frontend settings,
  # organization and org users) as pretty-printed JSON files under the
  # "_grafana_" folder of the backup repository.
  class Grafana

    # Raised when Grafana returns an HTTP error status or cannot be reached.
    class FetchError < StandardError; end

    # writter - KubeBackup::Writter used to persist the fetched JSON.
    # Connection settings are read from the GRAFANA_URL / GRAFANA_TOKEN
    # environment variables.
    def initialize(writter)
      @grafana_url = ENV['GRAFANA_URL']
      @grafana_token = ENV['GRAFANA_TOKEN']
      @writter = writter
    end

    # Run all backup steps. Silently skipped when GRAFANA_URL is unset/empty.
    # On any fetch error the partially written "_grafana_" folder is restored
    # from git HEAD, so a failed run never commits a half-finished backup.
    def run
      if !@grafana_url || @grafana_url == ''
        KubeBackup.logger.info "Skip Grafana plugin"
        return
      end

      backup_dashboards
      backup_data_sources
      backup_frontend_settings
      backup_org
      backup_org_users
    rescue FetchError => error
      KubeBackup.logger.error("#{error.class}: #{error.message}\n#{error.backtrace.join("\n")}")
      @writter.restore("_grafana_")
    end

    # Save every dashboard (and dashboard folder) returned by /api/search,
    # grouped as _grafana_/dashboards/<folderTitle>/<name>.json.
    def backup_dashboards
      dashboards = get_json('/api/search')

      dashboards.each do |dashboard|
        if dashboard['type'] == "dash-db" || dashboard['type'] == "dash-folder"
          dash_name = dashboard['uri'].sub(%r{^db/}, '')

          KubeBackup.logger.info "Saving dashboard #{dashboard['folderTitle']}/#{dash_name}"

          dashboard_json = get_json("/api/dashboards/#{dashboard['uri']}")

          file_path = [
            "_grafana_/dashboards",
            dashboard['folderTitle'],
            dash_name
          ].compact.join('/')
          @writter.write_raw("#{file_path}.json", JSON.pretty_generate(dashboard_json))
        else
          raise "unknown type '#{dashboard['type']}'"
        end
      end
    end

    # Save every datasource as _grafana_/datasources/<name>.json.
    def backup_data_sources
      datasources = get_json('/api/datasources/')

      datasources.each do |datasource|
        KubeBackup.logger.info "Saving datasource #{datasource['name']}"
        @writter.write_raw("_grafana_/datasources/#{datasource['name']}.json", JSON.pretty_generate(datasource))
      end
    end

    # Save the frontend settings blob as _grafana_/frontend_settings.json.
    def backup_frontend_settings
      settings = get_json('/api/frontend/settings')

      KubeBackup.logger.info "Saving frontend_settings"
      @writter.write_raw("_grafana_/frontend_settings.json", JSON.pretty_generate(settings))
    end

    # Save the current organization as _grafana_/org.json.
    def backup_org
      org = get_json('/api/org')

      KubeBackup.logger.info "Saving organization"
      @writter.write_raw("_grafana_/org.json", JSON.pretty_generate(org))
    end

    # Save each org user as _grafana_/users/<login>.json.
    # Volatile "last seen" fields are stripped so they don't produce a
    # git diff on every backup run.
    def backup_org_users
      users = get_json('/api/org/users')

      users.each do |user|
        KubeBackup.logger.info "Saving user #{user['login']}"
        user.delete('lastSeenAtAge')
        user.delete('lastSeenAt')
        @writter.write_raw("_grafana_/users/#{user['login']}.json", JSON.pretty_generate(user))
      end
    end

    # GET a Grafana API path and return the parsed JSON body.
    # Raises FetchError on HTTP status >= 400 AND on transport-level
    # failures, so #run's rescue can always roll back a partial backup.
    def get_json(path)
      require 'json'
      require 'excon'

      url = @grafana_url + path
      KubeBackup.logger.debug "GET #{url}"

      begin
        response = Excon.get(url, {
          headers: {
            'Authorization' => "Bearer #{@grafana_token}"
          }
        })
      rescue Excon::Error => error
        # Connection refused / DNS / timeout errors previously escaped
        # #run's `rescue FetchError`, crashing the backup without
        # restoring the "_grafana_" folder from HEAD.
        raise FetchError, "Can not connect to grafana: #{url} - #{error.class}: #{error.message}"
      end

      if response.status >= 400
        raise FetchError, "Can not connect to grafana: #{url} - #{response.status} #{response.status_line}"
      end

      JSON.parse(response.body)
    end
  end

end
require 'shellwords'
require 'yaml'
require 'fileutils'
require 'socket'

module KubeBackup
  # Writes backup files into a local git clone and pushes changes upstream.
  class Writter
    # options:
    #   :target_path - local working directory for the git clone
    #   :repo_url    - git remote URL
    #   :git_branch  - branch to track (default "master")
    #   :git_prefix  - subdirectory inside the repo to write into (default ".")
    #   :git_email / :git_name - committer identity (optional)
    def initialize(options = {})
      @options = options
      @target = options[:target_path]
      @git_url = options[:repo_url]
      @git_branch = options[:git_branch] || 'master'
      @git_prefix = options[:git_prefix] || '.'
    end

    # Clone the remote repo and clear its content, ready for a fresh dump.
    def init_repo!
      clone_repo!
      remove_repo_content!
    end

    # Returns the `git status --porcelain` output for pending changes,
    # or false when the working tree is clean. Raises on git failure.
    def get_changes
      Dir.chdir(@target) do
        changes = KubeBackup.cmd(%{git status --porcelain "#{@git_prefix}" --untracked-files=all})

        unless changes[:success]
          KubeBackup.logger.error changes[:stderr]
          raise changes[:stderr] || "git status error"
        end

        if changes[:stdout] == ''
          KubeBackup.logger.info "No changes"
          return false
        else
          puts changes[:stdout]
          return changes[:stdout]
        end
      end
    end

    # Commit everything under @git_prefix and push to the tracked branch.
    # message - commit message. Identity and message are shell-escaped
    # (consistent with #restore) so quotes/metacharacters cannot break
    # or inject into the git command line.
    def push_changes!(message)
      Dir.chdir(@target) do
        email = @options[:git_email] || "kube-backup@#{Socket.gethostname}"
        name = @options[:git_name] || "kube-backup"

        run_cmd! %{git config user.email #{Shellwords.escape(email)}}
        run_cmd! %{git config user.name #{Shellwords.escape(name)}}

        run_cmd! %{git add "#{@git_prefix}" --all}
        run_cmd! %{git commit -m #{Shellwords.escape(message)}}

        res = run_cmd! %{git push origin "#{@git_branch}"}

        # git push writes progress to stderr even on success; surface it.
        KubeBackup.logger.error res[:stdout] if res[:stdout] != ''
        KubeBackup.logger.error res[:stderr] if res[:stderr] != ''
      end
    end

    # Run a shell command, raising (with logged output) on failure.
    # Returns the KubeBackup.cmd result hash on success.
    def run_cmd!(command)
      res = KubeBackup.cmd(command)

      unless res[:success]
        KubeBackup.logger.error res[:stdout] if res[:stdout] != ''
        KubeBackup.logger.error res[:stderr] if res[:stderr] != ''
        raise res[:stderr] || "git command error"
      end

      res
    end

    # Log the porcelain list of changed files (informational only).
    def print_changed_files
      Dir.chdir(@target) do
        res = KubeBackup.cmd(%{git status --porcelain "#{@git_prefix}"})
        if res[:stdout] == ''
          KubeBackup.logger.info "No changes"
        else
          KubeBackup.logger.info "Changes:\n#{res[:stdout]}"
        end
      end
    end

    # Write a cluster-scoped (non-namespaced) resource under _global_/<kind>/.
    def write_res(resource)
      type = resource["kind"]
      write_yaml("_global_/#{type}/#{resource["metadata"]["name"]}.yaml", resource)
    end

    # Write raw string content to <target>/<prefix>/<path>, creating parent
    # directories as needed. ":" is replaced with "_" for filesystem safety.
    def write_raw(path, content)
      full_path = File.join(@target, @git_prefix, path)
      full_path.gsub!(":", "_")

      dirname = File.dirname(full_path)

      if dirname != @target
        FileUtils.mkdir_p(dirname)
      end

      File.open(full_path, 'w') do |f|
        f.write(content)
      end
    end

    # Write a namespaced resource under <namespace>/<kind>/<name>.yaml.
    def write_ns_res(resource)
      ns = resource["metadata"]["namespace"]
      type = resource["kind"] #gsub(/(.)([A-Z])/,'\1_\2').downcase

      write_yaml("#{ns}/#{type}/#{resource["metadata"]["name"]}.yaml", resource)
    end

    # Serialize data to YAML at <target>/<prefix>/<path>, creating parent
    # directories as needed. ":" is replaced with "_" for filesystem safety.
    def write_yaml(path, data)
      full_path = File.join(@target, @git_prefix, path)
      full_path.gsub!(":", "_")

      dirname = File.dirname(full_path)

      if dirname != @target
        FileUtils.mkdir_p(dirname)
      end

      File.open(full_path, 'w') do |f|
        f.write(YAML.dump(data))
      end
    end

    # Restore a path (relative to the git prefix) from HEAD, discarding
    # any partial writes from the current run. Raises on git failure.
    def restore(path)
      full_path = File.join(@git_prefix, path)

      Dir.chdir(@target) do
        res = KubeBackup.cmd(%{git checkout -f HEAD -- #{Shellwords.escape(full_path)}})
        if res[:success]
          KubeBackup.logger.info "Restored #{full_path} from HEAD"
        else
          KubeBackup.logger.error res[:stderr]
          raise res[:stderr] || "git reset error"
        end
      end
    end

    # Delete everything inside the backup prefix except dot-entries
    # (".", "..", ".git", …) so resources removed from the cluster also
    # disappear from the repository on the next commit.
    def remove_repo_content!
      prefix_dir = File.join(@target, @git_prefix)

      objects = Dir.entries(prefix_dir).map do |object|
        if object.start_with?(".")
          nil
        else
          # BUG FIX: entries are listed from prefix_dir, so they must be
          # joined back with prefix_dir as well. Joining with @target
          # alone pointed at nonexistent/wrong paths whenever
          # :git_prefix was not ".".
          File.join(prefix_dir, object)
        end
      end.compact

      FileUtils.rm_r(objects, verbose: false)
    end

    # Clone the remote branch (shallow). If the branch does not exist yet,
    # fall back to `git init` + `git remote add origin` so the first push
    # creates it. Raises on any other clone failure.
    def clone_repo!
      check_known_hosts!

      res = KubeBackup.cmd(%{git clone -b "#{@git_branch}" --depth 10 "#{@git_url}" "#{@target}"})
      FileUtils.mkdir_p(File.join(@target, @git_prefix))

      unless res[:success]
        KubeBackup.logger.error(res[:stderr])
        if res[:stderr] =~ /Remote branch #{@git_branch} not found in upstream origin/
          Dir.chdir(@target) do
            KubeBackup.logger.info("Init new repo..")
            cmd_res = KubeBackup.cmd(%{git init .})
            KubeBackup.logger.error(cmd_res[:stderr]) unless cmd_res[:success]
            cmd_res = KubeBackup.cmd(%{git remote add origin "#{@git_url}"})
            KubeBackup.logger.error(cmd_res[:stderr]) unless cmd_res[:success]
          end
        else
          raise res[:stderr] || "git clone error"
        end
      end
    end

    # Ensure the git host's SSH key is present in ~/.ssh/known_hosts,
    # running ssh-keyscan when it is missing. Supports both
    # "user@host:path" and "http(s)://host/path" style URLs; warns and
    # skips when the URL cannot be parsed.
    def check_known_hosts!
      git_host = if m = @git_url.match(/.+@(.+?):/)
        m[1]
      elsif m = @git_url.match(/https?:\/\/(.+?)\//)
        m[1]
      else
        KubeBackup.logger.warn "Can't parse git url, skip ssh-keyscan"
        nil
      end

      if git_host
        known_hosts = "#{ENV['HOME']}/.ssh/known_hosts"

        if File.exist?(known_hosts)
          content = File.open(known_hosts, 'r:utf-8', &:read)
          if content.split("\n").any? {|l| l.strip.start_with?("#{git_host},", "#{git_host} ") }
            KubeBackup.logger.info "File #{known_hosts} already contain #{git_host}"
            return
          end
        end

        res = KubeBackup.cmd(%{ssh-keyscan -H #{git_host} >> #{known_hosts}})

        if res[:success]
          KubeBackup.logger.info "Added #{git_host} to #{known_hosts}"
        else
          KubeBackup.logger.error res[:stderr]
          raise res[:stderr] || "git clone error"
        end
      end
    end
  end
end
34 | end 35 | -------------------------------------------------------------------------------- /test/samples/config_map.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | DATABASE_NAME: my_app 5 | kind: ConfigMap 6 | metadata: 7 | name: general-config 8 | namespace: app 9 | -------------------------------------------------------------------------------- /test/samples/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "2" 6 | creationTimestamp: 2019-12-07T16:47:57Z 7 | generation: 7 8 | name: rabbitmq 9 | namespace: garuda 10 | resourceVersion: "1332053" 11 | selfLink: /apis/apps/v1/namespaces/garuda/deployments/rabbitmq 12 | uid: 4502f9a3-9280-484b-b4a1-a3ed58efd10f 13 | spec: 14 | progressDeadlineSeconds: 600 15 | replicas: 1 16 | revisionHistoryLimit: 10 17 | selector: 18 | matchLabels: 19 | app: rabbitmq 20 | strategy: 21 | type: Recreate 22 | template: 23 | metadata: 24 | creationTimestamp: null 25 | labels: 26 | app: rabbitmq 27 | spec: 28 | containers: 29 | - args: 30 | - chown -R rabbitmq:rabbitmq /etc/rabbitmq/ ; docker-entrypoint.sh rabbitmq-server 31 | command: 32 | - /bin/sh 33 | - -c 34 | env: 35 | - name: RABBITMQ_NODENAME 36 | value: rabbitmq 37 | - name: RABBITMQ_ERL_COOKIE 38 | value: OWWCHZMTPYXSATTIVCJG 39 | image: rabbitmq:3.8.2-management-alpine 40 | imagePullPolicy: IfNotPresent 41 | name: rabbitmq 42 | ports: 43 | - containerPort: 15672 44 | name: rabbitmq-admin 45 | protocol: TCP 46 | - containerPort: 5672 47 | name: rabbitmq 48 | protocol: TCP 49 | readinessProbe: 50 | exec: 51 | command: 52 | - rabbitmqctl 53 | - status 54 | failureThreshold: 3 55 | initialDelaySeconds: 30 56 | periodSeconds: 90 57 | successThreshold: 1 58 | timeoutSeconds: 15 59 | resources: 60 | limits: 61 | cpu: 150m 62 | memory: 300Mi 
63 | requests: 64 | cpu: 100m 65 | memory: 300Mi 66 | terminationMessagePath: /dev/termination-log 67 | terminationMessagePolicy: File 68 | volumeMounts: 69 | - mountPath: /var/lib/rabbitmq 70 | name: rabbitmq-storage 71 | dnsPolicy: ClusterFirst 72 | hostname: rabbitmq-rc1 73 | restartPolicy: Always 74 | schedulerName: default-scheduler 75 | securityContext: {} 76 | terminationGracePeriodSeconds: 30 77 | volumes: 78 | - hostPath: 79 | path: /opt/rabbitmq 80 | type: "" 81 | name: rabbitmq-storage 82 | status: 83 | availableReplicas: 1 84 | conditions: 85 | - lastTransitionTime: 2019-12-07T16:47:57Z 86 | lastUpdateTime: 2019-12-14T18:12:57Z 87 | message: ReplicaSet "rabbitmq-5565966f45" has successfully progressed. 88 | reason: NewReplicaSetAvailable 89 | status: "True" 90 | type: Progressing 91 | - lastTransitionTime: 2019-12-14T18:21:16Z 92 | lastUpdateTime: 2019-12-14T18:21:16Z 93 | message: Deployment has minimum availability. 94 | reason: MinimumReplicasAvailable 95 | status: "True" 96 | type: Available 97 | observedGeneration: 7 98 | readyReplicas: 1 99 | replicas: 1 100 | updatedReplicas: 1 -------------------------------------------------------------------------------- /test/samples/single_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | kubectl.kubernetes.io/last-applied-configuration: | 6 | {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"name":"kt","namespace":"app"},"spec":{"containers":[{"args":["sleep 1000000"],"command":["/bin/sh","-c"],"image":"evpavel/kt:0.4","name":"kt","resources":{"limits":{"cpu":"100m","memory":"300Mi"},"requests":{"cpu":"100m","memory":"200Mi"}}}]}} 7 | creationTimestamp: 2019-01-24T06:12:51Z 8 | name: kt 9 | namespace: app 10 | resourceVersion: "173803040" 11 | selfLink: /api/v1/namespaces/app/pods/kt 12 | uid: 14ba4c6d-1f9f-11e9-be97-00505600fb02 13 | spec: 14 | containers: 15 | - args: 16 | - sleep 
1000000 17 | command: 18 | - /bin/sh 19 | - -c 20 | image: evpavel/kt:0.4 21 | imagePullPolicy: IfNotPresent 22 | name: kt 23 | resources: 24 | limits: 25 | cpu: 100m 26 | memory: 300Mi 27 | requests: 28 | cpu: 100m 29 | memory: 200Mi 30 | terminationMessagePath: /dev/termination-log 31 | terminationMessagePolicy: File 32 | volumeMounts: 33 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 34 | name: default-token-vg27v 35 | readOnly: true 36 | dnsPolicy: ClusterFirst 37 | nodeName: node-a1 38 | restartPolicy: Always 39 | schedulerName: default-scheduler 40 | securityContext: {} 41 | serviceAccount: default 42 | serviceAccountName: default 43 | terminationGracePeriodSeconds: 30 44 | tolerations: 45 | - effect: NoExecute 46 | key: node.alpha.kubernetes.io/notReady 47 | operator: Exists 48 | tolerationSeconds: 300 49 | - effect: NoExecute 50 | key: node.alpha.kubernetes.io/unreachable 51 | operator: Exists 52 | tolerationSeconds: 300 53 | volumes: 54 | - name: default-token-vg27v 55 | secret: 56 | defaultMode: 420 57 | secretName: default-token-vg27v 58 | status: 59 | conditions: 60 | - lastProbeTime: null 61 | lastTransitionTime: 2019-01-24T06:12:51Z 62 | status: "True" 63 | type: Initialized 64 | - lastProbeTime: null 65 | lastTransitionTime: 2019-03-23T03:06:32Z 66 | status: "True" 67 | type: Ready 68 | - lastProbeTime: null 69 | lastTransitionTime: 2019-01-24T06:12:51Z 70 | status: "True" 71 | type: PodScheduled 72 | containerStatuses: 73 | - containerID: docker://61941b15a5564bc4876fca83801d798c1f8ff91647f117fe109aa45227b8670d 74 | image: evpavel/kt:0.4 75 | imageID: docker-pullable://evpavel/kt@sha256:8e3d6197fbc75264e6271c0f9bdc7e8cc911ac41a9caa0d00541a8be6ab85a41 76 | lastState: 77 | terminated: 78 | containerID: docker://2349193cf8738648a359cc0f32edf5184d5f596f985269820fd4941f5c1beb0f 79 | exitCode: 0 80 | finishedAt: 2019-03-23T03:06:30Z 81 | reason: Completed 82 | startedAt: 2019-03-11T13:19:50Z 83 | name: kt 84 | ready: true 85 | restartCount: 
# Load a fixture from test/samples/<file>.yaml and return the parsed data.
def load_sample(file)
  sample_path = File.join(__dir__, "samples/#{file}.yaml")
  YAML.load_file(sample_path)
end