├── spec
│   ├── fixtures
│   │   ├── stacks
│   │   │   ├── empty
│   │   │   │   ├── empty.yml
│   │   │   │   └── test.yml
│   │   │   ├── test
│   │   │   │   └── test.yml
│   │   │   └── multidoc
│   │   │       └── resources
│   │   │           └── multidoc.yml
│   │   ├── cluster.minimal.yml
│   │   ├── yaml
│   │   │   └── erb
│   │   │       ├── no_erb.yml
│   │   │       ├── with_erb_no_extension.yml
│   │   │       ├── with_unknown_variable.yml.erb
│   │   │       ├── with_unknown_variable_conditional.yml.erb
│   │   │       ├── with_unknown_variable_assignment.yml.erb
│   │   │       └── with_erb.yml.erb
│   │   ├── cluster.yml
│   │   ├── cluster.yml.erb
│   │   ├── cluster.master_and_worker.yml
│   │   ├── secrets_cfg_no_encryption.yaml
│   │   ├── cluster_with_repos.yml
│   │   ├── secrets_cfg.yaml
│   │   └── terraform
│   │       ├── tf.json
│   │       ├── with_addons.json
│   │       └── with_api_endpoint.json
│   ├── pharos
│   │   ├── configuration
│   │   │   ├── os_release_spec.rb
│   │   │   └── cpu_arch_spec.rb
│   │   ├── phases
│   │   │   ├── migrate_master_spec.rb
│   │   │   ├── migrate_worker_spec.rb
│   │   │   ├── upgrade_master_spec.rb
│   │   │   ├── validate_configuration_changes_spec.rb
│   │   │   └── join_node_spec.rb
│   │   ├── version_command_spec.rb
│   │   ├── cluster_manager_spec.rb
│   │   ├── kubeadm
│   │   │   ├── init_config_spec.rb
│   │   │   └── kubeproxy_config_spec.rb
│   │   ├── terraform
│   │   │   ├── json_parser_spec.rb
│   │   │   └── legacy_json_parser_spec.rb
│   │   ├── phase_spec.rb
│   │   ├── command_options
│   │   │   └── tf_json_spec.rb
│   │   └── host
│   │       └── ubuntu
│   │           └── ubuntu_xenial_spec.rb
│   └── support
│       ├── fixtures_helper.rb
│       └── exit_with_error_helper.rb
├── Gemfile
├── .dockerignore
├── .rspec
├── Rakefile
├── examples
│   ├── terraform-packet
│   │   ├── versions.tf
│   │   ├── cluster.yml
│   │   ├── packet-cloud-config-secret.yaml.tpl
│   │   ├── terraform.example.tfvars
│   │   └── README.md
│   ├── terraform-aws
│   │   ├── cluster.yml
│   │   ├── README.md
│   │   ├── variables.tf
│   │   ├── output.tf
│   │   └── iam.tf
│   ├── vagrant
│   │   ├── ubuntu
│   │   │   ├── etcd_certs
│   │   │   │   ├── ca-csr.json
│   │   │   │   ├── client.json
│   │   │   │   ├── config.json
│   │   │   │   ├── client-key.pem
│   │   │   │   ├── server-key.pem
│   │   │   │   ├── client.csr
│   │   │   │   ├── server.csr
│   │   │   │   ├── client.pem
│   │   │   │   ├── ca.csr
│   │   │   │   ├── server.pem
│   │   │   │   ├── ca-config.json
│   │   │   │   └── ca.pem
│   │   │   ├── proxy-only.sh
│   │   │   ├── block-ssh.sh
│   │   │   ├── cluster.yml
│   │   │   └── cluster-external-etcd.yml
│   │   └── centos7
│   │       ├── cluster.yml
│   │       └── Vagrantfile
│   ├── terraform-do
│   │   ├── flannel
│   │   │   ├── 03-service-account.yml
│   │   │   ├── 02-cluster-role-binding.yml
│   │   │   ├── 01-cluster-role.yml
│   │   │   └── 04-config-map.yml.erb
│   │   ├── cluster.yml
│   │   ├── cluster_custom_network.yml
│   │   ├── pharos-addons
│   │   │   └── do-csi
│   │   │       ├── resources
│   │   │       │   └── 00-secret.yml.erb
│   │   │       └── addon.rb
│   │   └── README.md
│   └── authentication-token-webhook
│       ├── deploy
│       │   ├── cluster_role_binding.yml
│       │   └── daemonset.yml
│       └── README.md
├── bin
│   ├── pharos-cluster
│   └── pharos
├── lib
│   ├── pharos
│   │   ├── resources
│   │   │   ├── packet
│   │   │   │   ├── 01-serviceaccount.yml
│   │   │   │   ├── 03-clusterrolebinding.yml
│   │   │   │   └── 02-clusterrole.yml
│   │   │   ├── node_local_dns
│   │   │   │   ├── 00-service_account.yml
│   │   │   │   └── 01-configmap.yml.erb
│   │   │   ├── helm-controller
│   │   │   │   ├── 01-service-account.yml
│   │   │   │   ├── 00-crd.yml
│   │   │   │   ├── 02-cluster-role-binding.yml
│   │   │   │   └── 03-deployment.yml
│   │   │   ├── pharos
│   │   │   │   ├── 00-service-account.yml
│   │   │   │   ├── 13-cluster-role-pharos-cloud-controller.yml
│   │   │   │   ├── 11-cluster-role-binding-pvl-controller.yml
│   │   │   │   ├── 10-cluster-role-binding-node-controller.yml
│   │   │   │   ├── 12-cluster-role-binding-ccm.yml
│   │   │   │   ├── 03-cluster-role-pvl-controller.yml
│   │   │   │   ├── 14-cluster-role-binding-pharos-cloud-controller.yml
│   │   │   │   ├── 02-cluster-role-node-controller.yml
│   │   │   │   ├── 01-cluster-role-controller-manager.yml
│   │   │   │   └── 20-deployment.yml.erb
│   │   │   ├── metrics-server
│   │   │   │   ├── 01-metrics-server-service-account.yml
│   │   │   │   ├── metrics-apiservice.yml
│   │   │   │   ├── 02-auth-delegator.yml
│   │   │   │   ├── metrics-server-service.yml
│   │   │   │   ├── auth-reader.yml
│   │   │   │   ├── 03-cluster-role.yml
│   │   │   │   ├── cluster-role-binding.yml
│   │   │   │   └── metrics-server-deployment.yml.erb
│   │   │   ├── kubelet_rubber_stamp
│   │   │   │   ├── 02-service-account.yml
│   │   │   │   ├── 03-cluster-role-binding.yml
│   │   │   │   ├── 01-role.yml
│   │   │   │   └── 04-deployment.yml.erb
│   │   │   ├── weave
│   │   │   │   ├── 02-service-account.yml
│   │   │   │   ├── 06-flying-shuttle-cm.yml.erb
│   │   │   │   ├── 05-cluster-role-binding.yml
│   │   │   │   ├── 04-role-binding.yml
│   │   │   │   ├── 03-role.yml
│   │   │   │   ├── 01-cluster-role.yml.erb
│   │   │   │   └── 10-service.yml
│   │   │   ├── firewalld
│   │   │   │   ├── ipset.xml.erb
│   │   │   │   └── service.xml.erb
│   │   │   ├── secrets
│   │   │   │   └── encryption-config.yml.erb
│   │   │   ├── priority_classes
│   │   │   │   └── pharos-cluster-critical.yml
│   │   │   ├── audit
│   │   │   │   └── webhook-config.yml.erb
│   │   │   ├── calico
│   │   │   │   ├── 21-metrics-service.yml.erb
│   │   │   │   ├── 20-configmap.yml.erb
│   │   │   │   └── 30-controller-deployment.yml.erb
│   │   │   ├── psp
│   │   │   │   ├── 99-default-role.yml.erb
│   │   │   │   ├── 01-privileged-role.yml
│   │   │   │   ├── 99-default-role-binding.yml
│   │   │   │   ├── 02-privileged-role-binding.yml
│   │   │   │   ├── 00-privileged-psp.yml
│   │   │   │   └── 99-restricted-psp.yml
│   │   │   └── csi-crds
│   │   │       └── csidriver.yml
│   │   ├── types.rb
│   │   ├── scripts
│   │   │   ├── disable-firewalld.sh
│   │   │   ├── kubeadm-init.sh
│   │   │   ├── configure-kube.sh
│   │   │   ├── kubeadm-renew-certs.sh
│   │   │   ├── install-kubeadm.sh
│   │   │   ├── migrations
│   │   │   │   ├── migrate_worker_05_to_06.sh
│   │   │   │   └── migrate_master_05_to_06.sh
│   │   │   ├── kubeadm-reconfigure.sh
│   │   │   ├── wait-etcd.sh
│   │   │   ├── configure-weave-cni.sh
│   │   │   ├── configure-firewalld.sh
│   │   │   ├── configure-kubelet-proxy.sh
│   │   │   └── configure-etcd-ca.sh
│   │   ├── host
│   │   │   ├── ubuntu
│   │   │   │   ├── scripts
│   │   │   │   │   ├── configure-cfssl.sh
│   │   │   │   │   ├── configure-netfilter.sh
│   │   │   │   │   ├── upgrade-kubeadm.sh
│   │   │   │   │   ├── install-kube-packages.sh
│   │   │   │   │   ├── configure-essentials.sh
│   │   │   │   │   ├── configure-firewalld.sh
│   │   │   │   │   ├── ensure-kubelet.sh
│   │   │   │   │   ├── configure-containerd.sh
│   │   │   │   │   ├── reset.sh
│   │   │   │   │   └── configure-docker.sh
│   │   │   │   ├── ubuntu_focal.rb
│   │   │   │   └── ubuntu_bionic.rb
│   │   │   ├── debian
│   │   │   │   ├── scripts
│   │   │   │   │   ├── configure-cfssl.sh
│   │   │   │   │   ├── configure-netfilter.sh
│   │   │   │   │   ├── upgrade-kubeadm.sh
│   │   │   │   │   ├── install-kube-packages.sh
│   │   │   │   │   ├── configure-firewalld.sh
│   │   │   │   │   ├── ensure-kubelet.sh
│   │   │   │   │   ├── configure-essentials.sh
│   │   │   │   │   ├── reset.sh
│   │   │   │   │   ├── configure-containerd.sh
│   │   │   │   │   └── configure-docker.sh
│   │   │   │   └── debian.rb
│   │   │   └── el7
│   │   │       ├── scripts
│   │   │       │   ├── configure-cfssl.sh
│   │   │       │   ├── configure-netfilter.sh
│   │   │       │   ├── upgrade-kubeadm.sh
│   │   │       │   ├── configure-firewalld.sh
│   │   │       │   ├── configure-containerd.sh
│   │   │       │   ├── install-kube-packages.sh
│   │   │       │   ├── ensure-kubelet.sh
│   │   │       │   ├── configure-docker.sh
│   │   │       │   └── reset.sh
│   │   │       ├── centos7.rb
│   │   │       └── rhel7.rb
│   │   ├── kubeadm.rb
│   │   ├── version.rb
│   │   ├── configuration
│   │   │   ├── api.rb
│   │   │   ├── webhook_audit.rb
│   │   │   ├── telemetry.rb
│   │   │   ├── container_runtime.rb
│   │   │   ├── token_webhook.rb
│   │   │   ├── pod_security_policy.rb
│   │   │   ├── admission_plugin.rb
│   │   │   ├── kube_proxy.rb
│   │   │   ├── control_plane.rb
│   │   │   ├── taint.rb
│   │   │   ├── resolv_conf.rb
│   │   │   ├── repository.rb
│   │   │   ├── struct.rb
│   │   │   ├── audit.rb
│   │   │   ├── authentication.rb
│   │   │   ├── file_audit.rb
│   │   │   ├── os_release.rb
│   │   │   ├── etcd.rb
│   │   │   ├── oidc.rb
│   │   │   ├── kubelet.rb
│   │   │   ├── cpu_arch.rb
│   │   │   ├── bastion.rb
│   │   │   ├── cloud.rb
│   │   │   └── route.rb
│   │   ├── phases
│   │   │   ├── migrate_worker.rb
│   │   │   ├── configure_priority_classes.rb
│   │   │   ├── reset_host.rb
│   │   │   ├── configure_cfssl.rb
│   │   │   ├── mixins
│   │   │   │   ├── psp.rb
│   │   │   │   └── cluster_version.rb
│   │   │   ├── connect_ssh.rb
│   │   │   ├── configure_psp.rb
│   │   │   ├── delete_host.rb
│   │   │   ├── drain.rb
│   │   │   ├── pull_master_images.rb
│   │   │   ├── configure_kubelet_csr_approver.rb
│   │   │   ├── configure_bootstrap.rb
│   │   │   ├── migrate_master.rb
│   │   │   ├── configure_custom_network.rb
│   │   │   ├── join_node.rb
│   │   │   ├── configure_helm_controller.rb
│   │   │   ├── configure_etcd_ca.rb
│   │   │   ├── reconfigure_kubelet.rb
│   │   │   ├── configure_metrics.rb
│   │   │   ├── label_node.rb
│   │   │   ├── validate_configuration_changes.rb
│   │   │   ├── configure_cloud_provider.rb
│   │   │   ├── store_cluster_configuration.rb
│   │   │   ├── apply_manifests.rb
│   │   │   ├── upgrade_check.rb
│   │   │   └── configure_client.rb
│   │   ├── cloud
│   │   │   ├── packet.rb
│   │   │   ├── pharos.rb
│   │   │   ├── hcloud.rb
│   │   │   ├── provider.rb
│   │   │   └── provider_registry.rb
│   │   ├── yaml_file
│   │   │   └── namespace.rb
│   │   ├── terraform_command.rb
│   │   ├── terraform
│   │   │   ├── json_parser.rb
│   │   │   ├── destroy_command.rb
│   │   │   ├── apply_command.rb
│   │   │   └── base_command.rb
│   │   ├── core-ext
│   │   │   ├── colorize.rb
│   │   │   └── string_casing.rb
│   │   ├── kube.rb
│   │   ├── command_options
│   │   │   ├── yes.rb
│   │   │   └── filtered_hosts.rb
│   │   ├── kubeadm
│   │   │   ├── kubeproxy_config.rb
│   │   │   └── init_config.rb
│   │   ├── version_command.rb
│   │   ├── transport
│   │   │   ├── local.rb
│   │   │   └── command
│   │   │       └── ssh.rb
│   │   ├── transport.rb
│   │   ├── error.rb
│   │   ├── root_command.rb
│   │   ├── logging.rb
│   │   ├── kube
│   │   │   └── stack.rb
│   │   ├── retry.rb
│   │   └── kubeconfig_command.rb
│   └── pharos_cluster.rb
├── .gitignore
├── e2e
│   ├── digitalocean
│   │   ├── terraform.tfvars
│   │   ├── terraform-0.12
│   │   │   ├── terraform.tfvars
│   │   │   └── cluster.yml
│   │   └── cluster.yml
│   ├── drone_teardown.sh
│   ├── cluster.yml
│   ├── drone_setup.sh
│   ├── footloose.yaml
│   └── util.sh
├── Dockerfile
├── docker-compose.yaml
├── .github
│   └── ISSUE_TEMPLATE
│       └── enhancement.md
├── conformance
│   └── README.md
├── kube-bench
│   ├── job-node.yml
│   ├── README.md
│   ├── job-master.yml
│   └── run-bench.sh
├── .rubocop.yml
├── .drone.yml
└── README.md
/spec/fixtures/stacks/empty/empty.yml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/spec/fixtures/cluster.minimal.yml:
--------------------------------------------------------------------------------
1 | addons: {}
2 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source "https://rubygems.org"
2 |
3 | gemspec
4 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | kupo
2 | .git
3 | .pharos
4 | e2e
5 | examples
6 |
--------------------------------------------------------------------------------
/spec/fixtures/yaml/erb/no_erb.yml:
--------------------------------------------------------------------------------
1 | test:
2 | result: success
3 |
--------------------------------------------------------------------------------
/spec/fixtures/cluster.yml:
--------------------------------------------------------------------------------
1 | hosts:
2 | - address: 192.0.2.1
3 | role: master
4 |
--------------------------------------------------------------------------------
/spec/fixtures/yaml/erb/with_erb_no_extension.yml:
--------------------------------------------------------------------------------
1 | test:
2 | result: <% failure %>
3 |
--------------------------------------------------------------------------------
/.rspec:
--------------------------------------------------------------------------------
1 | --require spec_helper
2 | --order rand
3 | --format d
4 | --backtrace
5 | --profile
6 |
--------------------------------------------------------------------------------
/spec/fixtures/yaml/erb/with_unknown_variable.yml.erb:
--------------------------------------------------------------------------------
1 | test:
2 | result: <%= name_error %>
3 |
--------------------------------------------------------------------------------
/Rakefile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "bundler/gem_tasks"
4 | task default: :spec
5 |
--------------------------------------------------------------------------------
/examples/terraform-packet/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/spec/fixtures/yaml/erb/with_unknown_variable_conditional.yml.erb:
--------------------------------------------------------------------------------
1 | test:
2 | result: <%= local_var || 'itsnil' %>
3 |
--------------------------------------------------------------------------------
/bin/pharos-cluster:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 | # frozen_string_literal: true
3 |
4 | load File.join(__dir__, 'pharos')
5 |
--------------------------------------------------------------------------------
/spec/fixtures/cluster.yml.erb:
--------------------------------------------------------------------------------
1 | hosts:
2 | <% for ip in ['192.0.2.1'] %>
3 | - address: <%= ip %>
4 | role: master
5 | <% end %>
6 |
--------------------------------------------------------------------------------
/spec/pharos/configuration/os_release_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/config'
2 |
3 | describe Pharos::Configuration::OsRelease do
4 |
5 | end
--------------------------------------------------------------------------------
/examples/terraform-aws/cluster.yml:
--------------------------------------------------------------------------------
1 | cloud:
2 | provider: aws
3 | network:
4 | weave:
5 | trusted_subnets:
6 | - 172.31.0.0/16
7 |
--------------------------------------------------------------------------------
/spec/fixtures/yaml/erb/with_unknown_variable_assignment.yml.erb:
--------------------------------------------------------------------------------
1 | test:
2 | <% name_error ||= 'success' %>
3 | result: <%= name_error %>
4 |
--------------------------------------------------------------------------------
/examples/terraform-packet/cluster.yml:
--------------------------------------------------------------------------------
1 | network:
2 | provider: calico
3 | pod_network_cidr: 172.31.0.0/16
4 | service_cidr: 172.32.0.0/16
5 |
--------------------------------------------------------------------------------
/spec/fixtures/cluster.master_and_worker.yml:
--------------------------------------------------------------------------------
1 | hosts:
2 | - address: 192.0.2.1
3 | role: master
4 | - address: 192.0.2.2
5 | role: worker
6 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 2048
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/client.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "client",
3 | "key": {
4 | "algo": "ecdsa",
5 | "size": 256
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/examples/terraform-do/flannel/03-service-account.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: flannel
6 | namespace: kube-system
7 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/proxy-only.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /sbin/iptables -A OUTPUT -p tcp --dport 80 -j DROP
4 | /sbin/iptables -A OUTPUT -p tcp --dport 443 -j DROP
--------------------------------------------------------------------------------
/lib/pharos/resources/packet/01-serviceaccount.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: cloud-controller-manager
5 | namespace: kube-system
--------------------------------------------------------------------------------
/spec/fixtures/yaml/erb/with_erb.yml.erb:
--------------------------------------------------------------------------------
1 | test:
2 | <%- if result.nil? -%>
3 | result: success
4 | <%- else -%>
5 | result: <%= result %>
6 | <%- end -%>
7 |
--------------------------------------------------------------------------------
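These ERB fixtures exercise how Pharos renders templated YAML before parsing it. A minimal sketch (not repository code) of rendering the fixture above with Ruby's stdlib ERB, using a hypothetical binding object in the style of Pharos::YamlFile::Namespace (see lib/pharos/yaml_file/namespace.rb):

```
require 'erb'

# Hypothetical stand-in for Pharos::YamlFile::Namespace: each template
# variable becomes a method the template can call through the binding.
ns = Object.new
ns.singleton_class.send(:define_method, :result) { 'from-spec' }

template = File.read('spec/fixtures/yaml/erb/with_erb.yml.erb')
# Positional trim-mode argument keeps compatibility with Ruby 2.5 (the
# version the repository's Dockerfile targets); '-' honors <%- and -%>.
puts ERB.new(template, nil, '-').result(ns.instance_eval { binding })
# renders the else branch: "result: from-spec"
```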
/lib/pharos/resources/node_local_dns/00-service_account.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: node-local-dns
5 | namespace: kube-system
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /.bundle/
2 | /.yardoc
3 | /_yardoc/
4 | /coverage/
5 | /doc/
6 | /pkg/
7 | /spec/reports/
8 | /tmp/
9 | **/.vagrant
10 | Gemfile.lock
11 | gems.locked
12 |
--------------------------------------------------------------------------------
/lib/pharos/resources/helm-controller/01-service-account.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: helm-controller
5 | namespace: kube-system
6 |
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/00-service-account.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: cloud-controller-manager
6 | namespace: kube-system
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/block-ssh.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | /sbin/iptables -A INPUT -p tcp -s 192.168.100.103 --dport 22 -j ACCEPT
4 | /sbin/iptables -A INPUT -p tcp --dport 22 -j DROP
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/01-metrics-server-service-account.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: metrics-server
5 | namespace: kube-system
--------------------------------------------------------------------------------
/spec/fixtures/secrets_cfg_no_encryption.yaml:
--------------------------------------------------------------------------------
1 | kind: EncryptionConfig
2 | apiVersion: v1
3 | resources:
4 | - resources:
5 | - secrets
6 | providers:
7 | - identity: {}
--------------------------------------------------------------------------------
/lib/pharos/resources/kubelet_rubber_stamp/02-service-account.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: kubelet-rubber-stamp
5 | namespace: kube-system
6 |
--------------------------------------------------------------------------------
/lib/pharos/types.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'dry-types'
4 |
5 | module Pharos
6 | module Types
7 | include Dry::Types.module
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/lib/pharos/resources/weave/02-service-account.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: weave-net
5 | labels:
6 | name: weave-net
7 | namespace: kube-system
--------------------------------------------------------------------------------
/lib/pharos/scripts/disable-firewalld.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if systemctl is-active --quiet firewalld; then
6 | systemctl disable firewalld
7 | systemctl stop firewalld
8 | fi
9 |
--------------------------------------------------------------------------------
/examples/terraform-do/cluster.yml:
--------------------------------------------------------------------------------
1 | network:
2 | provider: weave
3 | pod_network_cidr: 172.31.0.0/16
4 | service_cidr: 172.32.0.0/16
5 | weave:
6 | trusted_subnets:
7 | - "10.133.0.0/16"
8 |
--------------------------------------------------------------------------------
/examples/terraform-do/cluster_custom_network.yml:
--------------------------------------------------------------------------------
1 | network:
2 | provider: custom
3 | pod_network_cidr: 172.31.0.0/16
4 | service_cidr: 172.32.0.0/16
5 | custom:
6 | manifest_path: ./flannel/
7 |
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/configure-cfssl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | mkdir -p /opt/pharos
6 | ctr -n pharos image pull "${IMAGE}"
7 | ctr -n pharos install --path /opt/pharos --replace "${IMAGE}"
8 |
--------------------------------------------------------------------------------
/lib/pharos/kubeadm.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Kubeadm
5 | PHAROS_DIR = '/etc/pharos'
6 | end
7 | end
8 |
9 | require_relative 'kubeadm/config_generator'
10 |
--------------------------------------------------------------------------------
/spec/fixtures/cluster_with_repos.yml:
--------------------------------------------------------------------------------
1 | hosts:
2 | - address: 192.0.2.1
3 | role: master
4 | repositories:
5 | - name: test
6 | key_url: URL_TO_KEY
7 | contents: |
8 | REPO_CONTENTS
9 |
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/configure-cfssl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | mkdir -p /opt/pharos
6 | ctr -n pharos image pull "${IMAGE}"
7 | ctr -n pharos install --path /opt/pharos --replace "${IMAGE}"
8 |
9 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/configure-cfssl.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | mkdir -p /opt/pharos
6 | ctr -n pharos image pull "${IMAGE}"
7 | ctr -n pharos install --path /opt/pharos --replace "${IMAGE}"
8 |
9 |
--------------------------------------------------------------------------------
/lib/pharos/version.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | VERSION = "3.2.1"
5 |
6 | def self.version
7 | VERSION
8 | end
9 |
10 | def self.oss?
11 | true
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/spec/fixtures/stacks/empty/test.yml:
--------------------------------------------------------------------------------
1 | # This ConfigMap is used to configure a self-hosted Calico installation.
2 | kind: ConfigMap
3 | apiVersion: v1
4 | metadata:
5 | name: test
6 | namespace: default
7 | data:
8 | foo: bar
9 |
--------------------------------------------------------------------------------
/spec/fixtures/stacks/test/test.yml:
--------------------------------------------------------------------------------
1 | # This ConfigMap is used to configure a self-hosted Calico installation.
2 | kind: ConfigMap
3 | apiVersion: v1
4 | metadata:
5 | name: test
6 | namespace: default
7 | data:
8 | foo: bar
9 |
--------------------------------------------------------------------------------
/examples/terraform-do/pharos-addons/do-csi/resources/00-secret.yml.erb:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: digitalocean
5 | namespace: kube-system
6 | data:
7 | access-token: "<%= Base64.strict_encode64(config.token) %>"
8 |
--------------------------------------------------------------------------------
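The access-token value above leans on Base64.strict_encode64 rather than Base64.encode64: Kubernetes Secret data must be single-line base64, and the strict variant is the one that omits the trailing newline. A quick stdlib illustration (the token string is made up):

```
require 'base64'

Base64.encode64("my-do-token")        # => "bXktZG8tdG9rZW4=\n"  (newline appended)
Base64.strict_encode64("my-do-token") # => "bXktZG8tdG9rZW4="    (single line, Secret-safe)
```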
/lib/pharos/resources/firewalld/ipset.xml.erb:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <ipset type="hash:net">
3 |   <short><%= name %></short>
4 |   <% entries.each do |entry| %>
5 |   <entry><%= entry %></entry>
6 |   <% end %>
7 | </ipset>
8 |
--------------------------------------------------------------------------------
/e2e/digitalocean/terraform.tfvars:
--------------------------------------------------------------------------------
1 |
2 | cluster_name = "phrs"
3 |
4 | master_count = 1
5 |
6 | master_size = "4gb"
7 |
8 | worker_count = 3
9 |
10 | worker_size = "4gb"
11 |
12 | image = "ubuntu-18-04-x64"
13 |
14 | region = "ams3"
15 |
16 |
--------------------------------------------------------------------------------
/examples/terraform-aws/README.md:
--------------------------------------------------------------------------------
1 | # Kontena Pharos Cluster on AWS (using Terraform)
2 |
3 |
4 | ## Provision Cluster
5 |
6 | ```
7 | $ pharos tf apply
8 | ```
9 |
10 | ## Terminate Cluster
11 |
12 | ```
13 | $ pharos tf destroy
14 | ```
15 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/api.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Api < Pharos::Configuration::Struct
6 | attribute :endpoint, Pharos::Types::String
7 | end
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/examples/terraform-do/README.md:
--------------------------------------------------------------------------------
1 | # Pharos Cluster on DigitalOcean (using Terraform)
2 |
3 |
4 | ## Provision Cluster
5 |
6 | ```
7 | $ pharos tf apply
8 | ```
9 |
10 |
11 | ## Terminate Cluster
12 |
13 | ```
14 | $ pharos tf destroy
15 | ```
16 |
--------------------------------------------------------------------------------
/lib/pharos/phases/migrate_worker.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class MigrateWorker < Pharos::Phase
6 | title "Migrate worker"
7 |
8 | def call; end
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/examples/terraform-do/pharos-addons/do-csi/addon.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | Pharos.addon('do-csi') do
4 | version '1.0.0'
5 | license 'Apache 2.0'
6 |
7 | config_schema do
8 | optional(:token).filled(:str?)
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "127.0.0.1",
3 | "hosts": [
4 | "127.0.0.1"
5 | ],
6 | "key": {
7 | "algo": "ecdsa",
8 | "size": 256
9 | },
10 | "names": [
11 | ]
12 | }
13 |
14 |
--------------------------------------------------------------------------------
/e2e/digitalocean/terraform-0.12/terraform.tfvars:
--------------------------------------------------------------------------------
1 |
2 | cluster_name = "phrs"
3 |
4 | master_count = 1
5 |
6 | master_size = "4gb"
7 |
8 | worker_count = 3
9 |
10 | worker_size = "4gb"
11 |
12 | image = "ubuntu-18-04-x64"
13 |
14 | region = "ams3"
15 |
16 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ruby:2.5
2 |
3 | WORKDIR /app
4 |
5 | COPY Gemfile Gemfile.lock *.gemspec ./
6 | COPY lib/pharos/version.rb ./lib/pharos/
7 | RUN bundle install --without development test
8 |
9 | COPY . .
10 |
11 | ENTRYPOINT ["./bin/pharos-cluster"]
12 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/webhook_audit.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class WebhookAudit < Pharos::Configuration::Struct
6 | attribute :server, Pharos::Types::String
7 | end
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/telemetry.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Telemetry < Pharos::Configuration::Struct
6 | attribute :enabled, Pharos::Types::Bool.default(true)
7 | end
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | pharos-cluster:
2 | build: .
3 | volumes:
4 | - .:/app
5 | - $SSH_AUTH_SOCK:/run/ssh-agent.sock
6 | - ./.pharos:/root/.pharos
7 | environment:
8 | - SSH_AUTH_SOCK=/run/ssh-agent.sock
9 | entrypoint: bin/pharos-cluster
10 | command: up
11 |
--------------------------------------------------------------------------------
/lib/pharos/resources/weave/06-flying-shuttle-cm.yml.erb:
--------------------------------------------------------------------------------
1 | <%- if known_peers -%>
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: flying-shuttle
6 | namespace: kube-system
7 | data:
8 | known-peers: '<%= { peers: known_peers }.to_json %>'
9 | <%- end -%>
10 |
--------------------------------------------------------------------------------
/examples/terraform-packet/packet-cloud-config-secret.yaml.tpl:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: packet-cloud-config
5 | namespace: kube-system
6 | data:
7 | apiKey: ${api_key} # Base64 encoded API token
8 | projectID: ${project_id} # Base64 encoded project ID
9 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/client-key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN EC PRIVATE KEY-----
2 | MHcCAQEEIFSxEpZ1fota8RayFb6F9EPNH812RswALBD/+6ObGWAcoAoGCCqGSM49
3 | AwEHoUQDQgAEsbRt+xsQI6vhpePlK0kqxfxhPBy4FztvL9Dkv0L9N/XIfKz5D09J
4 | +rY+ITzk4/cZ/B1v8ulDqvLw8O9IySjnyw==
5 | -----END EC PRIVATE KEY-----
6 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/server-key.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN EC PRIVATE KEY-----
2 | MHcCAQEEIN/o4L15HcxcnQ2R5paUMLnvAZ77Z5AkBSren0GcGyanoAoGCCqGSM49
3 | AwEHoUQDQgAE3KxCzMvETrNSoaqvWUmo8McZrrMngbRlo3Kjacs2Inl4n3Ikq3Pg
4 | CID9lN9Rn+pr7Y34KwbuUYj3KMJ5YVjbyg==
5 | -----END EC PRIVATE KEY-----
6 |
--------------------------------------------------------------------------------
/lib/pharos/resources/secrets/encryption-config.yml.erb:
--------------------------------------------------------------------------------
1 | kind: EncryptionConfig
2 | apiVersion: v1
3 | resources:
4 | - resources:
5 | - secrets
6 | providers:
7 | - aescbc:
8 | keys:
9 | - name: key1
10 | secret: "<%= key1 %>"
11 | - identity: {}
--------------------------------------------------------------------------------
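The aescbc provider expects each key secret to be 32 random bytes, base64-encoded; that is the shape of the key1/key2 values in spec/fixtures/secrets_cfg.yaml. A minimal sketch of generating a value for the key1 template variable above:

```
require 'securerandom'
require 'base64'

# 32 random bytes, base64-encoded: the format kube-apiserver
# requires for aescbc encryption keys.
key1 = Base64.strict_encode64(SecureRandom.random_bytes(32))
```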
/lib/pharos/cloud/packet.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative "provider"
4 |
5 | module Pharos
6 | module Cloud
7 | class Packet < Provider
8 | register_as :packet
9 |
10 | def csi?
11 | false
12 | end
13 | end
14 | end
15 | end
16 |
--------------------------------------------------------------------------------
/lib/pharos/cloud/pharos.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative "provider"
4 |
5 | module Pharos
6 | module Cloud
7 | class PharosCloud < Provider
8 | register_as :pharos
9 |
10 | def csi?
11 | false
12 | end
13 | end
14 | end
15 | end
16 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/kubeadm-init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | if [ "$UNSET_PROXY" = "true" ]; then
6 | while read -r var; do unset "$var"; done < <(env | grep -i _proxy | sed 's/=.*//g')
7 | fi
8 |
9 | kubeadm init --ignore-preflight-errors all --skip-token-print --config "${CONFIG}"
10 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/container_runtime.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class ContainerRuntime < Pharos::Configuration::Struct
6 | attribute :insecure_registries, Pharos::Types::Array.default(proc { [] })
7 | end
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/13-cluster-role-pharos-cloud-controller.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: pharos-cloud-controller
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - nodes
11 | verbs:
12 | - get
13 | - list
14 |
--------------------------------------------------------------------------------
/spec/support/fixtures_helper.rb:
--------------------------------------------------------------------------------
1 | module FixturesHelper
2 | FIXTURES_PATH = File.expand_path('../fixtures', __dir__)
3 |
4 | def fixtures_path(*joinables)
5 | File.join(*[FIXTURES_PATH] + joinables)
6 | end
7 |
8 | def fixture(file)
9 | IO.read(fixtures_path(file))
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
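A hypothetical spec (not in the repository) showing how the two helpers are meant to be used once the module is included, e.g. via spec_helper; the expected hash matches spec/fixtures/cluster.minimal.yml above:

```
require 'yaml'

describe 'a config fixture' do
  include FixturesHelper

  it 'reads files relative to spec/fixtures' do
    expect(fixtures_path('cluster.minimal.yml')).to end_with('spec/fixtures/cluster.minimal.yml')
    expect(YAML.safe_load(fixture('cluster.minimal.yml'))).to eq('addons' => {})
  end
end
```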
/lib/pharos/configuration/token_webhook.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class TokenWebhook < Pharos::Configuration::Struct
6 | attribute :config, Pharos::Types::Hash
7 | attribute :cache_ttl, Pharos::Types::String
8 | end
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/lib/pharos/resources/firewalld/service.xml.erb:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <service>
3 |   <short><%= name %></short>
4 |   <description><%= description %></description>
5 |   <% ports.each do |port| %>
6 |   <port protocol="<%= port[:protocol] %>" port="<%= port[:port] %>"/>
7 |   <% end %>
8 | </service>
9 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/configure-kube.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | if [ -e /etc/systemd/system/kubelet.service.d/05-pharos-kubelet.conf ]; then
6 | # remove kubelet config because it conflicts with kubeadm
7 | rm /etc/systemd/system/kubelet.service.d/05-pharos-kubelet.conf
8 | systemctl daemon-reload
9 | fi
--------------------------------------------------------------------------------
/spec/fixtures/stacks/multidoc/resources/multidoc.yml:
--------------------------------------------------------------------------------
1 | kind: ConfigMap
2 | apiVersion: v1
3 | metadata:
4 | name: multidoctest
5 | namespace: default
6 | data:
7 | doc: 1
8 | ---
9 | kind: ConfigMap
10 | apiVersion: v1
11 | metadata:
12 | name: multidoctest2
13 | namespace: default
14 | data:
15 | doc: 2
16 |
--------------------------------------------------------------------------------
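This fixture exercises `---`-separated multi-document YAML. With Ruby's stdlib such a file is consumed via YAML.load_stream, which returns one object per document; a small sketch (not repository code):

```
require 'yaml'

docs = YAML.load_stream(File.read('spec/fixtures/stacks/multidoc/resources/multidoc.yml'))
docs.size                                   # => 2
docs.map { |d| d.dig('metadata', 'name') }  # => ["multidoctest", "multidoctest2"]
```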
/lib/pharos/configuration/pod_security_policy.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class PodSecurityPolicy < Pharos::Configuration::Struct
6 | attribute :default_policy, Pharos::Types::Strict::String.default('00-pharos-privileged')
7 | end
8 | end
9 | end
10 |
--------------------------------------------------------------------------------
/lib/pharos/resources/priority_classes/pharos-cluster-critical.yml:
--------------------------------------------------------------------------------
1 | apiVersion: scheduling.k8s.io/v1beta1
2 | kind: PriorityClass
3 | metadata:
4 | name: pharos-cluster-critical
5 | value: 1000000000
6 | globalDefault: false
7 | description: "This priority class should be used for cluster critical services outside of kube-system."
8 |
--------------------------------------------------------------------------------
/e2e/drone_teardown.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | cd e2e/digitalocean
6 |
7 | if [ ! -f terraform.tfstate ]
8 | then
9 | echo "TF state not found, not running teardown"
10 | exit 0
11 | fi
12 |
13 | until terraform destroy -auto-approve
14 | do
15 | echo "Destroy failed... trying again in 5s"
16 | sleep 5
17 | done
--------------------------------------------------------------------------------
/lib/pharos/configuration/admission_plugin.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class AdmissionPlugin < Pharos::Configuration::Struct
6 | attribute :name, Pharos::Types::String
7 | attribute :enabled, Pharos::Types::Bool.default(true)
8 | end
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/kube_proxy.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class KubeProxy < Pharos::Configuration::Struct
6 | attribute :mode, Pharos::Types::String.default('iptables')
7 | attribute :conntrack, Pharos::Types::Strict::Hash
8 | end
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/lib/pharos/resources/audit/webhook-config.yml.erb:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: v1
3 | clusters:
4 | - cluster:
5 | server: <%= server %>
6 | name: pharos
7 | contexts:
8 | - context:
9 | cluster: pharos
10 | user: ""
11 | name: default-context
12 | current-context: default-context
13 | kind: Config
14 | preferences: {}
15 | users: []
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_priority_classes.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigurePriorityClasses < Pharos::Phase
6 | title "Configure priority classes"
7 |
8 | def call
9 | apply_stack('priority_classes')
10 | end
11 | end
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/control_plane.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class ControlPlane < Pharos::Configuration::Struct
6 | attribute :use_proxy, Pharos::Types::Bool.default(false)
7 | attribute :feature_gates, Pharos::Types::Strict::Hash
8 | end
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/kubeadm-renew-certs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | if [ "$UNSET_PROXY" = "true" ]; then
6 | while read -r var; do unset "$var"; done < <(env | grep -i _proxy | sed 's/=.*//g')
7 | fi
8 |
9 | kubeadm init phase certs apiserver --config "${CONFIG}"
10 | kubeadm alpha certs renew apiserver --config "${CONFIG}"
11 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/taint.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Taint < Pharos::Configuration::Struct
6 | attribute :key, Pharos::Types::String
7 | attribute :value, Pharos::Types::String
8 | attribute :effect, Pharos::Types::String
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/install-kubeadm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ex
4 |
5 | [ "$(kubeadm version -o short)" = "v${VERSION}" ] && exit
6 |
7 | cd /tmp
8 | export DEBIAN_FRONTEND=noninteractive
9 | apt-get download "kubeadm=${VERSION}-00"
10 | dpkg -i --ignore-depends=kubelet kubeadm_"${VERSION}"-00_*.deb
11 | rm -f kubeadm_"${VERSION}"-00_*.deb
12 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/resolv_conf.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class ResolvConf < Pharos::Configuration::Struct
6 | attribute :nameserver_localhost, Pharos::Types::Strict::Bool
7 | attribute :systemd_resolved_stub, Pharos::Types::Strict::Bool
8 | end
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/lib/pharos/phases/reset_host.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ResetHost < Pharos::Phase
6 | title "Reset hosts"
7 |
8 | def call
9 | logger.info { "Removing all traces of Kontena Pharos ..." }
10 | host_configurer.reset
11 | end
12 | end
13 | end
14 | end
15 |
--------------------------------------------------------------------------------
/lib/pharos/resources/helm-controller/00-crd.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1beta1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | name: helmcharts.helm.cattle.io
5 | spec:
6 | group: helm.cattle.io
7 | version: v1
8 | names:
9 | kind: HelmChart
10 | plural: helmcharts
11 | singular: helmchart
12 | scope: Namespaced
13 |
--------------------------------------------------------------------------------
/examples/terraform-packet/terraform.example.tfvars:
--------------------------------------------------------------------------------
1 | project_id = ""
2 |
3 | auth_token = ""
4 |
5 | cluster_name = "baremetal"
6 |
7 | facility = "ewr1"
8 |
9 | master_plan = "baremetal_0"
10 |
11 | master_count = 1
12 |
13 | worker_plan = "baremetal_0"
14 |
15 | worker_count = 3
16 |
17 | host_os = "ubuntu_18_04"
18 |
19 | bgp_address_pool = ""
20 |
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_cfssl.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureCfssl < Pharos::Phase
6 | title "Configure cfssl"
7 |
8 | def call
9 | logger.info { 'Installing cfssl ...' }
10 | host_configurer.configure_cfssl
11 | end
12 | end
13 | end
14 | end
15 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/repository.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Repository < Pharos::Configuration::Struct
6 | attribute :name, Pharos::Types::String
7 | attribute :contents, Pharos::Types::String
8 | attribute :key_url, Pharos::Types::String
9 | end
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/examples/terraform-do/flannel/02-cluster-role-binding.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ClusterRoleBinding
3 | apiVersion: rbac.authorization.k8s.io/v1beta1
4 | metadata:
5 | name: flannel
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: flannel
10 | subjects:
11 | - kind: ServiceAccount
12 | name: flannel
13 | namespace: kube-system
14 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/struct.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'dry-struct'
4 |
5 | module Pharos
6 | module Configuration
7 | class Struct < Dry::Struct
8 | transform_types do |type|
9 | # all attributes are optional and default to nil...
10 | type.meta(omittable: true)
11 | end
12 | end
13 | end
14 | end
15 |
--------------------------------------------------------------------------------
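Because transform_types marks every attribute omittable, any Pharos::Configuration::Struct subclass accepts partial input, and omitted attributes read back as nil. A sketch (assuming the gem's load path is set up) using the Api struct from lib/pharos/configuration/api.rb:

```
require 'pharos/configuration/api'

api = Pharos::Configuration::Api.new({})
api.endpoint  # => nil, the attribute was omitted

api = Pharos::Configuration::Api.new(endpoint: 'https://k8s.example.com:6443')
api.endpoint  # => "https://k8s.example.com:6443"
```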
/lib/pharos/host/ubuntu/scripts/configure-netfilter.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if grep "container=docker" /proc/1/environ ; then
6 | exit 0
7 | fi
8 |
9 | modprobe br_netfilter
10 | echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
11 | echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/99-net-bridge.conf
12 | systemctl restart procps
13 |
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/configure-netfilter.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if grep "container=docker" /proc/1/environ ; then
6 | exit 0
7 | fi
8 |
9 | /sbin/modprobe br_netfilter
10 | echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
11 | echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/99-net-bridge.conf
12 | systemctl restart procps
13 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/configure-netfilter.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if grep "container=docker" /proc/1/environ ; then
6 | exit 0
7 | fi
8 |
9 | /sbin/modprobe br_netfilter
10 | echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf
11 | echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/99-net-bridge.conf
12 | /usr/sbin/sysctl --system
13 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/migrations/migrate_worker_05_to_06.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ue
4 |
5 | SERVER=${SERVER:-localhost:6443}
6 |
7 | if ! grep -qF "server: https://$SERVER" /etc/kubernetes/kubelet.conf; then
8 | sed -i "s/server: .*/server: https:\/\/$SERVER/g" /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf
9 | systemctl restart kubelet
10 | fi
11 |
--------------------------------------------------------------------------------
/examples/authentication-token-webhook/deploy/cluster_role_binding.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: admin-binding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: cluster-admin
9 | subjects:
10 | - kind: User
11 | name: admin
12 | apiGroup: rbac.authorization.k8s.io
--------------------------------------------------------------------------------
/lib/pharos/phases/mixins/psp.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | module Mixins
6 | module PSP
7 | def apply_psp_stack
8 | apply_stack(
9 | 'psp',
10 | default_psp: @config.pod_security_policy.default_policy
11 | )
12 | end
13 | end
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
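The default_psp keyword ends up as a variable in the stack's ERB resources; lib/pharos/resources/psp/99-default-role.yml.erb consumes it as `<%= default_psp %>`. A simplified, stdlib-only sketch of that plumbing (render is a made-up stand-in for apply_stack's template step, not the actual implementation):

```
require 'erb'

def render(template, **vars)
  ns = Object.new
  # One reader method per variable, mirroring Pharos::YamlFile::Namespace.
  vars.each { |key, value| ns.singleton_class.send(:define_method, key) { value } }
  ERB.new(template).result(ns.instance_eval { binding })
end

render("resourceNames:\n  - <%= default_psp %>\n", default_psp: '00-pharos-privileged')
# => "resourceNames:\n  - 00-pharos-privileged\n"
```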
/lib/pharos/resources/helm-controller/02-cluster-role-binding.yml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: helm-controller
5 | subjects:
6 | - kind: ServiceAccount
7 | namespace: kube-system
8 | name: helm-controller
9 | roleRef:
10 | kind: ClusterRole
11 | name: cluster-admin
12 | apiGroup: rbac.authorization.k8s.io
13 |
--------------------------------------------------------------------------------
/lib/pharos/resources/calico/21-metrics-service.yml.erb:
--------------------------------------------------------------------------------
1 | <% if metrics_enabled %>
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: calico-metrics
6 | namespace: kube-system
7 | spec:
8 | ports:
9 | - name: metrics
10 | port: <%= metrics_port %>
11 | protocol: TCP
12 | targetPort: <%= metrics_port %>
13 | selector:
14 | k8s-app: calico-node
15 | <% end %>
--------------------------------------------------------------------------------
/spec/fixtures/secrets_cfg.yaml:
--------------------------------------------------------------------------------
1 | kind: EncryptionConfig
2 | apiVersion: v1
3 | resources:
4 | - resources:
5 | - secrets
6 | providers:
7 | - aescbc:
8 | keys:
9 | - name: key1
10 | secret: "s6Xm3BlhHWkD0/5mW5tcks5kcdeWxE3qWkx/gA6hlcI="
11 | - name: key2
12 | secret: "23VanHzmFuMQgfnVQrp9oJf0lLa82mThTBVDXd8Uw0s="
13 | - identity: {}
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/metrics-apiservice.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apiregistration.k8s.io/v1beta1
2 | kind: APIService
3 | metadata:
4 | name: v1beta1.metrics.k8s.io
5 | spec:
6 | service:
7 | name: metrics-server
8 | namespace: kube-system
9 | group: metrics.k8s.io
10 | version: v1beta1
11 | insecureSkipTLSVerify: true
12 | groupPriorityMinimum: 100
13 | versionPriority: 100
--------------------------------------------------------------------------------
/bin/pharos:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ruby
2 | # frozen_string_literal: true
3 |
4 | # add lib to libpath (only needed when running from the sources)
5 | require 'pathname'
6 | lib_path = File.expand_path('../../lib', Pathname.new(__FILE__).realpath)
7 | $LOAD_PATH.unshift lib_path unless $LOAD_PATH.include?(lib_path)
8 |
9 | STDOUT.sync = true
10 |
11 | require 'pharos_cluster'
12 | Pharos::RootCommand.run
13 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/audit.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative "file_audit"
4 | require_relative "webhook_audit"
5 |
6 | module Pharos
7 | module Configuration
8 | class Audit < Pharos::Configuration::Struct
9 | attribute :webhook, Pharos::Configuration::WebhookAudit
10 | attribute :file, Pharos::Configuration::FileAudit
11 | end
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/lib/pharos/resources/kubelet_rubber_stamp/03-cluster-role-binding.yml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: kubelet-rubber-stamp
5 | subjects:
6 | - kind: ServiceAccount
7 | namespace: kube-system
8 | name: kubelet-rubber-stamp
9 | roleRef:
10 | kind: ClusterRole
11 | name: kubelet-rubber-stamp
12 | apiGroup: rbac.authorization.k8s.io
13 |
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/02-auth-delegator.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: metrics-server:system:auth-delegator
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: system:auth-delegator
9 | subjects:
10 | - kind: ServiceAccount
11 | name: metrics-server
12 | namespace: kube-system
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/11-cluster-role-binding-pvl-controller.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: system:pvl-controller
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: system:pvl-controller
10 | subjects:
11 | - kind: ServiceAccount
12 | name: pvl-controller
13 | namespace: kube-system
--------------------------------------------------------------------------------
/lib/pharos/resources/psp/99-default-role.yml.erb:
--------------------------------------------------------------------------------
1 | # Cluster role which grants access to the default pod security policy
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: pharos:podsecuritypolicy:default
6 | rules:
7 | - apiGroups:
8 | - policy
9 | resourceNames:
10 | - <%= default_psp %>
11 | resources:
12 | - podsecuritypolicies
13 | verbs:
14 | - use
--------------------------------------------------------------------------------
/lib/pharos/resources/weave/05-cluster-role-binding.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: weave-net
5 | labels:
6 | name: weave-net
7 | roleRef:
8 | kind: ClusterRole
9 | name: weave-net
10 | apiGroup: rbac.authorization.k8s.io
11 | subjects:
12 | - kind: ServiceAccount
13 | name: weave-net
14 | namespace: kube-system
--------------------------------------------------------------------------------
/spec/pharos/phases/migrate_master_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/phases/migrate_master'
2 |
3 | describe Pharos::Phases::MigrateMaster do
4 | let(:host) { instance_double(Pharos::Configuration::Host) }
5 | let(:ssh) { instance_double(Pharos::Transport::SSH) }
6 | subject { described_class.new(host) }
7 |
8 | before do
9 | allow(host).to receive(:transport).and_return(ssh)
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/spec/pharos/phases/migrate_worker_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/phases/migrate_worker'
2 |
3 | describe Pharos::Phases::MigrateWorker do
4 | let(:host) { instance_double(Pharos::Configuration::Host) }
5 | let(:ssh) { instance_double(Pharos::Transport::SSH) }
6 | subject { described_class.new(host) }
7 |
8 | before do
9 | allow(host).to receive(:transport).and_return(ssh)
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/metrics-server-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: metrics-server
5 | namespace: kube-system
6 | labels:
7 | kubernetes.io/name: "Metrics-server"
8 | kubernetes.io/cluster-service: 'true'
9 | spec:
10 | selector:
11 | k8s-app: metrics-server
12 | ports:
13 | - port: 443
14 | protocol: TCP
15 | targetPort: 443
--------------------------------------------------------------------------------
/lib/pharos/resources/packet/03-clusterrolebinding.yml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: system:cloud-controller-manager
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: system:cloud-controller-manager
9 | subjects:
10 | - kind: ServiceAccount
11 | name: cloud-controller-manager
12 | namespace: kube-system
--------------------------------------------------------------------------------
/lib/pharos/resources/weave/04-role-binding.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: RoleBinding
3 | metadata:
4 | name: weave-net
5 | labels:
6 | name: weave-net
7 | namespace: kube-system
8 | roleRef:
9 | kind: Role
10 | name: weave-net
11 | apiGroup: rbac.authorization.k8s.io
12 | subjects:
13 | - kind: ServiceAccount
14 | name: weave-net
15 | namespace: kube-system
--------------------------------------------------------------------------------
/lib/pharos/scripts/kubeadm-reconfigure.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | if [ "$UNSET_PROXY" = "true" ]; then
6 | while read -r var; do unset "$var"; done < <(env | grep -i _proxy | sed 's/=.*//g')
7 | fi
8 |
9 | kubeadm init phase control-plane all --config "${CONFIG}"
10 | kubeadm init phase mark-control-plane --config "${CONFIG}"
11 | kubeadm init phase upload-config all --config "${CONFIG}"
12 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/client.csr:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE REQUEST-----
2 | MIHLMHMCAQAwETEPMA0GA1UEAxMGY2xpZW50MFkwEwYHKoZIzj0CAQYIKoZIzj0D
3 | AQcDQgAEsbRt+xsQI6vhpePlK0kqxfxhPBy4FztvL9Dkv0L9N/XIfKz5D09J+rY+
4 | ITzk4/cZ/B1v8ulDqvLw8O9IySjny6AAMAoGCCqGSM49BAMCA0gAMEUCIQCxm7EJ
5 | 6xTgw6XVj8BmwLP6iU4JSELyllIDXfwyZ6Go6QIgS++vFMoy77zYIlzlhcOfyyrG
6 | b5JKN4Ot2qARVoY5IyY=
7 | -----END CERTIFICATE REQUEST-----
8 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/authentication.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'token_webhook'
4 | require_relative 'oidc'
5 |
6 | module Pharos
7 | module Configuration
8 | class Authentication < Pharos::Configuration::Struct
9 | attribute :token_webhook, Pharos::Configuration::TokenWebhook
10 | attribute :oidc, Pharos::Configuration::OIDC
11 | end
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/file_audit.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class FileAudit < Pharos::Configuration::Struct
6 | attribute :path, Pharos::Types::String
7 | attribute :max_age, Pharos::Types::Integer
8 | attribute :max_backups, Pharos::Types::Integer
9 | attribute :max_size, Pharos::Types::Integer
10 | end
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/auth-reader.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: RoleBinding
3 | metadata:
4 | name: metrics-server-auth-reader
5 | namespace: kube-system
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: Role
9 | name: extension-apiserver-authentication-reader
10 | subjects:
11 | - kind: ServiceAccount
12 | name: metrics-server
13 | namespace: kube-system
--------------------------------------------------------------------------------
/lib/pharos/yaml_file/namespace.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | class YamlFile
5 | class Namespace
6 | def initialize(variables)
7 | variables.each do |key, value|
8 | singleton_class.send(:define_method, key) { value }
9 | end
10 | end
11 |
12 |     def with_binding
13 | yield binding
14 | end
15 | end
16 | end
17 | end
18 |
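A minimal usage sketch of Namespace (the template string below is hypothetical): the variables hash becomes methods on the instance, and #with_binding hands ERB a binding in which those methods resolve.

require 'erb'

ns = Pharos::YamlFile::Namespace.new(name: 'weave-net', replicas: 3)
ns.with_binding do |b|
  puts ERB.new('name: <%= name %>, replicas: <%= replicas %>').result(b)
end
# prints: name: weave-net, replicas: 3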
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/10-cluster-role-binding-node-controller.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: system:cloud-node-controller
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: system:cloud-node-controller
10 | subjects:
11 | - kind: ServiceAccount
12 | name: cloud-node-controller
13 | namespace: kube-system
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/12-cluster-role-binding-ccm.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: system:cloud-controller-manager
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: system:cloud-controller-manager
10 | subjects:
11 | - kind: ServiceAccount
12 | name: cloud-controller-manager
13 | namespace: kube-system
14 |
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/03-cluster-role-pvl-controller.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: system:pvl-controller
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - persistentvolumes
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - apiGroups:
16 | - ""
17 | resources:
18 | - events
19 | verbs:
20 | - create
21 | - patch
22 | - update
--------------------------------------------------------------------------------
/lib/pharos/resources/psp/01-privileged-role.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: pharos:podsecuritypolicy:privileged
5 | labels:
6 | kubernetes.io/cluster-service: "true"
7 | addonmanager.kubernetes.io/mode: Reconcile
8 | rules:
9 | - apiGroups:
10 | - policy
11 | resourceNames:
12 | - 00-pharos-privileged
13 | resources:
14 | - podsecuritypolicies
15 | verbs:
16 | - use
--------------------------------------------------------------------------------
/lib/pharos/terraform_command.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'terraform/apply_command'
4 | require_relative 'terraform/destroy_command'
5 |
6 | module Pharos
7 | class TerraformCommand < Pharos::Command
8 | subcommand "apply", "apply terraform configuration", Pharos::Terraform::ApplyCommand
9 | subcommand "destroy", "destroy terraformed infrastructure", Pharos::Terraform::DestroyCommand
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/spec/pharos/version_command_spec.rb:
--------------------------------------------------------------------------------
1 | describe Pharos::VersionCommand, if: Pharos.oss? do
2 | subject { described_class.new('') }
3 |
4 | it 'outputs version' do
5 | expect{subject.run([])}.to output(/Kontena Pharos:\n.+?version (\S+)/m).to_stdout
6 | end
7 |
8 | context '--version' do
9 | it 'outputs version' do
10 | expect{subject.run(['--version'])}.to output(/.+?version (\S+)/).to_stdout
11 | end
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/lib/pharos/phases/connect_ssh.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConnectSSH < Pharos::Phase
6 | title "Open SSH connection"
7 |
8 | def call
9 | Retry.perform(60, logger: logger, exceptions: [Net::SSH::Disconnect, Net::SSH::Timeout, Net::SSH::ConnectionTimeout, Errno::EHOSTDOWN]) do
10 | host.transport.connect
11 | end
12 | end
13 | end
14 | end
15 | end
16 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/wait-etcd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | etcd_healthy() {
6 | response=$(curl -s --noproxy "*" --cacert /etc/pharos/pki/ca.pem --cert /etc/pharos/pki/etcd/client.pem --key /etc/pharos/pki/etcd/client-key.pem "https://${PEER_IP}:2379/health")
7 | [ "${response}" = '{"health":"true"}' ]
8 | }
9 |
10 | echo "Waiting for etcd to launch on port 2379..."
11 | while ! etcd_healthy; do
12 | sleep 1
13 | done
14 | echo "etcd launched"
15 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/server.csr:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE REQUEST-----
2 | MIHyMIGYAgEAMBQxEjAQBgNVBAMTCTEyNy4wLjAuMTBZMBMGByqGSM49AgEGCCqG
3 | SM49AwEHA0IABNysQszLxE6zUqGqr1lJqPDHGa6zJ4G0ZaNyo2nLNiJ5eJ9yJKtz
4 | 4AiA/ZTfUZ/qa+2N+CsG7lGI9yjCeWFY28qgIjAgBgkqhkiG9w0BCQ4xEzARMA8G
5 | A1UdEQQIMAaHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhAKNIsF2gZGIwlKLqqiyE
6 | XJiCxaRcjNglvVy6mfsjDJwrAiEAqYvyLFN9UYuUK1yI/xqlXxITHXtsKBFzzlxh
7 | y71FdUk=
8 | -----END CERTIFICATE REQUEST-----
9 |
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_psp.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'mixins/psp'
4 |
5 | module Pharos
6 | module Phases
7 | class ConfigurePSP < Pharos::Phase
8 | include Pharos::Phases::Mixins::PSP
9 | title "Configure pod security policies"
10 |
11 | def call
12 | logger.info { "Configuring default pod security policies ..." }
13 | apply_psp_stack
14 | end
15 | end
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/lib/pharos/cloud/hcloud.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative "provider"
4 |
5 | module Pharos
6 | module Cloud
7 | class HCloud < Provider
8 | register_as :hcloud
9 |
10 | # @return [Hash]
11 | def feature_gates
12 | {
13 | 'CSINodeInfo' => true,
14 | 'CSIDriverRegistry' => true
15 | }
16 | end
17 |
18 | def csi?
19 | true
20 | end
21 | end
22 | end
23 | end
24 |
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/upgrade-kubeadm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ex
4 |
5 | [ -x "/usr/local/bin/pharos-kubeadm-${VERSION}" ] && exit
6 |
7 | tmpdir=$(mktemp -d)
8 | cd "$tmpdir"
9 | apt-get download "kubeadm=${VERSION}-00"
10 | dpkg-deb -R kubeadm_"${VERSION}"-00*.deb kubeadm
11 | install -o root -g root -m 0755 -T ./kubeadm/usr/bin/kubeadm "/usr/local/bin/pharos-kubeadm-${VERSION}"
12 | rm -rf "$tmpdir"
13 | "/usr/local/bin/pharos-kubeadm-${VERSION}" version
14 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/os_release.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class OsRelease < Pharos::Configuration::Struct
6 | attribute :id, Pharos::Types::Strict::String
7 | attribute :id_like, Pharos::Types::Strict::String.optional.default(nil)
8 | attribute :name, Pharos::Types::Strict::String.optional.default(nil)
9 | attribute :version, Pharos::Types::Strict::String
10 | end
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/03-cluster-role.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: system:metrics-server
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - pods
10 | - nodes
11 | - nodes/stats
12 | - namespaces
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 | - apiGroups:
18 | - "extensions"
19 | resources:
20 | - deployments
21 | verbs:
22 | - get
23 | - list
24 | - watch
25 |
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/cluster-role-binding.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: system:metrics-server
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: system:metrics-server
9 | subjects:
10 | - kind: ServiceAccount
11 | name: metrics-server
12 | namespace: kube-system
13 | - apiGroup: rbac.authorization.k8s.io
14 | kind: User
15 | name: metrics-server
16 |
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/14-cluster-role-binding-pharos-cloud-controller.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ClusterRoleBinding
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | metadata:
5 | name: pharos-cloud-controller
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: pharos-cloud-controller
10 | subjects:
11 | - kind: ServiceAccount
12 | name: pharos-cloud-controller # name used by providers/pharos for clientBuilder(...)
13 | namespace: kube-system
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/enhancement.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Enhancement Request
3 | about: Suggest an enhancement to the Pharos project
4 |
5 | ---
6 |
7 |
8 | **What would you like to be added**:
9 |
10 | **Why is this needed**:
11 |
12 |
13 | **Example cluster.yml**:
14 |
15 |
16 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/etcd.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Etcd < Pharos::Configuration::Struct
6 | attribute :endpoints, Pharos::Types::Array.of(Pharos::Types::String)
7 | attribute :version, Pharos::Types::String
8 | attribute :certificate, Pharos::Types::String
9 | attribute :key, Pharos::Types::String
10 | attribute :ca_certificate, Pharos::Types::String
11 | end
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/upgrade-kubeadm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -ex
4 |
5 | if [ -x "/usr/local/bin/pharos-kubeadm-${VERSION}" ]; then
6 | exit
7 | fi
8 |
9 | tmpdir=$(mktemp -d)
10 | cd "$tmpdir"
11 | apt-get download "kubeadm=${VERSION}-00"
12 | dpkg-deb -R kubeadm_"${VERSION}"-00*.deb kubeadm
13 | install -o root -g root -m 0755 -T ./kubeadm/usr/bin/kubeadm "/usr/local/bin/pharos-kubeadm-${VERSION}"
14 | rm -rf "$tmpdir"
15 | "/usr/local/bin/pharos-kubeadm-${VERSION}" version
16 |
--------------------------------------------------------------------------------
/lib/pharos/phases/delete_host.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class DeleteHost < Pharos::Phase
6 | title "Delete node"
7 |
8 | def call
9 | logger.info { "deleting node from kubernetes api ..." }
10 | master_host.transport.exec!("kubectl delete node #{@host.hostname}")
11 | rescue Pharos::ExecError => ex
12 | logger.error { "failed to delete node: #{ex.message}" }
13 | end
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/examples/terraform-do/flannel/01-cluster-role.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ClusterRole
3 | apiVersion: rbac.authorization.k8s.io/v1beta1
4 | metadata:
5 | name: flannel
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - pods
11 | verbs:
12 | - get
13 | - apiGroups:
14 | - ""
15 | resources:
16 | - nodes
17 | verbs:
18 | - list
19 | - watch
20 | - apiGroups:
21 | - ""
22 | resources:
23 | - nodes/status
24 | verbs:
25 | - patch
26 |
--------------------------------------------------------------------------------
/lib/pharos/resources/weave/03-role.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: Role
3 | metadata:
4 | name: weave-net
5 | labels:
6 | name: weave-net
7 | namespace: kube-system
8 | rules:
9 | - apiGroups:
10 | - ''
11 | resourceNames:
12 | - weave-net
13 | - flying-shuttle
14 | resources:
15 | - configmaps
16 | verbs:
17 | - get
18 | - update
19 | - apiGroups:
20 | - ''
21 | resources:
22 | - configmaps
23 | verbs:
24 | - create
25 |
--------------------------------------------------------------------------------
/lib/pharos/phases/drain.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class Drain < Pharos::Phase
6 | title "Drain node"
7 |
8 | def call
9 | logger.info { "draining ..." }
10 | master_host.transport.exec!("kubectl drain --grace-period=120 --force --timeout=5m --ignore-daemonsets --delete-local-data #{@host.hostname}")
11 | rescue Pharos::ExecError => ex
12 | logger.error { "failed to drain node: #{ex.message}" }
13 | end
14 | end
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/lib/pharos/cloud/provider.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative "provider_registry"
4 |
5 | module Pharos
6 | module Cloud
7 | class Provider
8 |       # @param name [String,Symbol]
9 | def self.register_as(name)
10 | Pharos::Cloud::ProviderRegistry.instance.register_as(name, self)
11 | end
12 |
13 | # @return [Hash]
14 | def feature_gates
15 | {}
16 | end
17 |
18 | # @return [Boolean]
19 | def csi?
20 | false
21 | end
22 | end
23 | end
24 | end
25 |
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/install-kube-packages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | export DEBIAN_FRONTEND=noninteractive
6 | apt-mark unhold kubectl kubeadm kubelet kubernetes-cni || echo "Nothing to unhold"
7 | apt-get install -y "kubectl=${KUBE_VERSION}-00" "kubeadm=${KUBEADM_VERSION}-00" "kubelet=${KUBE_VERSION}-00" "kubernetes-cni=${CNI_VERSION}-00"
8 | apt-mark hold kubectl kubeadm kubelet kubernetes-cni
9 |
10 | if ! dpkg -s nfs-common > /dev/null; then
11 | systemctl mask rpcbind
12 | apt-get install -y nfs-common
13 | fi
14 |
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/install-kube-packages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | export DEBIAN_FRONTEND=noninteractive
6 | apt-mark unhold kubectl kubeadm kubelet kubernetes-cni || echo "Nothing to unhold"
7 | apt-get install -y "kubectl=${KUBE_VERSION}-00" "kubeadm=${KUBEADM_VERSION}-00" "kubelet=${KUBE_VERSION}-00" "kubernetes-cni=${CNI_VERSION}-00"
8 | apt-mark hold kubectl kubeadm kubelet kubernetes-cni
9 |
10 | if ! dpkg -s nfs-common > /dev/null; then
11 | systemctl mask rpcbind
12 | apt-get install -y nfs-common
13 | fi
14 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/configure-weave-cni.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | mkdir -p /etc/cni/net.d/
6 | cat <<EOF >/etc/cni/net.d/00-pharos.conflist
7 | {
8 | "cniVersion": "0.3.0",
9 | "name": "pharos",
10 | "plugins": [
11 | {
12 | "name": "weave",
13 | "type": "weave-net",
14 | "hairpinMode": true
15 | },
16 | {
17 | "type": "portmap",
18 | "capabilities": {"portMappings": true},
19 | "snat": true
20 | }
21 | ]
22 | }
23 | EOF
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/02-cluster-role-node-controller.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: system:cloud-node-controller
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - nodes
11 | verbs:
12 | - delete
13 | - get
14 | - patch
15 | - update
16 | - list
17 | - apiGroups:
18 | - ""
19 | resources:
20 | - nodes/status
21 | verbs:
22 | - patch
23 | - apiGroups:
24 | - ""
25 | resources:
26 | - events
27 | verbs:
28 | - create
29 | - patch
30 | - update
--------------------------------------------------------------------------------
/spec/pharos/configuration/cpu_arch_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/config'
2 |
3 | describe Pharos::Configuration::CpuArch do
4 |
5 | let(:subject) do
6 | described_class.new(
7 | id: 'amd64'
8 | )
9 | end
10 |
11 | describe '#supported?' do
12 | it 'returns true when valid id and version' do
13 | expect(subject.supported?).to be_truthy
14 | end
15 |
16 | it 'returns false if invalid version' do
17 | allow(subject).to receive(:id).and_return('armv7')
18 | expect(subject.supported?).to be_falsey
19 | end
20 | end
21 | end
--------------------------------------------------------------------------------
/lib/pharos/configuration/oidc.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class OIDC < Pharos::Configuration::Struct
6 | attribute :issuer_url, Pharos::Types::String
7 | attribute :client_id, Pharos::Types::String
8 | attribute :username_claim, Pharos::Types::String
9 | attribute :username_prefix, Pharos::Types::String
10 | attribute :groups_prefix, Pharos::Types::String
11 | attribute :groups_claim, Pharos::Types::String
12 | attribute :ca_file, Pharos::Types::String
13 | end
14 | end
15 | end
16 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/centos7.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'el7'
4 |
5 | module Pharos
6 | module Host
7 | class Centos7 < El7
8 | register_config 'centos', '7'
9 |
10 | register_component(
11 | name: 'docker', version: DOCKER_VERSION, license: 'Apache License 2.0',
12 | enabled: proc { |c| c.hosts.any? { |h| h.container_runtime == 'docker' } }
13 | )
14 |
15 | register_component(
16 | name: 'cfssl', version: CFSSL_VERSION, license: 'MIT',
17 | enabled: proc { |c| !c.etcd&.endpoints }
18 | )
19 | end
20 | end
21 | end
22 |
--------------------------------------------------------------------------------
/spec/pharos/phases/upgrade_master_spec.rb:
--------------------------------------------------------------------------------
1 | require "pharos/phases/upgrade_master"
2 |
3 | describe Pharos::Phases::UpgradeMaster do
4 | let(:master) { Pharos::Configuration::Host.new(address: 'test') }
5 | let(:config) { Pharos::Config.new(
6 |     hosts: (1..2).map { Pharos::Configuration::Host.new },
7 | network: {},
8 | addons: {},
9 | etcd: {}
10 | ) }
11 | let(:cpu_arch) { double(:cpu_arch, name: 'amd64') }
12 |
13 | before do
14 | allow(master).to receive(:cpu_arch).and_return(cpu_arch)
15 | end
16 |
17 | subject { described_class.new(master, config: config) }
18 | end
19 |
--------------------------------------------------------------------------------
/lib/pharos_cluster.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "clamp"
4 | require_relative "pharos/autoload"
5 | require_relative "pharos/version"
6 | require_relative "pharos/command"
7 | require_relative "pharos/error"
8 | require_relative "pharos/root_command"
9 |
10 | module Pharos
11 | KUBE_VERSION = ENV.fetch('KUBE_VERSION') { '1.18.8' }
12 | CNI_VERSION = '0.8.7'
13 | COREDNS_VERSION = '1.6.7'
14 | DNS_NODE_CACHE_VERSION = '1.15.11'
15 | ETCD_VERSION = ENV.fetch('ETCD_VERSION') { '3.4.3' }
16 | KUBEADM_VERSION = ENV.fetch('KUBEADM_VERSION') { KUBE_VERSION }
17 | KUBELET_PROXY_VERSION = '0.3.8'
18 | end
19 |
--------------------------------------------------------------------------------
/examples/terraform-aws/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_name" {
2 | default = "pharos"
3 | }
4 |
5 | variable "aws_region" {
6 | default = "eu-west-2"
7 | }
8 |
9 | variable "ssh_key" {
10 | description = "SSH key name"
11 | }
12 |
13 | variable "master_count" {
14 | default = 3
15 | }
16 |
17 | variable "worker_count" {
18 | default = 3
19 | }
20 |
21 | variable "master_type" {
22 | default = "m5.large"
23 | }
24 |
25 | variable "worker_type" {
26 | default = "m5.large"
27 | }
28 |
29 | variable "master_volume_size" {
30 | default = 100
31 | }
32 |
33 | variable "worker_volume_size" {
34 | default = 100
35 | }
36 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/upgrade-kubeadm.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # shellcheck disable=SC1091
4 | . /usr/local/share/pharos/util.sh
5 |
6 | set -ex
7 |
8 | if [ -x "/usr/local/bin/pharos-kubeadm-${VERSION}" ]; then
9 | exit
10 | fi
11 |
12 | tmpdir=$(mktemp -d)
13 | mkdir -p "$tmpdir"
14 | yum install "kubeadm-${VERSION}" -y --downloadonly --downloaddir="$tmpdir" --disableplugin=versionlock
15 | cd "$tmpdir"
16 | rpm2cpio ./*kubeadm*.rpm | cpio -idmv
17 | install -o root -g root -m 0755 -T ./usr/bin/kubeadm "/usr/local/bin/pharos-kubeadm-${VERSION}"
18 | rm -rf "$tmpdir"
19 | "/usr/local/bin/pharos-kubeadm-${VERSION}" version
20 |
--------------------------------------------------------------------------------
/lib/pharos/resources/psp/99-default-role-binding.yml:
--------------------------------------------------------------------------------
1 | # Cluster role binding granting all authenticated users access to the default pod security policy
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: default-psp
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: pharos:podsecuritypolicy:default
10 | subjects:
11 | # For authenticated users
12 | - apiGroup: rbac.authorization.k8s.io
13 | kind: Group
14 | name: system:authenticated
15 | # For all service accounts
16 | - apiGroup: rbac.authorization.k8s.io
17 | kind: Group
18 | name: system:serviceaccounts
--------------------------------------------------------------------------------
/e2e/cluster.yml:
--------------------------------------------------------------------------------
1 | hosts:
2 | - address: 127.0.0.1
3 | private_interface: eth0
4 | ssh_port: 9022
5 | user: root
6 | role: master
7 | ssh_key_path: ~/.ssh/id_rsa_travis
8 | container_runtime: $CONTAINER_RUNTIME
9 | - address: 127.0.0.1
10 | private_interface: eth0
11 | ssh_port: 9023
12 | user: root
13 | role: worker
14 | ssh_key_path: ~/.ssh/id_rsa_travis
15 | container_runtime: $CONTAINER_RUNTIME
16 | network:
17 | provider: $NETWORK_PROVIDER
18 | pod_network_cidr: 172.20.0.0/16
19 | service_cidr: 172.19.0.0/16
20 | calico:
21 | ipip_mode: Never
22 | kube_proxy:
23 | conntrack:
24 | maxPerCore: 0
25 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/configure-firewalld.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | mkdir -p /etc/systemd/system/firewalld.service.d
9 | cat <<EOF >/etc/systemd/system/firewalld.service.d/10-pharos.conf
10 | [Service]
11 | Restart=always
12 | Before=kubelet.service
13 | EOF
14 |
15 | if ! rpm -qi firewalld ; then
16 | yum install -y firewalld
17 |
18 | if ! systemctl is-active --quiet firewalld; then
19 | systemctl enable firewalld
20 | systemctl start firewalld
21 | fi
22 | fi
23 |
24 | lineinfile "^CleanupOnExit=" "CleanupOnExit=no" "/etc/firewalld/firewalld.conf"
25 |
--------------------------------------------------------------------------------
/lib/pharos/phases/pull_master_images.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class PullMasterImages < Pharos::Phase
6 | title "Pull control plane images"
7 |
8 | def call
9 | logger.info { "Pulling control plane images ..." }
10 | cfg = kubeadm.generate_yaml_config
11 | transport.tempfile(content: cfg, prefix: "kubeadm.cfg") do |tmp_file|
12 | transport.exec!("sudo kubeadm config images pull --config #{tmp_file}")
13 | end
14 | end
15 |
16 | def kubeadm
17 | Pharos::Kubeadm::ConfigGenerator.new(@config, @host)
18 | end
19 | end
20 | end
21 | end
22 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/kubelet.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Kubelet < Pharos::Configuration::Struct
6 | attribute :read_only_port, Pharos::Types::Bool.default(false)
7 | attribute :feature_gates, Pharos::Types::Strict::Hash
8 | attribute :extra_args, Pharos::Types::Strict::Array.of(Pharos::Types::String)
9 | attribute :cpu_cfs_quota, Pharos::Types::Bool.default(true)
10 | attribute :cpu_cfs_quota_period, Pharos::Types::String
11 | attribute :system_reserved, Pharos::Types::Strict::Hash
12 | attribute :kube_reserved, Pharos::Types::Strict::Hash
13 | end
14 | end
15 | end
16 |
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_kubelet_csr_approver.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureKubeletCsrApprover < Pharos::Phase
6 | title "Configure kubelet csr auto-approver"
7 |
8 | RUBBER_STAMP_VERSION = '0.3.1'
9 |
10 | register_component(
11 | name: 'kubelet-rubber-stamp', version: RUBBER_STAMP_VERSION, license: 'Apache License 2.0'
12 | )
13 |
14 | def call
15 | apply_stack(
16 | "kubelet_rubber_stamp",
17 | version: RUBBER_STAMP_VERSION,
18 | image_repository: @config.image_repository
19 | )
20 | end
21 | end
22 | end
23 | end
24 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/cpu_arch.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class CpuArch < Pharos::Configuration::Struct
6 | SUPPORTED_IDS = %w(
7 | amd64 x86_64
8 | arm64 aarch64
9 | ).freeze
10 |
11 | attribute :id, Pharos::Types::Strict::String
12 |
13 | # @return [Boolean]
14 | def supported?
15 | SUPPORTED_IDS.include?(id)
16 | end
17 |
18 | def name
19 | case id
20 | when 'x86_64'
21 | 'amd64'
22 | when 'aarch64'
23 | 'arm64'
24 | else
25 | id
26 | end
27 | end
28 | end
29 | end
30 | end
31 |
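A quick sketch of the mapping above: uname-style ids normalize to the Go-style names used for architecture selection.

arch = Pharos::Configuration::CpuArch.new(id: 'x86_64')
arch.supported? # => true
arch.name       # => "amd64"
Pharos::Configuration::CpuArch.new(id: 'armv7').supported? # => false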
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/cluster.yml:
--------------------------------------------------------------------------------
1 | hosts:
2 | - address: 192.168.100.100
3 | user: vagrant
4 | role: master
5 | ssh_key_path: ~/.vagrant.d/insecure_private_key
6 | - address: 192.168.100.101
7 | user: vagrant
8 | role: worker
9 | ssh_key_path: ~/.vagrant.d/insecure_private_key
10 | - address: 192.168.100.102
11 | user: vagrant
12 | role: worker
13 | ssh_key_path: ~/.vagrant.d/insecure_private_key
14 | network:
15 | pod_network_cidr: 10.32.0.0/16
16 | service_cidr: 10.33.0.0/16
17 | weave:
18 | trusted_subnets:
19 | - 192.168.100.0/24
20 | addons:
21 | ingress-nginx:
22 | enabled: true
23 | configmap:
24 | map-hash-bucket-size: "128"
25 |
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_bootstrap.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureBootstrap < Pharos::Phase
6 | title "Configure bootstrap tokens"
7 |
8 | def call
9 | if new_hosts?
10 | logger.info { "Creating node bootstrap token ..." }
11 | cluster_context['join-command'] = transport.exec!("sudo kubeadm token create --print-join-command")
12 | else
13 | logger.info { "No new nodes, skipping bootstrap token creation ..." }
14 | end
15 | end
16 |
17 | def new_hosts?
18 | @config.worker_hosts.any? { |h| !h.checks['kubelet_configured'] }
19 | end
20 | end
21 | end
22 | end
23 |
--------------------------------------------------------------------------------
/lib/pharos/phases/migrate_master.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'mixins/cluster_version'
4 |
5 | module Pharos
6 | module Phases
7 | class MigrateMaster < Pharos::Phase
8 | include Pharos::Phases::Mixins::ClusterVersion
9 |
10 | title "Migrate master"
11 |
12 | def call
13 | if existing_version < build_version('2.4.0-alpha.0')
14 | migrate_from_2_3
15 | else
16 | logger.info "Nothing to migrate."
17 | end
18 | end
19 |
20 | def migrate_from_2_3
21 | logger.info "Triggering etcd certificate refresh ..."
22 | cluster_context['recreate-etcd-certs'] = true
23 | end
24 | end
25 | end
26 | end
27 |
--------------------------------------------------------------------------------
/lib/pharos/terraform/json_parser.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'json'
4 |
5 | module Pharos
6 | module Terraform
7 | class ParserError < Pharos::Error; end
8 |
9 | class JsonParser
10 | # @param json [String]
11 | def initialize(json)
12 | @json = json
13 | end
14 |
15 | def data
16 | @data ||= JSON.parse(@json)
17 | rescue JSON::ParserError => ex
18 | raise ParserError, ex.message
19 | end
20 |
21 | def valid?
22 | data.dig('pharos_cluster', 'type').is_a?(Array)
23 | end
24 |
25 | # @return [Hash]
26 | def cluster
27 | data.dig('pharos_cluster', 'value')
28 | end
29 | end
30 | end
31 | end
32 |
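A sketch of the parser against a minimal `terraform output -json` style document (the data below is made up for illustration):

json = '{"pharos_cluster":{"type":["object"],"value":{"hosts":[]}}}'
parser = Pharos::Terraform::JsonParser.new(json)
parser.valid?   # => true, because "type" is an array
parser.cluster  # => {"hosts"=>[]}
Pharos::Terraform::JsonParser.new('not json').data # raises ParserError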
--------------------------------------------------------------------------------
/spec/pharos/cluster_manager_spec.rb:
--------------------------------------------------------------------------------
1 | describe Pharos::ClusterManager do
2 | let(:hosts) do
3 | [ Pharos::Configuration::Host.new(address: '1.1.1.1', role: 'master') ]
4 | end
5 |
6 | let(:subject) do
7 | described_class.new(Pharos::Config.new(
8 | hosts: hosts
9 | ))
10 | end
11 |
12 | let(:transport) do
13 | instance_double(Pharos::Transport::Base)
14 | end
15 |
16 | before(:each) do
17 | hosts.each do |host|
18 | allow(host).to receive(:transport).and_return(transport)
19 | end
20 | end
21 |
22 | describe '#disconnect' do
23 | it 'disconnects transports' do
24 | expect(transport).to receive(:disconnect)
25 | subject.disconnect
26 | end
27 | end
28 | end
29 |
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_custom_network.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureCustomNetwork < Pharos::Phase
6 | title "Configure Custom network"
7 |
8 | def call
9 | logger.info { "Configuring custom network ..." }
10 | stack = Pharos::Kube.stack(
11 | 'custom-network',
12 | @config.network.custom.manifest_path,
13 | name: 'custom_network',
14 | cluster_config: @config,
15 | firewalld_enabled: !!@config.network&.firewalld&.enabled,
16 | reload_iptables: !!cluster_context['reload-iptables']
17 | )
18 | stack.apply(kube_client)
19 | end
20 | end
21 | end
22 | end
23 |
--------------------------------------------------------------------------------
/lib/pharos/resources/psp/02-privileged-role-binding.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: kube-system-psp
5 | namespace: kube-system
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: pharos:podsecuritypolicy:privileged
10 | subjects:
11 | # For the kubeadm kube-system nodes
12 | - apiGroup: rbac.authorization.k8s.io
13 | kind: Group
14 | name: system:nodes
15 | # For the cluster-admin role
16 | - apiGroup: rbac.authorization.k8s.io
17 | kind: Group
18 | name: cluster-admin
19 | # For all service accounts in the kube-system namespace
20 | - apiGroup: rbac.authorization.k8s.io
21 | kind: Group
22 | name: system:serviceaccounts:kube-system
--------------------------------------------------------------------------------
/lib/pharos/terraform/destroy_command.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'base_command'
4 |
5 | module Pharos
6 | module Terraform
7 | class DestroyCommand < BaseCommand
8 | def execute
9 | tf_workspace
10 | tf_destroy
11 | end
12 |
13 | def tf_destroy
14 | cmd = ["terraform", "destroy"]
15 | cmd += common_tf_options
16 |
17 | run_cmd!(cmd.join(' '))
18 | unless workspace == 'default'
19 | run_cmd! "terraform workspace select default"
20 | run_cmd! "terraform workspace delete #{workspace}"
21 | end
22 | File.delete(workspace_file) if File.exist?(workspace_file)
23 | end
24 | end
25 | end
26 | end
27 |
--------------------------------------------------------------------------------
/lib/pharos/core-ext/colorize.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'pastel'
4 |
5 | module Pharos
6 | module CoreExt
7 | module Colorize
8 | def self.pastel
9 | @pastel ||= Pastel.new(enabled: true)
10 | end
11 |
12 | def self.disable!
13 | @pastel = Pastel.new(enabled: false)
14 | end
15 |
16 | def self.enabled?
17 | pastel.enabled?
18 | end
19 |
20 | refine String do
21 | Pastel::ANSI::ATTRIBUTES.each_key do |meth|
22 | next if meth == :underscore
23 |
24 | define_method(meth) do
25 | Pharos::CoreExt::Colorize.pastel.send(meth, self)
26 | end
27 | end
28 | end
29 | end
30 | end
31 | end
32 |
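A usage sketch: the refinement is opt-in per file via `using`, and the helper names (green, red, bold, ...) come from Pastel's attribute table.

using Pharos::CoreExt::Colorize

puts 'ready'.green
puts 'failed'.red
Pharos::CoreExt::Colorize.disable! # later calls return plain strings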
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/configure-firewalld.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | mkdir -p /etc/systemd/system/firewalld.service.d
9 | cat <<EOF >/etc/systemd/system/firewalld.service.d/10-pharos.conf
10 | [Service]
11 | Restart=always
12 | Before=kubelet.service
13 | EOF
14 |
15 | if ! dpkg -l firewalld > /dev/null; then
16 | export DEBIAN_FRONTEND=noninteractive
17 | apt-get install -y firewalld ipset ebtables
18 | if ! systemctl is-active --quiet firewalld; then
19 | systemctl enable firewalld
20 | systemctl start firewalld
21 | fi
22 | fi
23 |
24 | lineinfile "^CleanupOnExit=" "CleanupOnExit=no" "/etc/firewalld/firewalld.conf"
25 |
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/ensure-kubelet.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | # we don't want to accidentally upgrade kubelet
6 | if systemctl is-active --quiet kubelet; then
7 | exit 0
8 | fi
9 |
10 | mkdir -p /etc/systemd/system/kubelet.service.d
11 | cat <<EOF >/etc/systemd/system/kubelet.service.d/05-pharos-kubelet.conf
12 | [Service]
13 | ExecStartPre=-/sbin/swapoff -a
14 | ExecStart=
15 | ExecStart=/usr/bin/kubelet ${KUBELET_ARGS} --pod-infra-container-image=${IMAGE_REPO}/pause:3.1
16 | EOF
17 |
18 | export DEBIAN_FRONTEND=noninteractive
19 | apt-mark unhold kubelet kubernetes-cni || echo "Nothing to unhold"
20 | apt-get install -y "kubelet=${KUBE_VERSION}-00" "kubernetes-cni=${CNI_VERSION}-00"
21 | apt-mark hold kubelet kubernetes-cni
22 |
--------------------------------------------------------------------------------
/lib/pharos/resources/weave/01-cluster-role.yml.erb:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRole
3 | metadata:
4 | name: weave-net
5 | labels:
6 | name: weave-net
7 | rules:
8 | - apiGroups:
9 | - ''
10 | resources:
11 | - pods
12 | - namespaces
13 | - nodes
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | - apiGroups:
19 | - networking.k8s.io
20 | resources:
21 | - networkpolicies
22 | verbs:
23 | - get
24 | - list
25 | - watch
26 | - apiGroups:
27 | - ''
28 | resources:
29 | <% if flying_shuttle_enabled %>
30 | - nodes
31 | <% end %>
32 | - nodes/status
33 | verbs:
34 | - patch
35 | - update
36 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/configure-firewalld.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | RELOAD="false"
6 | # reload only if this is first run
7 | if ! firewall-cmd --get-services | grep pharos-worker > /dev/null 2>&1 ; then
8 | RELOAD="true"
9 | firewall-cmd --reload
10 | sleep 10
11 | fi
12 |
13 | if [ "$ROLE" = "master" ]; then
14 | firewall-cmd --permanent --add-service pharos-master
15 | fi
16 |
17 | firewall-cmd --permanent --add-service pharos-worker
18 | firewall-cmd --permanent --add-source ipset:pharos --zone trusted
19 | if firewall-cmd --query-masquerade > /dev/null 2>&1 ; then
20 | firewall-cmd --remove-masquerade --permanent
21 | fi
22 |
23 | if [[ "${RELOAD}" = "true" ]]; then
24 | firewall-cmd --reload
25 | sleep 10
26 | fi
27 |
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/configure-essentials.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | if ! dpkg -l apt-transport-https software-properties-common > /dev/null; then
9 | export DEBIAN_FRONTEND=noninteractive
10 | apt-get update -y
11 | apt-get install -y apt-transport-https software-properties-common
12 | fi
13 |
14 | autoupgrade_file="/etc/apt/apt.conf.d/20auto-upgrades"
15 | if [ ! -f "$autoupgrade_file" ]; then
16 |     touch "$autoupgrade_file"
17 | fi
18 | lineinfile "^APT::Periodic::Update-Package-Lists " 'APT::Periodic::Update-Package-Lists "1";' "$autoupgrade_file"
19 | lineinfile "^APT::Periodic::Unattended-Upgrade " 'APT::Periodic::Unattended-Upgrade "0";' "$autoupgrade_file"
20 |
--------------------------------------------------------------------------------
/examples/terraform-aws/output.tf:
--------------------------------------------------------------------------------
1 | output "pharos_api" {
2 | value = {
3 | endpoint = "${aws_lb.pharos_master.dns_name}"
4 | }
5 | }
6 |
7 | output "pharos_hosts" {
8 | value = {
9 | masters = {
10 | address = "${aws_instance.pharos_master.*.public_ip}"
11 | private_address = "${aws_instance.pharos_master.*.private_ip}"
12 | role = "master"
13 | user = "ubuntu"
14 | }
15 |
16 | workers = {
17 | address = "${aws_instance.pharos_worker.*.public_ip}"
18 | private_address = "${aws_instance.pharos_worker.*.private_ip}"
19 | role = "worker"
20 | user = "ubuntu"
21 |
22 | label = {
23 | ingress = "nginx"
24 | }
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/rhel7.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'el7'
4 |
5 | module Pharos
6 | module Host
7 | class Rhel7 < El7
8 | register_config 'rhel', '7.4'
9 | register_config 'rhel', '7.5'
10 | register_config 'rhel', '7.6'
11 | register_config 'rhel', '7.7'
12 | register_config 'rhel', '7.8'
13 |
14 | register_component(
15 | name: 'docker', version: DOCKER_VERSION, license: 'Apache License 2.0',
16 | enabled: proc { |c| c.hosts.any? { |h| h.container_runtime == 'docker' } }
17 | )
18 |
19 | register_component(
20 | name: 'cfssl', version: CFSSL_VERSION, license: 'MIT',
21 | enabled: proc { |c| !c.etcd&.endpoints }
22 | )
23 | end
24 | end
25 | end
26 |
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/configure-firewalld.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | mkdir -p /etc/systemd/system/firewalld.service.d
9 | cat <<EOF >/etc/systemd/system/firewalld.service.d/10-pharos.conf
10 | [Service]
11 | Restart=always
12 | Before=kubelet.service
13 | EOF
14 |
15 | if ! dpkg -l firewalld > /dev/null; then
16 | export DEBIAN_FRONTEND=noninteractive
17 | systemctl mask ebtables
18 | apt-get install -y firewalld ipset
19 |
20 | if ! systemctl is-active --quiet firewalld; then
21 | systemctl enable firewalld
22 | systemctl start firewalld
23 | fi
24 | fi
25 |
26 | lineinfile "^CleanupOnExit=" "CleanupOnExit=no" "/etc/firewalld/firewalld.conf"
27 |
--------------------------------------------------------------------------------
/lib/pharos/resources/weave/10-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: weave
5 | namespace: kube-system
6 | annotations:
7 | prometheus.io/scrape: 'true'
8 | spec:
9 | type: ClusterIP
10 | clusterIP: None
11 | selector:
12 | name: weave-net
13 | ports:
14 | - name: weave
15 | protocol: TCP
16 | port: 80
17 | targetPort: 6782
18 | ---
19 | apiVersion: v1
20 | kind: Service
21 | metadata:
22 | name: weave-npc
23 | namespace: kube-system
24 | annotations:
25 | prometheus.io/scrape: 'true'
26 | spec:
27 | type: ClusterIP
28 | clusterIP: None
29 | selector:
30 | name: weave-net
31 | ports:
32 | - name: weave-npc
33 | protocol: TCP
34 | port: 80
35 | targetPort: 6781
36 |
--------------------------------------------------------------------------------
/lib/pharos/resources/helm-controller/03-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: helm-controller
5 | namespace: kube-system
6 | labels:
7 | app: helm-controller
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: helm-controller
13 | template:
14 | metadata:
15 | labels:
16 | app: helm-controller
17 | spec:
18 | serviceAccountName: helm-controller
19 | containers:
20 | - name: helm-controller
21 | image: docker.io/rancher/helm-controller:v0.4.1
22 | command: ["helm-controller"]
23 | args: ["--namespace", "kube-system"]
24 | resources:
25 | requests:
26 | cpu: 10m
27 | memory: 24Mi
28 |
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/configure-essentials.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | if ! dpkg -l apt-transport-https software-properties-common curl > /dev/null; then
9 | export DEBIAN_FRONTEND=noninteractive
10 | apt-get update -y
11 | apt-get install -y apt-transport-https software-properties-common curl
12 | fi
13 |
14 | autoupgrade_file="/etc/apt/apt.conf.d/20auto-upgrades"
15 | if [ ! -f "$autoupgrade_file" ]; then
16 | touch "$autoupgrade_file"
17 | fi
18 | lineinfile "^APT::Periodic::Update-Package-Lists " 'APT::Periodic::Update-Package-Lists "1";' "$autoupgrade_file"
19 | lineinfile "^APT::Periodic::Unattended-Upgrade " 'APT::Periodic::Unattended-Upgrade "0";' "$autoupgrade_file"
20 |
--------------------------------------------------------------------------------
/lib/pharos/phases/join_node.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class JoinNode < Pharos::Phase
6 | title "Join nodes"
7 | def already_joined?
8 | transport.file("/etc/kubernetes/kubelet.conf").exist?
9 | end
10 |
11 | def call
12 | if already_joined?
13 | logger.info { "Already joined ..." }
14 | return
15 | end
16 |
17 | logger.info { "Joining host to the master ..." }
18 | join_command = cluster_context['join-command'].split(' ')
19 | join_command << "--node-name #{@host.hostname}"
20 | join_command << "--ignore-preflight-errors all"
21 |
22 | transport.exec!('sudo ' + join_command.join(' '))
23 | end
24 | end
25 | end
26 | end
27 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/configure-containerd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 | # shellcheck disable=SC1091
8 | . /usr/local/share/pharos/el7.sh
9 |
10 | configure_container_runtime_proxy "containerd"
11 |
12 | if [ -z "$CONTAINERD_VERSION" ]; then
13 | docker info
14 | exit 0
15 | fi
16 |
17 | yum_install_with_lock "containerd.io" "${CONTAINERD_VERSION}"
18 |
19 | lineinfile "^disabled_plugins =" "disabled_plugins = []" "/etc/containerd/config.toml"
20 |
21 | if ! systemctl is-active --quiet containerd; then
22 | systemctl enable containerd
23 | systemctl start containerd
24 | fi
25 |
30 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/install-kube-packages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # shellcheck disable=SC1091
4 | . /usr/local/share/pharos/util.sh
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/el7.sh
7 |
8 | set -e
9 |
10 | yum_install_with_lock "kubectl" "$KUBE_VERSION"
11 | yum_install_with_lock "kubeadm" "$KUBE_VERSION"
12 | yum_install_with_lock "kubelet" "$KUBE_VERSION"
13 |
14 | if needs-restarting -s | grep -q kubelet.service ; then
15 | systemctl daemon-reload
16 | systemctl restart kubelet
17 | fi
18 |
19 | # use KUBELET_EXTRA_ARGS from /etc/systemd/system/kubelet.service.d/11-pharos.conf instead
20 | sed -i 's/^KUBELET_EXTRA_ARGS=/#\0/' /etc/sysconfig/kubelet
21 |
22 | if systemctl is-active --quiet rpcbind; then
23 | systemctl stop rpcbind
24 | systemctl disable rpcbind
25 | fi
26 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/configure-kubelet-proxy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | mkdir -p /etc/kubernetes/manifests
6 | mkdir -p /etc/kubernetes/tmp
7 | cat >/etc/kubernetes/tmp/pharos-proxy.yaml <<EOF
22 |
23 | if [ "${WORKER_UP_COUNT}" -gt "0" ]; then
24 | jq ".worker_up.value.address[0]" tf.json | sed 's/"//g' > worker_up_address.txt
25 | jq ".pharos_hosts.value.masters[0].address[0]" tf.json | sed 's/"//g' > master_address.txt
26 | fi
27 |
28 | sleep 10
29 |
--------------------------------------------------------------------------------
/lib/pharos/command_options/yes.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module CommandOptions
5 | module Yes
6 | def self.included(base)
7 | base.prepend(InstanceMethods)
8 | base.option ['-y', '--yes'], :flag, 'answer automatically yes to prompts'
9 | end
10 |
11 | module InstanceMethods
12 | def confirm_yes!(message, default: true)
13 | return if yes?
14 |
15 | if !$stdin.tty?
16 | warn('--yes required when running in non interactive mode')
17 | exit 1
18 | else
19 | exit 1 unless prompt.yes?(message, default: default)
20 | end
21 | rescue TTY::Reader::InputInterrupt
22 | warn 'Interrupted'
23 | exit 1
24 | end
25 | end
26 | end
27 | end
28 | end
29 |
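A sketch of a command opting in to the flag (DestroyThingCommand is hypothetical):

class DestroyThingCommand < Pharos::Command
  include Pharos::CommandOptions::Yes

  def execute
    # exits unless --yes was given or the user confirms interactively
    confirm_yes!('Destroy the thing?', default: false)
    # ... destructive work only runs past this point ...
  end
end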
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/reset.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | rm -rf /etc/kubernetes/manifests
4 | sleep 5
5 | systemctl stop kubelet
6 | systemctl disable kubelet
7 |
8 | kubeadm reset --force
9 |
10 | export DEBIAN_FRONTEND=noninteractive
11 | apt-get purge -y --allow-change-held-packages --purge kubeadm kubelet kubectl kubernetes-cni docker-ce
12 | apt-get autoremove -y
13 | rm -rf /etc/kubernetes \
14 | /etc/pharos \
15 | /etc/systemd/system/kubelet.service \
16 | /etc/systemd/system/kubelet.service.d \
17 | ~/.kube \
18 | /var/lib/kubelet \
19 | /var/lib/containers \
20 | /opt/cni \
21 | /var/lib/etcd \
22 | /var/lib/weave \
23 | /var/lib/calico \
24 | /usr/local/bin/crictl \
25 | /opt/pharos \
26 | /usr/local/bin/pharos-kubeadm-*
27 |
28 | systemctl daemon-reload
29 | systemctl reset-failed
30 |
--------------------------------------------------------------------------------
/examples/terraform-do/flannel/04-config-map.yml.erb:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ConfigMap
3 | apiVersion: v1
4 | metadata:
5 | name: kube-flannel-cfg
6 | namespace: kube-system
7 | labels:
8 | tier: node
9 | app: flannel
10 | data:
11 | cni-conf.json: |
12 | {
13 | "name": "cbr0",
14 | "plugins": [
15 | {
16 | "type": "flannel",
17 | "delegate": {
18 | "hairpinMode": true,
19 | "isDefaultGateway": true
20 | }
21 | },
22 | {
23 | "type": "portmap",
24 | "capabilities": {
25 | "portMappings": true
26 | }
27 | }
28 | ]
29 | }
30 | net-conf.json: |
31 | {
32 | "Network": "<%= cluster_config.network.pod_network_cidr %>",
33 | "Backend": {
34 | "Type": "vxlan"
35 | }
36 | }
--------------------------------------------------------------------------------
/lib/pharos/cloud/provider_registry.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require "singleton"
4 |
5 | module Pharos
6 | module Cloud
7 | class ProviderRegistry
8 | include Singleton
9 |
10 | def initialize
11 | @registry = {}
12 | end
13 |
14 | # @param provider_name [String,Symbol]
15 | # @return [Pharos::Cloud::Provider]
16 | def provider(provider_name)
17 | (@registry[provider_name.to_sym] || Pharos::Cloud::Provider).new
18 | end
19 |
20 |       # @param name [String,Symbol]
21 |       # @param klass [Class]
22 |       def register_as(name, klass)
23 |         # symbolize the key so #provider, which looks up by symbol, always finds it
24 |         @registry[name.to_sym] = klass
25 |       end
26 |
27 | # @return [Hash{Symbol => Pharos::Cloud::Provider}]
28 | def providers
29 | @registry
30 | end
31 | end
32 | end
33 | end
34 |
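A sketch of the register/lookup round trip; unknown names fall back to the plain Provider (ExampleCloud is hypothetical):

class ExampleCloud < Pharos::Cloud::Provider
  register_as :example

  def csi?
    true
  end
end

Pharos::Cloud::ProviderRegistry.instance.provider(:example).csi? # => true
Pharos::Cloud::ProviderRegistry.instance.provider(:unknown).csi? # => false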
--------------------------------------------------------------------------------
/lib/pharos/phases/mixins/cluster_version.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | module Mixins
6 | module ClusterVersion
7 | # @return [Gem::Version]
8 | def existing_version
9 | if cluster_version = cluster_context['existing-pharos-version']
10 | build_version(cluster_version)
11 | else
12 | build_version('0.0.1')
13 | end
14 | end
15 |
16 | # @return [Gem::Version]
17 | def pharos_version
18 | @pharos_version ||= build_version(Pharos::VERSION)
19 | end
20 |
21 | # @param version [String]
22 | # @return [Gem::Version]
23 | def build_version(version)
24 | Gem::Version.new(version.gsub(/\+.*/, ''))
25 | end
26 | end
27 | end
28 | end
29 | end
30 |
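The comparisons build on Gem::Version semantics, where a pre-release sorts below its release and build_version first strips "+build" metadata:

Gem::Version.new('2.3.5') < Gem::Version.new('2.4.0-alpha.0') # => true
Gem::Version.new('2.4.0-alpha.0') < Gem::Version.new('2.4.0') # => true
'2.4.0+oss'.gsub(/\+.*/, '') # => "2.4.0", what build_version compares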
--------------------------------------------------------------------------------
/lib/pharos/kubeadm/kubeproxy_config.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Kubeadm
5 | class KubeProxyConfig
6 | # @param config [Pharos::Config] cluster config
7 | # @param host [Pharos::Configuration::Host] master host-specific config
8 | def initialize(config, host)
9 | @config = config
10 | @host = host
11 | end
12 |
13 | # @return [Hash]
14 | def generate
15 | config = {
16 | 'apiVersion' => 'kubeproxy.config.k8s.io/v1alpha1',
17 | 'kind' => 'KubeProxyConfiguration',
18 | 'mode' => @config.kube_proxy&.mode || 'iptables'
19 | }
20 | if @config.kube_proxy&.conntrack
21 | config['conntrack'] = @config.kube_proxy.conntrack
22 | end
23 |
24 | config
25 | end
26 | end
27 | end
28 | end
29 |
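For illustration, given a `cluster.yml` that sets `kube_proxy.mode: ipvs` with no conntrack overrides, `#generate` returns a hash like this (a sketch of the output, following the code above):

```ruby
# Hypothetical result of KubeProxyConfig.new(config, master).generate
{
  'apiVersion' => 'kubeproxy.config.k8s.io/v1alpha1',
  'kind'       => 'KubeProxyConfiguration',
  'mode'       => 'ipvs'
}
```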
--------------------------------------------------------------------------------
/lib/pharos/configuration/bastion.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Bastion < Pharos::Configuration::Struct
6 | attribute :address, Pharos::Types::Strict::String
7 | attribute :user, Pharos::Types::Strict::String
8 | attribute :ssh_key_path, Pharos::Types::Strict::String
9 | attribute :ssh_port, Pharos::Types::Strict::Integer.default(22)
10 | attribute :ssh_proxy_command, Pharos::Types::Strict::String
11 |
12 | def host
13 | @host ||= Host.new(attributes)
14 | end
15 |
16 | def method_missing(meth, *args)
17 | host.respond_to?(meth) ? host.send(meth, *args) : super
18 | end
19 |
20 | def respond_to_missing?(meth, include_private = false)
21 | host.respond_to?(meth) || super
22 | end
23 | end
24 | end
25 | end
26 |
--------------------------------------------------------------------------------
/lib/pharos/resources/kubelet_rubber_stamp/01-role.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRole
3 | metadata:
4 | name: kubelet-rubber-stamp
5 | rules:
6 | - apiGroups:
7 | - certificates.k8s.io
8 | resources:
9 | - signers
10 | # legacy-unknown: support before kubernetes-1.18.0
11 | resourceNames:
12 | - "kubernetes.io/legacy-unknown"
13 | - "kubernetes.io/kubelet-serving"
14 | verbs:
15 | - approve
16 | - apiGroups:
17 | - certificates.k8s.io
18 | resources:
19 | - certificatesigningrequests
20 | verbs:
21 | - get
22 | - list
23 | - watch
24 | - apiGroups:
25 | - certificates.k8s.io
26 | resources:
27 | - certificatesigningrequests/approval
28 | verbs:
29 | - create
30 | - update
31 | - apiGroups:
32 | - authorization.k8s.io
33 | resources:
34 | - subjectaccessreviews
35 | verbs:
36 | - create
37 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/configure-etcd-ca.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | mkdir -p /etc/pharos/pki
9 | if [ ! -e /etc/pharos/pki/ca-csr.json ]; then
10 | cat <<EOF >/etc/pharos/pki/ca-csr.json
11 | {
12 | "CN": "Kontena Pharos CA",
13 | "key": {
14 | "algo": "rsa",
15 | "size": 2048
16 | },
17 | "names": [
18 | {
19 | "C": "US",
20 | "L": "NY",
21 | "O": "Kontena Inc",
22 | "ST": "New York",
23 | "OU": "Kontena Pharos"
24 | }
25 | ]
26 | }
27 | EOF
28 | fi
29 |
30 | cd /etc/pharos/pki
31 |
32 | if [ ! -e ca.pem ]; then
33 | echo "Initializing Certificate Authority ..."
34 | /opt/pharos/bin/cfssl gencert -initca ca-csr.json | /opt/pharos/bin/cfssljson -bare ca -
35 | fi
36 |
37 |
--------------------------------------------------------------------------------
/lib/pharos/scripts/migrations/migrate_master_05_to_06.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if [ -e /etc/kubernetes/manifests/etcd.yaml ]; then
6 | # shutdown etcd
7 | rm /etc/kubernetes/manifests/etcd.yaml
8 | while nc -z localhost 2379; do
9 | sleep 1
10 | done
11 | # shutdown control plane
12 | rm /etc/kubernetes/manifests/kube-*.yaml
13 | while nc -z localhost 6443; do
14 | sleep 1
15 | done
16 | # trigger new kubeadm init
17 | rm /etc/kubernetes/admin.conf
18 | # reconfigure
19 | sed -i "s/${PEER_IP}/localhost/g" /etc/kubernetes/controller-manager.conf
20 | sed -i "s/${PEER_IP}/localhost/g" /etc/kubernetes/kubelet.conf
21 | sed -i "s/${PEER_IP}/localhost/g" /etc/kubernetes/scheduler.conf
22 | # trigger new certs
23 | rm /etc/kubernetes/pki/apiserver.*
24 | rm /etc/kubernetes/pki/front-proxy-*
25 | systemctl restart kubelet
26 | fi
--------------------------------------------------------------------------------
/conformance/README.md:
--------------------------------------------------------------------------------
1 | # Conformance testing Kontena Pharos
2 |
3 | ## Set up a Kontena Pharos cluster
4 |
5 | Set up a Kontena Pharos cluster as per the [Pharos documentation](https://www.pharos.sh/docs/). To run the conformance tests, use a cluster with sufficient resources; the e2e suite schedules a large number of test pods.
6 |
7 | ## Run conformance tests
8 |
9 | Download the latest version of the Heptio Sonobuoy tool from [here](https://github.com/heptio/sonobuoy/releases/latest).
10 |
11 | Start the conformance tests on your Kontena Pharos cluster:
12 |
13 | ```sh
14 | $ sonobuoy start
15 | ```
16 |
17 | View status:
18 |
19 | ```sh
20 | $ sonobuoy status
21 | ```
22 |
23 | View logs:
24 |
25 | ```sh
26 | $ sonobuoy logs
27 | ```
28 |
29 | Once `sonobuoy status` shows the run as completed, copy the output directory from the main Sonobuoy pod to a local directory:
30 |
31 | ```sh
32 | $ sonobuoy retrieve .
33 | ```
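
`sonobuoy retrieve` drops a timestamped tarball into the current directory. A sketch for inspecting the e2e results (the archive name varies per run, and the paths assume the default e2e plugin layout):

```sh
$ mkdir ./results
$ tar xzf *_sonobuoy_*.tar.gz -C ./results
$ less ./results/plugins/e2e/results/e2e.log
```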
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/ensure-kubelet.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 |
5 | # we don't want to accidentally upgrade kubelet
6 | if systemctl is-active --quiet kubelet; then
7 | exit 0
8 | fi
9 |
10 | mkdir -p /etc/systemd/system/kubelet.service.d
11 | cat <<EOF >/etc/systemd/system/kubelet.service.d/05-pharos-kubelet.conf
12 | [Service]
13 | ExecStartPre=-/sbin/swapoff -a
14 | ExecStart=
15 | ExecStart=/usr/bin/kubelet ${KUBELET_ARGS} --pod-infra-container-image=${IMAGE_REPO}/pause:3.1
16 | EOF
17 |
18 | export DEBIAN_FRONTEND=noninteractive
19 | apt-mark unhold kubelet kubernetes-cni || echo "Nothing to unhold"
20 | apt-get install -y "kubelet=${KUBE_VERSION}-00" "kubernetes-cni=${CNI_VERSION}-00"
21 | apt-mark hold kubelet kubernetes-cni
22 |
23 | if ! systemctl is-active --quiet kubelet; then
24 | systemctl enable kubelet
25 | systemctl start kubelet
26 | fi
27 |
--------------------------------------------------------------------------------
/lib/pharos/version_command.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | class VersionCommand < Pharos::Command
5 | def execute
6 | puts "Kontena Pharos:"
7 | puts " - #{File.basename($PROGRAM_NAME)} version #{Pharos.version}"
8 | ClusterManager.new(Pharos::Config.new({})).load
9 |
10 | phases.each do |os, phases|
11 | title = (os || 'Common').capitalize
12 | puts "#{title}:"
13 | phases.each do |c|
14 | puts " - #{c.name} #{c.version} (#{c.license})"
15 | end
16 | end
17 | end
18 |
19 | # @return [Hash{String,nil => Array}] components grouped by OS release
20 | def phases
21 | phases = Pharos::Phases.components.sort_by(&:name)
22 | phases.group_by { |c|
23 | if c.os_release
24 | "#{c.os_release.id} #{c.os_release.version}"
25 | end
26 | }
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/ensure-kubelet.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # shellcheck disable=SC1091
4 | . /usr/local/share/pharos/util.sh
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/el7.sh
7 |
8 | set -e
9 |
10 | # we don't want to accidentally upgrade kubelet
11 | if systemctl is-active --quiet kubelet; then
12 | exit 0
13 | fi
14 |
15 | mkdir -p /etc/systemd/system/kubelet.service.d
16 | cat <<EOF >/etc/systemd/system/kubelet.service.d/05-pharos-kubelet.conf
17 | [Service]
18 | ExecStartPre=-/sbin/swapoff -a
19 | ExecStart=
20 | ExecStart=/usr/bin/kubelet ${KUBELET_ARGS} --pod-infra-container-image=${IMAGE_REPO}/pause:3.1
21 | EOF
22 |
23 | yum_install_with_lock "kubernetes-cni" "$CNI_VERSION"
24 | yum_install_with_lock "kubelet" "$KUBE_VERSION"
25 |
26 | if ! systemctl is-active --quiet kubelet; then
27 | systemctl enable kubelet
28 | systemctl start kubelet
29 | fi
30 |
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_helm_controller.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureHelmController < Pharos::Phase
6 | title "Configure Helm Controller"
7 |
8 | HELM_CONTROLLER_VERSION = '0.4.1'
9 |
10 | register_component(
11 | name: 'helm-controller', version: HELM_CONTROLLER_VERSION, license: 'Apache License 2.0'
12 | )
13 |
14 | def call
15 | configure_helm_controller
16 | end
17 |
18 | def configure_helm_controller
19 | logger.info { "Configuring helm controller ..." }
20 | Retry.perform(logger: logger, exceptions: [K8s::Error::NotFound, K8s::Error::ServiceUnavailable]) do
21 | apply_stack(
22 | 'helm-controller',
23 | version: HELM_CONTROLLER_VERSION
24 | )
25 | end
26 | end
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_etcd_ca.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureEtcdCa < Pharos::Phase
6 | title "Configure etcd certificate authority"
7 | CA_PATH = '/etc/pharos/pki'
8 | CA_FILES = %w(ca.pem ca-key.pem).freeze
9 |
10 | def call
11 | logger.info { 'Configuring etcd certificate authority ...' }
12 | exec_script(
13 | 'configure-etcd-ca.sh',
14 | ARCH: @host.cpu_arch.name
15 | )
16 | logger.info { 'Caching certificate authority files to memory ...' }
17 | cache_ca_to_memory
18 | end
19 |
20 | def cache_ca_to_memory
21 | data = {}
22 | CA_FILES.each do |file|
23 | data[file] = transport.file(File.join(CA_PATH, file)).read
24 | end
25 | cluster_context['etcd-ca'] = data
26 | end
27 | end
28 | end
29 | end
30 |
--------------------------------------------------------------------------------
/lib/pharos/transport/local.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Transport
5 | class Local < Base
6 | def forward(*_args)
7 | raise TypeError, "Non-SSH connections do not provide port forwarding"
8 | end
9 |
10 | def close(*_args)
11 | raise TypeError, "Non-SSH connections do not provide port forwarding"
12 | end
13 |
14 | def connect(**_options)
15 | nil
16 | end
17 |
18 | def disconnect
19 | nil
20 | end
21 |
22 | def connected?
23 | true
24 | end
25 |
26 | def interactive_session
27 | return unless ENV['SHELL']
28 |
29 | synchronize { system ENV['SHELL'] }
30 | end
31 |
32 | private
33 |
34 | def command(cmd, **options)
35 | Pharos::Transport::Command::Local.new(self, cmd, **options)
36 | end
37 | end
38 | end
39 | end
40 |
--------------------------------------------------------------------------------
/lib/pharos/transport.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'net/ssh'
4 | require 'net/ssh/proxy/jump'
5 |
6 | module Pharos
7 | module Transport
8 | def self.for(host, **options)
9 | if host.local?
10 | Local.new('localhost', **options)
11 | else
12 | opts = {}
13 | opts[:keys] = [host.ssh_key_path] if host.ssh_key_path
14 | opts[:send_env] = [] # override default to not send LC_* envs
15 | opts[:proxy] = Net::SSH::Proxy::Command.new(host.ssh_proxy_command) if host.ssh_proxy_command
16 | opts[:bastion] = host.bastion if host.bastion
17 | opts[:port] = host.ssh_port
18 | opts[:keepalive] = true
19 | opts[:keepalive_interval] = 30
20 | opts[:keepalive_maxcount] = 5
21 | opts[:timeout] = 5
22 | SSH.new(host.address, user: host.user, **opts.merge(options))
23 | end
24 | end
25 | end
26 | end
27 |
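A short usage sketch for the factory above (`host` is assumed to be a `Pharos::Configuration::Host`):

```ruby
# Local hosts get a Pharos::Transport::Local (runs commands on this machine);
# everything else gets a Pharos::Transport::SSH with keepalives and optional
# bastion/proxy-command settings taken from cluster.yml.
transport = Pharos::Transport.for(host)
transport.connect
transport.exec!("hostname") if transport.connected?
```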
--------------------------------------------------------------------------------
/kube-bench/job-node.yml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: kube-bench-node
5 | spec:
6 | template:
7 | spec:
8 | hostPID: true
9 | containers:
10 | - name: kube-bench
11 | image: aquasec/kube-bench:latest
12 | command: ["kube-bench","node", "--noremediations", "--version", "1.11"]
13 | volumeMounts:
14 | - name: var-lib-kubelet
15 | mountPath: /var/lib/kubelet
16 | - name: etc-systemd
17 | mountPath: /etc/systemd
18 | - name: etc-kubernetes
19 | mountPath: /etc/kubernetes
20 | restartPolicy: Never
21 | volumes:
22 | - name: var-lib-kubelet
23 | hostPath:
24 | path: "/var/lib/kubelet"
25 | - name: etc-systemd
26 | hostPath:
27 | path: "/etc/systemd"
28 | - name: etc-kubernetes
29 | hostPath:
30 | path: "/etc/kubernetes"
31 |
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/configure-containerd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | configure_container_runtime_proxy "containerd"
9 |
10 | if [ -z "$CONTAINERD_VERSION" ]; then
11 | containerd -v
12 | exit 0
13 | fi
14 |
15 | export DEBIAN_FRONTEND=noninteractive
16 |
17 | apt-mark unhold containerd.io || echo "Nothing to unhold"
18 | if dpkg -l docker-ce ; then
19 | apt-get install -y "containerd.io=$CONTAINERD_VERSION*" || echo "Cannot install specific version, keeping the current one"
20 | else
21 | apt-get install -y "containerd.io=$CONTAINERD_VERSION*"
22 | fi
23 | apt-mark hold containerd.io
24 |
25 | lineinfile "^disabled_plugins =" "disabled_plugins = []" "/etc/containerd/config.toml"
26 |
27 | if ! systemctl is-active --quiet containerd; then
28 | systemctl enable containerd
29 | systemctl start containerd
30 | fi
31 |
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/configure-containerd.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | configure_container_runtime_proxy "containerd"
9 |
10 | if [ -z "$CONTAINERD_VERSION" ]; then
11 | containerd -v
12 | exit 0
13 | fi
14 |
15 | export DEBIAN_FRONTEND=noninteractive
16 |
17 | apt-mark unhold containerd.io || echo "Nothing to unhold"
18 | if dpkg -l docker-ce ; then
19 | apt-get install -y "containerd.io=$CONTAINERD_VERSION*" || echo "Cannot install specific version, keeping the current one"
20 | else
21 | apt-get install -y "containerd.io=$CONTAINERD_VERSION*"
22 | fi
23 | apt-mark hold containerd.io
24 |
25 | lineinfile "^disabled_plugins =" "disabled_plugins = []" "/etc/containerd/config.toml"
26 |
27 | if ! systemctl is-active --quiet containerd; then
28 | systemctl enable containerd
29 | systemctl start containerd
30 | fi
31 |
--------------------------------------------------------------------------------
/spec/pharos/kubeadm/init_config_spec.rb:
--------------------------------------------------------------------------------
1 | require "pharos/phases/configure_master"
2 |
3 | describe Pharos::Kubeadm::InitConfig do
4 | let(:master) { Pharos::Configuration::Host.new(address: 'test', private_address: 'private', role: 'master') }
5 | let(:config_hosts_count) { 1 }
6 |
7 | let(:config) { Pharos::Config.new(
8 | hosts: (1..config_hosts_count).map { |i| Pharos::Configuration::Host.new(role: 'worker') },
9 | network: {
10 | service_cidr: '1.2.3.4/16',
11 | pod_network_cidr: '10.0.0.0/16'
12 | },
13 | addons: {},
14 | etcd: {}
15 | ) }
16 |
17 | subject { described_class.new(config, master) }
18 |
19 | describe '#generate' do
20 |
21 | it 'comes with correct master addresses' do
22 | config.hosts << master
23 | config = subject.generate
24 | expect(config.dig('localAPIEndpoint', 'advertiseAddress')).to eq('private')
25 | end
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/examples/vagrant/centos7/cluster.yml:
--------------------------------------------------------------------------------
1 | hosts:
2 | - address: 192.168.110.100
3 | user: vagrant
4 | role: master
5 | ssh_key_path: ~/.vagrant.d/insecure_private_key
6 | - address: 192.168.110.101
7 | user: vagrant
8 | role: worker
9 | ssh_key_path: ~/.vagrant.d/insecure_private_key
10 | - address: 192.168.110.102
11 | user: vagrant
12 | role: worker
13 | ssh_key_path: ~/.vagrant.d/insecure_private_key
14 | network:
15 | provider: calico
16 | pod_network_cidr: 172.31.0.0/16
17 | service_cidr: 172.32.0.0/16
18 | addons:
19 | ingress-nginx:
20 | enabled: true
21 | kontena-network-lb:
22 | enabled: true
23 | node_selector:
24 | node-role.kubernetes.io/worker: ""
25 | tolerations:
26 | - operator: "Exists"
27 | effect: "NoSchedule"
28 | address_pools:
29 | - name: default
30 | protocol: layer2
31 | addresses:
32 | - 192.168.110.110-192.168.110.150
33 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/client.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICWDCCAUCgAwIBAgIUCC5owiVgzjD9lpqFdDFpO94j+C4wDQYJKoZIhvcNAQEL
3 | BQAwDzENMAsGA1UEAxMEZXRjZDAeFw0xODAzMTUwNzM3MDBaFw0yMzAzMTQwNzM3
4 | MDBaMBExDzANBgNVBAMTBmNsaWVudDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IA
5 | BLG0bfsbECOr4aXj5StJKsX8YTwcuBc7by/Q5L9C/Tf1yHys+Q9PSfq2PiE85OP3
6 | Gfwdb/LpQ6ry8PDvSMko58ujdTBzMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAK
7 | BggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBQv1vpTiEJ68g6PBBrT
8 | oaoICEPMmzAfBgNVHSMEGDAWgBT8f31uN+FecEg3rrtaECc82LmLvzANBgkqhkiG
9 | 9w0BAQsFAAOCAQEAIuYXD9kNpUe084mx6sp79EwY/BfFdszrpRWXmZw6kwAc3KXM
10 | D0YPaRWxe1V7DKffU00ByrjFNqdG5WQpNhRjipKQMfvBDXFgHmjhPDwcN3IASNlS
11 | Q9eLlA9i6njpFGBKeb/LcyjxvD7TH5rTtINFr+nykE1oo5816hgJMPdcWfHtvMcF
12 | 2HIenxLjgYJOILlvkZzxbUr7qq3pJLSud+WtM80l4EleCrR8HM0ieLaVfIPpX1nK
13 | Z7Zcxg1bP/LF5HlFR6MFK7VFc3SLJN0BYdXaYBKp1wDnI2+i6V0PCu0X7thNAHOs
14 | FGxVoXTQU/MzGuwuRn3ViEt2ObkCCvRffAfrXw==
15 | -----END CERTIFICATE-----
16 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/ca.csr:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE REQUEST-----
2 | MIICVDCCATwCAQAwDzENMAsGA1UEAxMEZXRjZDCCASIwDQYJKoZIhvcNAQEBBQAD
3 | ggEPADCCAQoCggEBAKU4WktLUAbZS6D3AFw4ntkUo5ud01sjW0TJl1rwH8wy4aCE
4 | XXFMuNfjV+S1IpbKWIr5VAEm6a4ie2D5KkCRZZ6rEVfZrWgB/6u74ROYb/2HIzaL
5 | KG8LcBRIzJJM0SDenyxZUL3ilDsscjwBF1//3e7tC+XQjAbnwjfvwuzGKY2/tJOA
6 | oyY4dDZBu/OTYcB4xCOcohcOssV0Nc1l3iC29lQ2SxdUo+Ks3pBuyPLCZaY9o61D
7 | 95LdJEpEvCUHSX1w+Ub2Ez+2g9Vs5fSuGjwbUJF5POnVREmQD/7Uq89SahU3DmXp
8 | DiWi4oNE/pxysY22YKhkRyUer/q7rSOpQi9ivzUCAwEAAaAAMA0GCSqGSIb3DQEB
9 | CwUAA4IBAQBbje7Bb5yXGi1UzYgtVucLl6EL676IJ+NTahTU4YEKc81HCi8zQ0B/
10 | EFFXDafdpwmDExPKlnK4yKGFoobIyFOyZ3j05OVUd6Cpuwoxz9UCL25+TOyWR+OO
11 | 6b4tzf2hB6JizgZJhkfnLWg+uOAdyp50kkpaTiOUl6dLlmVo/9RtrLFyvivp2M+B
12 | SLEoT853/Jj9JXiWxo+mAU3Xa4cx7VZR4WP7yK+WPTsM3cij8MaDIz0HRjZCOK8L
13 | 3hC+c4dzP1QYjokNTQQHw44Tqnr95+0jm+i41gLRGJY2+80pr8BnwIPfKzoTHvOI
14 | yhrbXbgGEpWeT7y3bdVntrsyc6zmVEnq
15 | -----END CERTIFICATE REQUEST-----
16 |
--------------------------------------------------------------------------------
/lib/pharos/resources/psp/00-privileged-psp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: 00-pharos-privileged
5 | annotations:
6 | kubernetes.io/description: 'privileged allows full unrestricted access to
7 | pod features, as if the PodSecurityPolicy controller was not enabled.'
8 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
9 | labels:
10 | kubernetes.io/cluster-service: "true"
11 | addonmanager.kubernetes.io/mode: Reconcile
12 | spec:
13 | privileged: true
14 | allowPrivilegeEscalation: true
15 | allowedCapabilities:
16 | - '*'
17 | volumes:
18 | - '*'
19 | hostNetwork: true
20 | hostPorts:
21 | - min: 0
22 | max: 65535
23 | hostIPC: true
24 | hostPID: true
25 | runAsUser:
26 | rule: 'RunAsAny'
27 | seLinux:
28 | rule: 'RunAsAny'
29 | supplementalGroups:
30 | rule: 'RunAsAny'
31 | fsGroup:
32 | rule: 'RunAsAny'
33 | readOnlyRootFilesystem: false
--------------------------------------------------------------------------------
/lib/pharos/host/el7/scripts/configure-docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 | # shellcheck disable=SC1091
8 | . /usr/local/share/pharos/el7.sh
9 |
10 | configure_container_runtime_proxy "docker"
11 |
12 | if [ -z "$DOCKER_VERSION" ]; then
13 | docker info
14 | exit 0
15 | fi
16 |
17 | mkdir -p /etc/docker
18 | cat <<EOF >/etc/docker/daemon.json
19 | {
20 | "bridge": "none",
21 | "iptables": false,
22 | "ip-masq": false,
23 | "insecure-registries": $INSECURE_REGISTRIES
24 | }
25 | EOF
26 |
27 | yum_install_with_lock "containerd.io" "${CONTAINERD_VERSION}"
28 | yum_install_with_lock "docker-ce" "${DOCKER_VERSION}"
29 |
30 | if ! systemctl is-active --quiet containerd; then
31 | systemctl enable containerd
32 | systemctl start containerd
33 | fi
34 |
35 | if ! systemctl is-active --quiet docker; then
36 | systemctl enable docker
37 | systemctl start docker
38 | fi
39 |
--------------------------------------------------------------------------------
/lib/pharos/phases/reconfigure_kubelet.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ReconfigureKubelet < Pharos::Phase
6 | title "Reconfigure kubelet"
7 |
8 | def call
9 | return if host.new?
10 |
11 | logger.info { 'Reconfiguring kubelet ...' }
12 | reconfigure_kubelet
13 | end
14 |
15 | def reconfigure_kubelet
16 | config = transport.file('/var/lib/kubelet/config.yaml')
17 | unless config.exist?
18 | logger.error "Cannot read existing configuration file, skipping reconfigure ..."
19 | return
20 | end
21 | org_config = config.read
22 | transport.exec!("sudo kubeadm upgrade node phase kubelet-config --kubelet-version #{Pharos::KUBE_VERSION}")
23 | new_config = config.read
24 | return if new_config == org_config
25 |
26 | transport.exec!('sudo systemctl restart kubelet')
27 | end
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/e2e/digitalocean/terraform-0.12/cluster.yml:
--------------------------------------------------------------------------------
1 | network:
2 | provider: weave
3 | pod_network_cidr: 172.30.0.0/16
4 | service_cidr: 172.31.0.0/16
5 | firewalld:
6 | enabled: true
7 | weave:
8 | trusted_subnets:
9 | - "10.133.0.0/16"
10 | image_repository: registry-tuusula.pharos.sh/kontenapharos
11 | telemetry:
12 | enabled: false
13 | addons:
14 | ingress-nginx:
15 | enabled: true
16 | helm:
17 | enabled: true
18 | kontena-lens:
19 | enabled: true
20 | name: pharos-drone
21 | tls:
22 | email: bxwqaumj@grr.la
23 | persistence:
24 | enabled: true
25 | kontena-storage:
26 | enabled: true
27 | data_dir: /var/lib/kontena-storage
28 | storage:
29 | use_all_nodes: true
30 | directories:
31 | - path: /mnt/data1
32 | cert-manager:
33 | enabled: true
34 | issuer:
35 | name: letsencrypt-staging
36 | server: https://acme-staging-v02.api.letsencrypt.org/directory
37 | email: bxwqaumj@grr.la
38 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/server.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIICeDCCAWCgAwIBAgIUE2Il1y7ZmXAYvC6v4n4s4/dBbeowDQYJKoZIhvcNAQEL
3 | BQAwDzENMAsGA1UEAxMEZXRjZDAeFw0xODAzMTUwNzM5MDBaFw0yMzAzMTQwNzM5
4 | MDBaMBQxEjAQBgNVBAMTCTEyNy4wLjAuMTBZMBMGByqGSM49AgEGCCqGSM49AwEH
5 | A0IABNysQszLxE6zUqGqr1lJqPDHGa6zJ4G0ZaNyo2nLNiJ5eJ9yJKtz4AiA/ZTf
6 | UZ/qa+2N+CsG7lGI9yjCeWFY28qjgZEwgY4wDgYDVR0PAQH/BAQDAgWgMB0GA1Ud
7 | JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQW
8 | BBQJlHeMI9ro4h5Sj39VvqKJEwpWlzAfBgNVHSMEGDAWgBT8f31uN+FecEg3rrta
9 | ECc82LmLvzAPBgNVHREECDAGhwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQB2w+uV
10 | MUndF3KalF5b0xe5SfPsMx7ZzU3ycZ0VuYZ81rlaE5yR26KVUXcRLYmRIzuwzdR3
11 | BektDmXKb9vVH8y+Rh2wLMCZ+UC5TLPWyuI3cmm7MumFDT82cWq8EMlJVDxzbgzD
12 | vJwR9Ae+TmsT2+8KHDhFLMh1YIKQKcFGhZvd9/kVPdlP0GaBq/8v/c4dGmCJuBkI
13 | qsS2rBrev72yZZzxFdjYwyqG4Z8Xr0PRMQD/UXb+G8GA8XABByiuyz/0XaDUmpcr
14 | /WAcYGK5ZFRUNS1ZtH2ndmKBNoN9Mxhh5LvWVBJMWrjUhkbioGbEj23WsrGfjEYR
15 | 2LPO6FgNtVlZ5BO+
16 | -----END CERTIFICATE-----
17 |
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/01-cluster-role-controller-manager.yml:
--------------------------------------------------------------------------------
1 | # RBAC rules for the kubernetes cloud-controller-manager controllers
2 | ---
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: ClusterRole
5 | metadata:
6 | name: system:cloud-controller-manager
7 | rules:
8 | - apiGroups:
9 | - ""
10 | resources:
11 | - events
12 | verbs:
13 | - create
14 | - patch
15 | - update
16 | - apiGroups:
17 | - ""
18 | resources:
19 | - nodes
20 | verbs:
21 | - '*'
22 | - apiGroups:
23 | - ""
24 | resources:
25 | - nodes/status
26 | verbs:
27 | - patch
28 | - apiGroups:
29 | - ""
30 | resources:
31 | - services
32 | verbs:
33 | - list
34 | - patch
35 | - update
36 | - watch
37 | - apiGroups:
38 | - ""
39 | resources:
40 | - serviceaccounts
41 | - secrets
42 | verbs:
43 | - get
44 | - list
45 | - create
46 | - apiGroups:
47 | - ""
48 | resources:
49 | - endpoints
50 | verbs:
51 | - create
52 | - get
53 | - list
54 | - watch
55 | - update
--------------------------------------------------------------------------------
/lib/pharos/host/debian/scripts/configure-docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | configure_container_runtime_proxy "docker"
9 |
10 | if [ -z "$DOCKER_VERSION" ]; then
11 | docker info
12 | exit 0
13 | fi
14 |
15 | mkdir -p /etc/docker
16 | cat <<EOF >/etc/docker/daemon.json
17 | {
18 | "storage-driver": "overlay2",
19 | "live-restore": true,
20 | "bridge": "none",
21 | "iptables": false,
22 | "ip-masq": false,
23 | "log-driver": "json-file",
24 | "log-opts": {
25 | "max-size": "20m",
26 | "max-file": "3"
27 | },
28 | "insecure-registries": $INSECURE_REGISTRIES
29 | }
30 | EOF
31 |
32 | debconf-set-selections <<EOF
--------------------------------------------------------------------------------
/examples/authentication-token-webhook/README.md:
--------------------------------------------------------------------------------
26 | ```
27 |
28 | 3. Deploy token reviewer service
29 |
30 | ```sh
31 | $ kubectl apply -f ./examples/authentication-token-webhook/deploy
32 | ```
33 | The default user and token are `admin/verysecret`, but you can edit `daemonset.yml` and `cluster_role_binding.yml` to change those.
34 |
35 | 4. Query the API server with the token
36 | ```sh
37 | $ curl -X GET \
38 | https://<master-ip>:6443/api/v1/nodes \
39 | -H 'authorization: Bearer verysecret' \
40 | -H 'cache-control: no-cache'
41 | ```
42 |
43 |
--------------------------------------------------------------------------------
/examples/terraform-aws/iam.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_role" "role" {
2 | name = "${var.cluster_name}_host"
3 |
4 | assume_role_policy = <<EOF
--------------------------------------------------------------------------------
/lib/pharos/phases/validate_configuration_changes.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ValidateConfigurationChanges < Pharos::Phase
6 | title "Validate configuration changes"
7 |
8 | DEFAULT_PROC = ->(key, old_val, new_val) { raise Pharos::ConfigError, key => "can't change #{key} from #{old_val} to #{new_val}" }
9 |
10 | def call
11 | changed?('network', 'provider', &DEFAULT_PROC)
12 | changed?('network', 'service_cidr', &DEFAULT_PROC)
13 | changed?('network', 'pod_network_cidr', &DEFAULT_PROC)
14 | end
15 |
16 | def changed?(*config_keys)
17 | old_value = previous_config&.dig(*config_keys)
18 | new_value = @config&.dig(*config_keys)
19 | return false if old_value == new_value
20 | return true unless block_given?
21 |
22 | yield config_keys.map(&:to_s).join('.'), old_value, new_value
23 | end
24 |
25 | def previous_config
26 | cluster_context['previous-config']
27 | end
28 | end
29 | end
30 | end
31 |
--------------------------------------------------------------------------------
/lib/pharos/error.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | class Error < StandardError; end
5 | class InvalidHostError < Error; end
6 | class InvalidAddonError < Error; end
7 |
8 | class ConfigError < Error
9 | attr_reader :errors
10 |
11 | def initialize(errors)
12 | @errors = errors
13 | end
14 |
15 | def to_s
16 | "Invalid configuration:\n#{YAML.dump(@errors)}"
17 | end
18 | end
19 |
20 | class ExecError < Error
21 | attr_reader :cmd, :exit_status, :output
22 |
23 | def initialize(cmd, exit_status, output)
24 | @cmd = cmd
25 | @exit_status = exit_status
26 | @output = if output.respond_to?(:string)
27 | output.string
28 | elsif output.respond_to?(:read)
29 | output.rewind
30 | output.read
31 | else
32 | output
33 | end
34 | end
35 |
36 | def message
37 | "exec failed with code #{@exit_status}: #{@cmd}\n#{@output.gsub(/^/m, ' ')}"
38 | end
39 | end
40 | end
41 |
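A hypothetical illustration of how `ExecError` renders, following the code above:

```ruby
err = Pharos::ExecError.new("kubeadm init", 1, "some failure output")
puts err.message
# exec failed with code 1: kubeadm init
#  some failure output
```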
--------------------------------------------------------------------------------
/spec/pharos/phases/validate_configuration_changes_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/config'
2 | require 'pharos/phases/validate_configuration_changes'
3 |
4 | describe Pharos::Phases::ValidateConfigurationChanges do
5 | let(:host) {
6 | double(
7 | :host,
8 | address: '10.10.10.2',
9 | user: 'root',
10 | ssh_key_path: '~/.ssh/id_rsa.pub',
11 | container_runtime: 'docker',
12 | hostname: 'node-1'
13 | )
14 | }
15 | let(:config) { Pharos::Config.new(hosts: [{address: '10.0.0.1', role: 'master'}], network: { provider: 'new' }) }
16 | let(:old_config) { Pharos::Config.new(hosts: [{address: '10.0.0.1', role: 'master'}], network: { provider: 'old' }) }
17 | let(:cluster_context) { { 'previous-config' => old_config } }
18 |
19 | let(:subject) { described_class.new(config.hosts.first, cluster_context: cluster_context, config: config) }
20 |
21 | describe '#call' do
22 | it 'detects network provider change' do
23 | expect{subject.call}.to raise_error(Pharos::ConfigError, /can't change network.provider from old to new/)
24 | end
25 | end
26 | end
27 |
--------------------------------------------------------------------------------
/lib/pharos/resources/kubelet_rubber_stamp/04-deployment.yml.erb:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: kubelet-rubber-stamp
5 | namespace: kube-system
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | name: kubelet-rubber-stamp
11 | template:
12 | metadata:
13 | labels:
14 | name: kubelet-rubber-stamp
15 | spec:
16 | serviceAccountName: kubelet-rubber-stamp
17 | priorityClassName: system-cluster-critical
18 | tolerations:
19 | - effect: NoSchedule
20 | operator: Exists
21 | nodeSelector:
22 | node-role.kubernetes.io/master: ""
23 | containers:
24 | - name: rubber-stamp
25 | image: <%= image_repository %>/kubelet-rubber-stamp:<%= version %>
26 | env:
27 | - name: WATCH_NAMESPACE
28 | value: ""
29 | - name: POD_NAME
30 | valueFrom:
31 | fieldRef:
32 | fieldPath: metadata.name
33 | - name: OPERATOR_NAME
34 | value: "kubelet-rubber-stamp"
35 |
--------------------------------------------------------------------------------
/lib/pharos/resources/csi-crds/csidriver.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1beta1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | name: csidrivers.csi.storage.k8s.io
5 | labels:
6 | addonmanager.kubernetes.io/mode: Reconcile
7 | spec:
8 | group: csi.storage.k8s.io
9 | names:
10 | kind: CSIDriver
11 | plural: csidrivers
12 | scope: Cluster
13 | validation:
14 | openAPIV3Schema:
15 | properties:
16 | spec:
17 | description: Specification of the CSI Driver.
18 | properties:
19 | attachRequired:
20 | description: Indicates this CSI volume driver requires an attach operation,
21 | and that Kubernetes should call attach and wait for any attach operation
22 | to complete before proceeding to mount.
23 | type: boolean
24 | podInfoOnMountVersion:
25 | description: Indicates this CSI volume driver requires additional pod
26 | information (like podName, podUID, etc.) during mount operations.
27 | type: string
28 | version: v1alpha1
29 |
--------------------------------------------------------------------------------
/examples/authentication-token-webhook/deploy/daemonset.yml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 | name: k8s-token-reviewer
5 | namespace: kube-system
6 | labels:
7 | k8s-app: k8s-token-reviewer
8 | version: v0.0.1
9 | spec:
10 | selector:
11 | matchLabels:
12 | k8s-app: k8s-token-reviewer
13 | template:
14 | metadata:
15 | labels:
16 | k8s-app: k8s-token-reviewer
17 | annotations:
18 | scheduler.alpha.kubernetes.io/critical-pod: ''
19 | spec:
20 | nodeSelector:
21 | node-role.kubernetes.io/master: ''
22 | tolerations:
23 | - effect: NoSchedule
24 | operator: Exists
25 | containers:
26 | - image: nevalla/token_webhook_example:latest
27 | name: k8s-token-reviewer
28 | imagePullPolicy: Always
29 | env:
30 | - name: USER
31 | value: admin
32 | - name: TOKEN
33 | value: verysecret
34 | - name: GROUP
35 | value: admin
36 | hostNetwork: true
37 | restartPolicy: Always
38 |
--------------------------------------------------------------------------------
/lib/pharos/resources/metrics-server/metrics-server-deployment.yml.erb:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: metrics-server
5 | namespace: kube-system
6 | labels:
7 | k8s-app: metrics-server
8 | spec:
9 | selector:
10 | matchLabels:
11 | k8s-app: metrics-server
12 | template:
13 | metadata:
14 | name: metrics-server
15 | labels:
16 | k8s-app: metrics-server
17 | spec:
18 | serviceAccountName: metrics-server
19 | <% if worker_count == 0 %>
20 | tolerations:
21 | - effect: NoSchedule
22 | operator: Exists
23 | <% end %>
24 | priorityClassName: system-cluster-critical
25 | containers:
26 | - name: metrics-server
27 | image: <%= image_repository %>/metrics-server:v<%= version %>
28 | imagePullPolicy: IfNotPresent
29 | command:
30 | - /metrics-server
31 | - --logtostderr=true
32 | - --kubelet-preferred-address-types=InternalIP,ExternalIP
33 | resources:
34 | requests:
35 | cpu: 10m
36 | memory: 32Mi
37 |
--------------------------------------------------------------------------------
/spec/support/exit_with_error_helper.rb:
--------------------------------------------------------------------------------
1 | RSpec::Matchers.define_negated_matcher :exit_without_error, :exit_with_error
2 | RSpec::Matchers.define :exit_with_error do
3 |
4 | def supports_block_expectations?
5 | true
6 | end
7 |
8 | match do |block|
9 | begin
10 | block.call
11 | rescue SystemExit => e
12 | @exit_status = e.status
13 | end
14 | !@exit_status.nil? && @exit_status == expected_status
15 | end
16 |
17 | chain :status do |status|
18 | @expected_status = status
19 | end
20 |
21 | failure_message do |block|
22 | "expected block to exit with status #{expected_status} but exit " +
23 | (@exit_status.nil? ? "was not called" : "status was #{@exit_status}")
24 | end
25 |
26 | failure_message_when_negated do |block|
27 | "expected block not to raise SystemExit, got exit with status #{@exit_status}"
28 | end
29 |
30 | description do
31 | "expect block to exit #{expected_status.zero? ? "without error" : "with error (status #{expected_status})"}"
32 | end
33 |
34 | def expected_status
35 | @expected_status ||= 1
36 | end
37 | end
38 |
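A usage sketch for the matcher (hypothetical spec):

```ruby
describe "a command that aborts" do
  it "exits with status 1 by default" do
    expect { exit 1 }.to exit_with_error
  end

  it "supports an explicit status via the chain" do
    expect { exit 2 }.to exit_with_error.status(2)
  end

  it "passes the negated matcher when nothing exits" do
    expect { "no exit" }.to exit_without_error
  end
end
```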
--------------------------------------------------------------------------------
/.rubocop.yml:
--------------------------------------------------------------------------------
1 | inherit_from: .rubocop.relaxed.yml
2 |
3 | AllCops:
4 | Exclude:
5 | - spec/**/*
6 | - non-oss/spec/**/*
7 | - Gemfile
8 | - "*.gemspec"
9 | - bundler/**/*
10 | TargetRubyVersion: 2.5
11 |
12 | Style/PercentLiteralDelimiters:
13 | PreferredDelimiters:
14 | default: ()
15 | '%i': '()'
16 | '%I': '()'
17 | '%r': '{}'
18 | '%w': '()'
19 | '%W': '()'
20 |
21 | Style/FormatString:
22 | EnforcedStyle: percent
23 |
24 | Style/FrozenStringLiteralComment:
25 | EnforcedStyle: always
26 |
27 | Style/WordArray:
28 | Enabled: true
29 | MinSize: 3
30 |
31 | Style/SymbolArray:
32 | Enabled: true
33 | MinSize: 3
34 |
35 | Gemspec/OrderedDependencies:
36 | Enabled: false
37 |
38 | Style/PerlBackrefs:
39 | Enabled: true
40 |
41 | Layout/SpaceInsideParens:
42 | Enabled: true
43 |
44 | Style/SpecialGlobalVars:
45 | Enabled: true
46 |
47 | Style/Alias:
48 | Enabled: true
49 |
50 | Style/BeginBlock:
51 | Enabled: true
52 |
53 | Naming/UncommunicativeMethodParamName:
54 | AllowedNames:
55 | - cn
56 |
57 | Metrics/BlockLength:
58 | Enabled: false
59 |
--------------------------------------------------------------------------------
/lib/pharos/root_command.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'up_command'
4 | require_relative 'reset_command'
5 | require_relative 'version_command'
6 | require_relative 'kubeconfig_command'
7 | require_relative 'exec_command'
8 | require_relative 'terraform_command'
9 | require_relative 'worker_up_command'
10 |
11 | module Pharos
12 | class RootCommand < Pharos::Command
13 | banner "#{File.basename($PROGRAM_NAME)} - Kontena Pharos cluster manager"
14 |
15 | subcommand "up", "initialize/upgrade cluster", UpCommand
16 |
17 | subcommand "worker", "worker node specific commands" do
18 | subcommand "up", "initialize/upgrade a worker node", WorkerUpCommand
19 | end
20 |
21 | subcommand "kubeconfig", "fetch admin kubeconfig file", KubeconfigCommand
22 | subcommand "reset", "reset cluster", ResetCommand
23 | subcommand %w(exec ssh), "run a command or an interactive session on a host", ExecCommand
24 | subcommand %w(tf terraform), "terraform specific commands", TerraformCommand
25 | subcommand "version", "show version information", VersionCommand
26 | end
27 | end
28 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/cluster-external-etcd.yml:
--------------------------------------------------------------------------------
1 | hosts:
2 | - address: 192.168.100.100
3 | private_address: 192.168.100.100 # just to advertise correct ip with vagrant
4 | user: vagrant
5 | role: master
6 | ssh_key_path: ~/.vagrant.d/insecure_private_key
7 | container_runtime: docker
8 | - address: 192.168.100.101
9 | user: vagrant
10 | role: worker
11 | ssh_key_path: ~/.vagrant.d/insecure_private_key
12 | container_runtime: docker
13 | - address: 192.168.100.102
14 | user: vagrant
15 | role: worker
16 | ssh_key_path: ~/.vagrant.d/insecure_private_key
17 | container_runtime: docker
18 | network:
19 | pod_network_cidr: 10.32.0.0/16
20 | trusted_subnets:
21 | - 192.168.100.0/24
22 | etcd:
23 | endpoints:
24 | - https://127.0.0.1:2379
25 | certificate: ./etcd_certs/client.pem
26 | key: ./etcd_certs/client-key.pem
27 | ca_certificate: ./etcd_certs/ca.pem
28 | addons:
29 | ingress-nginx:
30 | enabled: false
31 | configmap:
32 | map-hash-bucket-size: "128"
33 | kured:
34 | enabled: false
35 | host-upgrades:
36 | enabled: false
37 | interval: 7d
38 |
--------------------------------------------------------------------------------
/examples/terraform-packet/README.md:
--------------------------------------------------------------------------------
1 | # Pharos Cluster on Packet.net (using Terraform)
2 |
3 |
4 | ## Prerequisites
5 |
6 | - Pharos [toolchain](https://docs.k8spharos.dev/install.html) installed locally
7 | - [Terraform](https://www.terraform.io/) 0.12 installed locally
8 | - [Packet.com](https://packet.com) credentials
9 |
10 | ## Clone this repository
11 |
12 | ```
13 | $ git clone https://github.com/kontena/pharos-cluster.git
14 | $ cd pharos-cluster/examples/terraform-packet/
15 | ```
16 |
17 | ## Configure Terraform
18 |
19 | Copy [terraform.example.tfvars](./terraform.example.tfvars) example file to `terraform.tfvars`:
20 |
21 | ```
22 | $ cp terraform.example.tfvars terraform.tfvars
23 | ```
24 |
25 | Edit `project_id` and `auth_token`. Optionally, you can also configure the number of machines and their types. Once done, save the file.
26 |
27 | ## Create Cluster
28 |
29 | ```
30 | $ pharos tf apply -y --trust-hosts
31 | ```
32 |
33 | This command first runs Terraform to create the infrastructure, then collects the host information and passes it back to `pharos`.
34 |
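If you prefer to drive the tools yourself, the flow `pharos tf apply` automates looks roughly like this (a sketch based on `lib/pharos/terraform/apply_command.rb`; the JSON file is named after the Terraform workspace, `default` here):

```
$ terraform init
$ terraform apply
$ terraform output -json > .default.json
$ pharos up --tf-json .default.json -y
```
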
35 | ## Teardown Cluster
36 |
37 | ```
38 | $ pharos tf destroy
39 | ```
40 |
--------------------------------------------------------------------------------
/lib/pharos/logging.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'logger'
4 |
5 | module Pharos
6 | module Logging
7 | def self.debug?
8 | !!@debug
9 | end
10 |
11 | def self.debug!
12 | @debug = true
13 | end
14 |
15 | def self.format_exception(exc, severity = "ERROR")
16 | if !ENV['DEBUG'].to_s.empty? || severity == "DEBUG"
17 | backtrace = "\n #{exc.backtrace.join("\n ")}"
18 | end
19 |
20 | "Error: #{exc.message.strip}#{backtrace}"
21 | end
22 |
23 | def self.log_level
24 | @log_level ||= debug? ? Logger::DEBUG : Logger::INFO
25 | end
26 |
27 | def self.logger
28 | @logger ||= Logger.new($stdout).tap do |logger|
29 | logger.progname = 'API'
30 | logger.level = Pharos::Logging.log_level
31 | logger.formatter = proc do |severity, _datetime, _progname, msg|
32 | message = msg.is_a?(Exception) ? Pharos::Logging.format_exception(msg, severity) : msg
33 | " %s\n" % { msg: message }
34 | end
35 | end
36 | end
37 |
38 | def logger
39 | Pharos::Logging.logger
40 | end
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/examples/vagrant/ubuntu/etcd_certs/ca.pem:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIC7jCCAdagAwIBAgIUDjOJYLq55ckKIGxWdT3AbAi8NWUwDQYJKoZIhvcNAQEL
3 | BQAwDzENMAsGA1UEAxMEZXRjZDAeFw0xODAzMTUwNzM2MDBaFw0yMzAzMTQwNzM2
4 | MDBaMA8xDTALBgNVBAMTBGV0Y2QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
5 | AoIBAQClOFpLS1AG2Uug9wBcOJ7ZFKObndNbI1tEyZda8B/MMuGghF1xTLjX41fk
6 | tSKWyliK+VQBJumuIntg+SpAkWWeqxFX2a1oAf+ru+ETmG/9hyM2iyhvC3AUSMyS
7 | TNEg3p8sWVC94pQ7LHI8ARdf/93u7Qvl0IwG58I378LsximNv7STgKMmOHQ2Qbvz
8 | k2HAeMQjnKIXDrLFdDXNZd4gtvZUNksXVKPirN6QbsjywmWmPaOtQ/eS3SRKRLwl
9 | B0l9cPlG9hM/toPVbOX0rho8G1CReTzp1URJkA/+1KvPUmoVNw5l6Q4louKDRP6c
10 | crGNtmCoZEclHq/6u60jqUIvYr81AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
11 | BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBT8f31uN+FecEg3rrtaECc82LmLvzAN
12 | BgkqhkiG9w0BAQsFAAOCAQEAKHJkNwLkY8d6ux/VWQqNWusEUv92pGeKrqSeRvVZ
13 | x8UjxI1Tg5oaeft2K1P7+uyOQcTulOYF37tlrEcdUAP9GMLzgjLyUQoUujY+CdGn
14 | Gt9yy0k9lVA536bY+sLcy2reyMgrM0YO8Hvm4oVsLBQdKGHCE9f2qa7Rh1fMTez5
15 | dH30twNeDuV35ABS1svw0n5Lb1rTxZEw3z/jsJRL9nLWiKJhU60uCGZUrmfH1cqV
16 | 3IH6OV75GIRkRvCmNaWr9LekxwaO5M+SxgO1B02jgP3vs+cyJJ02isgp1a0qXrpH
17 | y0k1AjC6vE+iXdCcgkzfgaaz0qNK72xXtW/WIrOLbpx1JQ==
18 | -----END CERTIFICATE-----
19 |
--------------------------------------------------------------------------------
/lib/pharos/terraform/apply_command.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'base_command'
4 |
5 | module Pharos
6 | module Terraform
7 | class ApplyCommand < BaseCommand
8 | options :load_config
9 |
10 | option ['-f', '--force'], :flag, "force upgrade"
11 | option ['--trust-hosts'], :flag, "removes addresses from ~/.ssh/known_hosts before connecting"
12 |
13 | def execute
14 | tf_workspace
15 | tf_init
16 | tf_apply
17 | pharos_up
18 | end
19 |
20 | def tf_init
21 | run_cmd! "terraform init"
22 | end
23 |
24 | def tf_apply
25 | cmd = ["terraform", "apply"]
26 | cmd += common_tf_options
27 |
28 | run_cmd! cmd.join(' ')
29 | end
30 |
31 | def pharos_up
32 | run_cmd "terraform output -json > .#{workspace}.json"
33 |
34 | cmd = @config_options || []
35 | cmd << '--tf-json'
36 | cmd << ".#{workspace}.json"
37 | cmd << '-y' if yes?
38 | cmd << '--force' if force?
39 | cmd << '--trust-hosts' if trust_hosts?
40 |
41 | Pharos::UpCommand.new('pharos').run(cmd)
42 | end
43 | end
44 | end
45 | end
46 |
--------------------------------------------------------------------------------
/lib/pharos/kube/stack.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'k8s-client'
4 |
5 | module Pharos
6 | module Kube
7 | class Stack < K8s::Stack
8 | # custom labels
9 | LABEL = 'pharos.kontena.io/stack'
10 | CHECKSUM_ANNOTATION = 'pharos.kontena.io/stack-checksum'
11 |
12 | # Load stack with resources from path containing erb-templated YAML files
13 | #
14 | # @param path [String]
15 | # @param name [String]
16 | # @param vars [Hash]
17 | def self.load(name, path, **vars)
18 | path = Pathname.new(path).freeze
19 | files = if File.file?(path)
20 | [path]
21 | else
22 | Pathname.glob(path.join('*.{yml,yaml,yml.erb,yaml.erb}')).sort_by(&:to_s)
23 | end
24 | resources = files.flat_map do |file|
25 | Pharos::YamlFile.new(file).load_stream(name: name, **vars) do |doc|
26 | K8s::Resource.new(doc)
27 | end
28 | end.select do |r|
29 | # Take in only resources that are valid kube resources
30 | r.kind && r.apiVersion
31 | end
32 |
33 | new(name, resources)
34 | end
35 | end
36 | end
37 | end
38 |
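A usage sketch for `Stack.load` (names are hypothetical; `kube_client` is assumed to be a configured `K8s::Client`):

```ruby
stack = Pharos::Kube::Stack.load(
  'my-stack',                          # stack name used for the pharos labels
  '/path/to/resources',                # directory of *.yml / *.yml.erb files
  image_repository: 'docker.io/acme'   # vars exposed to the ERB templates
)
stack.apply(kube_client)
```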
--------------------------------------------------------------------------------
/lib/pharos/resources/packet/02-clusterrole.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | annotations:
5 | rbac.authorization.kubernetes.io/autoupdate: "true"
6 | name: system:cloud-controller-manager
7 | rules:
8 | - apiGroups:
9 | - ""
10 | resources:
11 | - events
12 | verbs:
13 | - create
14 | - patch
15 | - update
16 | - apiGroups:
17 | - ""
18 | resources:
19 | - nodes
20 | verbs:
21 | - '*'
22 | - apiGroups:
23 | - ""
24 | resources:
25 | - nodes/status
26 | verbs:
27 | - patch
28 | - apiGroups:
29 | - ""
30 | resources:
31 | - services
32 | verbs:
33 | - list
34 | - patch
35 | - update
36 | - watch
37 | - apiGroups:
38 | - ""
39 | resources:
40 | - services/status
41 | verbs:
42 | - list
43 | - patch
44 | - update
45 | - watch
46 | - apiGroups:
47 | - ""
48 | resources:
49 | - serviceaccounts
50 | verbs:
51 | - create
52 | - apiGroups:
53 | - ""
54 | resources:
55 | - persistentvolumes
56 | verbs:
57 | - get
58 | - list
59 | - update
60 | - watch
61 | - apiGroups:
62 | - ""
63 | resources:
64 | - endpoints
65 | verbs:
66 | - create
67 | - get
68 | - list
69 | - watch
70 | - update
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_cloud_provider.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureCloudProvider < Pharos::Phase
6 | title "Configure cloud provider"
7 |
8 | def call
9 | unless @config.cloud&.provider
10 | logger.info "Cloud provider not set, skipping."
11 | return
12 | end
13 |
14 | if @config.cloud.intree_provider?
15 | logger.info "In-tree cloud provider #{@config.cloud.provider} enabled."
16 | elsif @config.cloud.outtree_provider?
17 | logger.info "Configuring cloud provider #{@config.cloud.provider} ..."
18 | apply_cloud_config
19 | else
20 | logger.info "Using external cloud provider, provider needs to be configured manually."
21 | end
22 | end
23 |
24 | def apply_cloud_config
25 | apply_stack('csi-crds') if @config.cloud.cloud_provider.csi?
26 | if @config.cloud.config
27 | stack = Pharos::Kube::Stack.load("#{@config.cloud.provider}-cloud-config", @config.cloud.config)
28 | stack.apply(kube_client)
29 | end
30 | apply_stack(@config.cloud.provider, image_repository: @config.image_repository)
31 | end
32 | end
33 | end
34 | end
35 |
--------------------------------------------------------------------------------
/spec/pharos/phases/join_node_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/config'
2 | require 'pharos/phases/join_node'
3 |
4 | describe Pharos::Phases::JoinNode do
5 | let(:host) do
6 | Pharos::Configuration::Host.new(
7 | address: '10.10.10.2',
8 | user: 'root',
9 | ssh_key_path: '~/.ssh/id_rsa.pub',
10 | container_runtime: 'docker',
11 | hostname: 'node-1'
12 | )
13 | end
14 |
15 | let(:ssh) { instance_double(Pharos::Transport::SSH) }
16 | let(:cluster_context) {
17 | {
18 | 'join-command' => join_cmd
19 | }
20 | }
21 | let(:subject) { described_class.new(host, cluster_context: cluster_context) }
22 | let(:join_cmd) { 'kubeadm join --token 531bb9.d1637f0a9b6af2ba 127.0.0.1:6443 --discovery-token-ca-cert-hash sha256:98d563efbb07a11cde93884394ba1d266912def377bfadc65d01a3bcc0ddd30d' }
23 |
24 | before(:each) do
25 | allow(host).to receive(:transport).and_return(ssh)
26 | allow(subject).to receive(:already_joined?).and_return(false)
27 | end
28 |
29 | describe '#call' do
30 | it 'joins via ssh' do
31 | expect(ssh).to receive(:exec!) do |cmd|
32 | expect(cmd).to include("sudo kubeadm join")
33 | expect(cmd).to include("--node-name #{host.hostname}")
34 | end
35 | subject.call
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/lib/pharos/configuration/cloud.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Configuration
5 | class Cloud < Pharos::Configuration::Struct
6 | attribute :provider, Pharos::Types::String
7 | attribute :config, Pharos::Types::String
8 |
9 | INTREE_PROVIDERS = %w(aws azure cloudstack gce openstack ovirt photon vsphere).freeze
10 |
11 | # @return [Array]
12 | def self.external_providers
13 | Pharos::Cloud::ProviderRegistry.instance.providers.keys.map(&:to_s)
14 | end
15 |
16 | # @return [Array]
17 | def self.providers
18 | INTREE_PROVIDERS + external_providers
19 | end
20 |
21 | # @return [Boolean]
22 | def intree_provider?
23 | INTREE_PROVIDERS.include?(provider)
24 | end
25 |
26 | # @return [Boolean]
27 | def outtree_provider?
28 | self.class.external_providers.include?(provider)
29 | end
30 |
31 | # @return [String]
32 | def resolve_provider
33 | return provider if intree_provider?
34 |
35 | 'external'
36 | end
37 |
38 | # @return [Pharos::Cloud::Provider]
39 | def cloud_provider
40 | Pharos::Cloud::ProviderRegistry.instance.provider(provider)
41 | end
42 | end
43 | end
44 | end
45 |
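A sketch of how the provider type plays out (assumes an out-of-tree provider such as `packet` has registered itself in the `ProviderRegistry`):

```ruby
cloud = Pharos::Configuration::Cloud.new(provider: 'aws')
cloud.intree_provider?  # => true
cloud.resolve_provider  # => "aws"

# assumes the packet provider class has registered itself in the registry
cloud = Pharos::Configuration::Cloud.new(provider: 'packet')
cloud.outtree_provider? # => true
cloud.resolve_provider  # => "external"
```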
--------------------------------------------------------------------------------
/kube-bench/job-master.yml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: kube-bench-master
5 | spec:
6 | template:
7 | spec:
8 | hostPID: true
9 | nodeSelector:
10 | node-role.kubernetes.io/master: ""
11 | tolerations:
12 | - key: node-role.kubernetes.io/master
13 | operator: Exists
14 | effect: NoSchedule
15 | containers:
16 | - name: kube-bench
17 | image: docker.io/aquasec/kube-bench:latest
18 | command: ["kube-bench","master", "--noremediations", "--version", "1.11"]
19 | volumeMounts:
20 | - name: var-lib-etcd
21 | mountPath: /var/lib/etcd
22 | - name: etc-kubernetes
23 | mountPath: /etc/kubernetes
24 | # # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
25 | # # You can omit this mount if you specify --version as part of the command.
26 | # - name: usr-bin
27 | # mountPath: /usr/bin
28 | restartPolicy: Never
29 | volumes:
30 | - name: var-lib-etcd
31 | hostPath:
32 | path: "/var/lib/etcd"
33 | - name: etc-kubernetes
34 | hostPath:
35 | path: "/etc/kubernetes"
36 | # - name: usr-bin
37 | # hostPath:
38 | # path: "/usr/bin"
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/scripts/configure-docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # shellcheck disable=SC1091
6 | . /usr/local/share/pharos/util.sh
7 |
8 | configure_container_runtime_proxy "docker"
9 |
10 | if [ -z "$DOCKER_VERSION" ]; then
11 | docker info
12 | exit 0
13 | fi
14 |
15 | mkdir -p /etc/docker
16 | cat <<EOF >/etc/docker/daemon.json
17 | {
18 | "storage-driver": "overlay2",
19 | "bridge": "none",
20 | "iptables": false,
21 | "ip-masq": false,
22 | "log-driver": "json-file",
23 | "log-opts": {
24 | "max-size": "20m",
25 | "max-file": "3"
26 | },
27 | "insecure-registries": $INSECURE_REGISTRIES
28 | }
29 | EOF
30 |
31 | export DEBIAN_FRONTEND=noninteractive
32 |
33 | apt-mark unhold "$DOCKER_PACKAGE" || echo "Nothing to unhold"
34 | if dpkg -l docker-ce ; then
35 | apt-get install -y "$DOCKER_PACKAGE=5:$DOCKER_VERSION*" || echo "Cannot install specific version, keeping the current one"
36 | else
37 | apt-get install -y "$DOCKER_PACKAGE=5:$DOCKER_VERSION*"
38 | fi
39 | apt-mark hold "$DOCKER_PACKAGE"
40 |
41 | if ! systemctl is-active --quiet containerd; then
42 | systemctl enable containerd
43 | systemctl start containerd
44 | fi
45 |
46 | if ! systemctl is-active --quiet docker; then
47 | systemctl enable docker
48 | systemctl start docker
49 | fi
50 |
--------------------------------------------------------------------------------
/.drone.yml:
--------------------------------------------------------------------------------
1 | kind: pipeline
2 | name: test
3 | platform:
4 | os: linux
5 | arch: amd64
6 | steps:
7 | - name: test-ruby
8 | image: ruby:2.5
9 | commands:
10 | - gem install bundler -Nf
11 | - bundle install --path bundler
12 | - bundle exec rubocop --fail-level A -S --format c --parallel
13 | - bundle exec rspec spec/
14 | - name: test-shellcheck
15 | image: koalaman/shellcheck-alpine:latest
16 | commands:
17 | - apk update && apk add bash
18 | - bash -c 'shopt -s globstar; shellcheck **/*.sh'
19 | ---
20 | kind: pipeline
21 | name: github-release
22 | depends_on:
23 | - test
24 | platform:
25 | os: linux
26 | arch: amd64
27 | steps:
28 | - name: create_gh_release
29 | image: ubuntu:xenial
30 | environment:
31 | GITHUB_TOKEN:
32 | from_secret: github_token
33 | commands:
34 | - ./build/drone/create_release.sh
35 | when:
36 | event: tag
37 | ---
38 | kind: pipeline
39 | name: release-oss-binary
40 | depends_on:
41 | - github-release
42 | platform:
43 | os: linux
44 | arch: amd64
45 | steps:
46 | - name: build_ubuntu
47 | image: ubuntu:xenial
48 | environment:
49 | CPPFLAGS: "-P"
50 | GITHUB_TOKEN:
51 | from_secret: github_token
52 | commands:
53 | - ./build/drone/ubuntu.sh
54 | when:
55 | event: tag
56 |
--------------------------------------------------------------------------------
/kube-bench/run-bench.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | RED='\033[0;31m'
6 | GREEN='\033[0;32m'
7 | YELLOW='\033[0;33m'
8 | RESET='\033[0m'
9 |
10 | function logs() {
11 | # shellcheck disable=SC2059
12 | kubectl logs "$1" | sed ''/PASS/s//"$(printf "${GREEN}PASS${RESET}")"/'' | sed ''/WARN/s//"$(printf "${YELLOW}WARN${RESET}")"/'' | sed ''/FAIL/s//"$(printf "${RED}FAIL${RESET}")"/''
13 | }
14 |
15 | if [ -z "$1" ]
16 | then
17 | echo "You need to supply the role (master | node)"
18 | exit 1
19 | fi
20 |
21 | if [[ ! "$1" =~ ^(master|node)$ ]]; then
22 | echo "You need to supply the role as master or node"
23 | exit 1
24 | fi
25 |
26 | role=$1
27 |
28 | # Create job for defined role
29 | kubectl create -f job-"${role}".yml
30 | echo "Waiting for benchmarking pod(s) to complete..."
31 | kubectl wait --for=condition=complete --timeout=60s job/kube-bench-"${role}"
32 |
33 | pod=$(kubectl get pods --selector=job-name=kube-bench-"${role}" --output=jsonpath={.items..metadata.name})
34 | logs "$pod"
35 | sleep 1
36 | # Grab the exit code of the pod. Not that it currently matters, though, as kube-bench seems to exit with 0 every time.
37 | exit_code=$(kubectl get pod "$pod" --output=jsonpath="{.status.containerStatuses[0].state.terminated.exitCode}")
38 | echo "Pod exit code: $exit_code"
39 | kubectl delete -f job-"${role}".yml
40 |
41 | exit "$exit_code"
42 |
43 |
--------------------------------------------------------------------------------
/spec/pharos/phase_spec.rb:
--------------------------------------------------------------------------------
1 | require "pharos/phase"
2 |
3 | describe Pharos::Phase do
4 | let(:host) { double(:host) }
5 | let(:config) { double(:config) }
6 | let(:cluster_context) { {} }
7 | let(:subject) { described_class.new(host, config: config, cluster_context: cluster_context) }
8 |
9 | describe '#worker_pool' do
10 | it 'returns FixedThreadPool' do
11 | pool = subject.worker_pool('foo', 2)
12 | expect(pool).to be_instance_of(Concurrent::FixedThreadPool)
13 | end
14 |
15 | it 'returns the same pool if asked twice' do
16 | pool1 = subject.worker_pool('foo', 2)
17 | pool2 = subject.worker_pool('foo', 2)
18 | expect(pool1).to eq(pool2)
19 | end
20 |
21 | it 'returns a different pool if asked twice with different name' do
22 | pool1 = subject.worker_pool('foo', 2)
23 | pool2 = subject.worker_pool('bar', 2)
24 | expect(pool1).not_to eq(pool2)
25 | end
26 | end
27 |
28 | describe '#throttled_work' do
29 | it 'runs given block' do
30 | value = subject.throttled_work('foo', 2) do
31 | 'bar'
32 | end
33 | expect(value).to eq('bar')
34 | end
35 |
36 |     it 're-raises exceptions' do
37 | expect {
38 | subject.throttled_work('foo', 2) do
39 | raise 'bar'
40 | end
41 | }.to raise_error(StandardError)
42 | end
43 | end
44 | end
45 |
--------------------------------------------------------------------------------
/spec/pharos/command_options/tf_json_spec.rb:
--------------------------------------------------------------------------------
1 | describe Pharos::CommandOptions::TfJson do
2 | let(:arguments) { [] }
3 |
4 | subject do
5 | Class.new(Pharos::Command) do
6 | options :load_config, :tf_json
7 |
8 | def config
9 | @config ||= load_config
10 | end
11 | end.new('').tap do |subject|
12 | subject.parse(arguments)
13 | end
14 | end
15 |
16 | describe '#load_config' do
17 | context 'with --tf-json' do
18 | let(:arguments) { ["--config=#{fixtures_path('cluster.minimal.yml')}", "--tf-json=#{fixtures_path('terraform/tf.json')}"] }
19 |
20 | it 'loads the config hosts' do
21 | expect(subject.config.hosts.map{|h| {address: h.address, role: h.role}}).to eq [
22 | { address: '147.75.100.11', role: 'master' },
23 | { address: "147.75.102.245", role: 'worker' },
24 | { address: "147.75.100.113", role: 'worker' },
25 | { address: "147.75.100.9", role: 'worker' },
26 | ]
27 | end
28 | end
29 |
30 | context 'with --tf-json including api endpoint' do
31 | let(:arguments) { ["--config=#{fixtures_path('cluster.minimal.yml')}", "--tf-json=#{fixtures_path('terraform/with_api_endpoint.json')}"] }
32 |
33 | it 'loads the api.endpoint' do
34 | expect(subject.config.api.endpoint).to eq 'api.example.com'
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/lib/pharos/resources/node_local_dns/01-configmap.yml.erb:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: node-local-dns
5 | namespace: kube-system
6 | data:
7 | Corefile: |
8 | cluster.local:53 {
9 | errors
10 | cache {
11 | success 9984 30
12 | denial 9984 5
13 | }
14 | reload
15 | loop
16 | bind <%= nodelocal_dns %>
17 | forward . <%= forward_target %> {
18 | force_tcp
19 | }
20 | prometheus :9253
21 | health 169.254.20.10:8080
22 | }
23 | in-addr.arpa:53 {
24 | errors
25 | cache 30
26 | reload
27 | loop
28 | bind <%= nodelocal_dns %>
29 | forward . <%= forward_target %> {
30 | force_tcp
31 | }
32 | prometheus :9253
33 | }
34 | ip6.arpa:53 {
35 | errors
36 | cache 30
37 | reload
38 | loop
39 | bind <%= nodelocal_dns %>
40 | forward . <%= forward_target %> {
41 | force_tcp
42 | }
43 | prometheus :9253
44 | }
45 | .:53 {
46 | errors
47 | cache 30
48 | reload
49 | loop
50 | bind <%= nodelocal_dns %>
51 | forward . /etc/resolv.conf {
52 | force_tcp
53 | }
54 | prometheus :9253
55 | }
56 |
--------------------------------------------------------------------------------
/lib/pharos/phases/store_cluster_configuration.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class StoreClusterConfiguration < Pharos::Phase
6 | using Pharos::CoreExt::DeepTransformKeys
7 |
8 | title "Store cluster configuration"
9 |
10 | def call
11 | logger.info { "Storing cluster configuration to configmap ..." }
12 | ensure_resource(resource)
13 | end
14 |
15 | private
16 |
17 | def resource
18 | data = @config.data.to_h.deep_transform_keys(&:to_s)
19 | K8s::Resource.new(
20 | apiVersion: 'v1',
21 | kind: 'ConfigMap',
22 | metadata: {
23 | namespace: 'kube-system',
24 | name: 'pharos-config'
25 | },
26 | data: {
27 | 'cluster.yml' => data.to_yaml,
28 | 'pharos-version' => Pharos.version,
29 | 'pharos-components.yml' => components.to_yaml,
30 | 'pharos-cluster-name' => @config.name
31 | }
32 | )
33 | end
34 |
35 | def ensure_resource(resource)
36 | kube_client.update_resource(resource)
37 | rescue K8s::Error::NotFound
38 | kube_client.create_resource(resource)
39 | end
40 |
41 | def components
42 | Pharos::Phases.components_for_config(@config).sort_by(&:name).map { |c| c.to_h.deep_stringify_keys }
43 | end
44 | end
45 | end
46 | end
47 |
--------------------------------------------------------------------------------
/lib/pharos/resources/calico/20-configmap.yml.erb:
--------------------------------------------------------------------------------
1 | # This ConfigMap is used to configure a self-hosted Calico installation.
2 | kind: ConfigMap
3 | apiVersion: v1
4 | metadata:
5 | name: calico-config
6 | namespace: kube-system
7 | data:
8 | ipv4_pool_cidr: "<%= ipv4_pool_cidr %>"
9 |
10 | # Configure the Calico backend to use.
11 | calico_backend: "bird"
12 |
13 | # Configure the MTU to use
14 | veth_mtu: "<%= mtu %>"
15 | felix_mtu: "<%= (mtu - 20) %>"
16 |
17 | # The CNI network configuration to install on each node.
18 | cni_network_config: |-
19 | {
20 | "name": "k8s-pod-network",
21 | "cniVersion": "0.3.0",
22 | "plugins": [
23 | {
24 | "type": "calico",
25 | "log_level": "info",
26 | "datastore_type": "kubernetes",
27 | "nodename": "__KUBERNETES_NODE_NAME__",
28 | "mtu": __CNI_MTU__,
29 | "ipam": {
30 | "type": "calico-ipam"
31 | },
32 | "policy": {
33 | "type": "k8s"
34 | },
35 | "kubernetes": {
36 | "kubeconfig": "__KUBECONFIG_FILEPATH__"
37 | }
38 | },
39 | {
40 | "type": "portmap",
41 | "snat": true,
42 | "capabilities": {"portMappings": true}
43 | },
44 | {
45 | "type": "bandwidth",
46 | "capabilities": {"bandwidth": true}
47 | }
48 | ]
49 | }
50 |
--------------------------------------------------------------------------------
/lib/pharos/core-ext/string_casing.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module CoreExt
5 | module StringCasing
6 | def underscore
7 | return self if empty?
8 |
9 | result = gsub(/([A-Z\d]+)([A-Z][a-z])/, '\1_\2')
10 | result.gsub!(/([a-z\d])([A-Z])/, '\1_\2')
11 | result.tr!('-', '_')
12 | result.gsub!(/\s+/, '_')
13 | result.gsub!(/__+/, '_')
14 | result.downcase!
15 | result
16 | end
17 |
18 | def camelcase
19 | return self if empty?
20 |
21 | extend(StringCasing).underscore.split('_').map(&:capitalize).join
22 | end
23 |
24 | def camelback
25 | return self if empty?
26 |
27 | camelcased = extend(StringCasing).camelcase
28 | camelcased[0] = camelcased[0].downcase
29 | camelcased
30 | end
31 |
32 | %i(underscore camelcase camelback).each do |meth|
33 | define_method("#{meth}!") do
34 | return self if empty?
35 |
36 | replace(extend(StringCasing).send(meth))
37 | end
38 | end
39 |
40 | refine String do
41 | include StringCasing
42 | end
43 |
44 | refine Symbol do
45 | %i(underscore camelcase camelback).each do |meth|
46 | define_method(meth) do
47 | to_s.extend(StringCasing).send(meth)
48 | end
49 | end
50 | end
51 | end
52 | end
53 | end
54 |
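A short usage sketch of the refinement above (activated with `using` in the calling scope; sample strings are illustrative):

```
using Pharos::CoreExt::StringCasing

"PodSecurityPolicy".underscore  # => "pod_security_policy"
"pod_security_policy".camelcase # => "PodSecurityPolicy"
:pod_security_policy.camelback  # => "podSecurityPolicy"
```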
--------------------------------------------------------------------------------
/spec/fixtures/terraform/tf.json:
--------------------------------------------------------------------------------
1 | {
2 | "pharos": {
3 | "sensitive": false,
4 | "type": "map",
5 | "value": {
6 | "masters": [
7 | {
8 | "address": [
9 | "147.75.100.11"
10 | ],
11 | "private_address": [
12 | "10.80.4.139"
13 | ],
14 | "role": "master",
15 | "user": "root"
16 | }
17 | ],
18 | "workers": [
19 | {
20 | "address": [
21 | "147.75.102.245",
22 | "147.75.100.113",
23 | "147.75.100.9"
24 | ],
25 | "label": [
26 | {
27 | "foo": "bar"
28 | }
29 | ],
30 | "environment": [
31 | {
32 | "BAR": "baz"
33 | }
34 | ],
35 | "private_address": [
36 | "10.80.4.129",
37 | "10.80.4.145",
38 | "10.80.4.149"
39 | ],
40 | "role": "worker",
41 | "user": "ubuntu"
42 | }
43 | ]
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/spec/fixtures/terraform/with_addons.json:
--------------------------------------------------------------------------------
1 | {
2 | "pharos": {
3 | "sensitive": false,
4 | "type": "map",
5 | "value": {
6 | "masters": [
7 | {
8 | "address": [
9 | "147.75.100.11"
10 | ],
11 | "private_address": [
12 | "10.80.4.139"
13 | ],
14 | "role": "master",
15 | "user": "root"
16 | }
17 | ],
18 | "workers": [
19 | {
20 | "address": [
21 | "147.75.102.245",
22 | "147.75.100.113",
23 | "147.75.100.9"
24 | ],
25 | "label": [
26 | {
27 | "foo": "bar"
28 | }
29 | ],
30 | "private_address": [
31 | "10.80.4.129",
32 | "10.80.4.145",
33 | "10.80.4.149"
34 | ],
35 | "role": "worker",
36 | "user": "ubuntu"
37 | }
38 | ]
39 | }
40 | },
41 | "pharos_addons": {
42 | "value": {
43 | "addon1": [
44 | { "foo": "bar", "bar": "baz" }
45 | ]
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/lib/pharos/resources/psp/99-restricted-psp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | annotations:
5 | apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
6 | apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
7 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
8 | seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
9 | name: 99-pharos-restricted
10 | spec:
11 |   allowedCapabilities: [] # the default set of capabilities is implicitly allowed
12 | allowPrivilegeEscalation: false
13 | fsGroup:
14 | rule: 'MustRunAs'
15 | ranges:
16 | # Forbid adding the root group.
17 | - min: 1
18 | max: 65535
19 | hostIPC: false
20 | hostNetwork: false
21 | hostPID: false
22 | privileged: false
23 | readOnlyRootFilesystem: false
24 | runAsUser:
25 | rule: 'MustRunAsNonRoot'
26 |   seLinux:
27 |     rule: 'RunAsAny'
28 |   supplementalGroups:
29 |     rule: 'MustRunAs'
30 | ranges:
31 | # Forbid adding the root group.
32 | - min: 1
33 | max: 65535
34 | volumes:
35 | - 'configMap'
36 | - 'downwardAPI'
37 | - 'emptyDir'
38 | - 'persistentVolumeClaim'
39 | - 'projected'
40 | - 'secret'
--------------------------------------------------------------------------------
/spec/fixtures/terraform/with_api_endpoint.json:
--------------------------------------------------------------------------------
1 | {
2 | "pharos_api": {
3 | "sensitive": false,
4 | "type": "map",
5 | "value": {
6 | "endpoint": "api.example.com"
7 | }
8 | },
9 | "pharos_hosts": {
10 | "sensitive": false,
11 | "type": "map",
12 | "value": {
13 | "masters": [
14 | {
15 | "address": [
16 | "147.75.100.11"
17 | ],
18 | "private_address": [
19 | "10.80.4.139"
20 | ],
21 | "role": "master",
22 | "user": "root"
23 | }
24 | ],
25 | "workers": [
26 | {
27 | "address": [
28 | "147.75.102.245",
29 | "147.75.100.113",
30 | "147.75.100.9"
31 | ],
32 | "label": [
33 | {
34 | "foo": "bar"
35 | }
36 | ],
37 | "private_address": [
38 | "10.80.4.129",
39 | "10.80.4.145",
40 | "10.80.4.149"
41 | ],
42 | "role": "worker",
43 | "user": "ubuntu"
44 | }
45 | ]
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/lib/pharos/resources/calico/30-controller-deployment.yml.erb:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: calico-kube-controllers
5 | namespace: kube-system
6 | labels:
7 | k8s-app: calico-kube-controllers
8 | annotations:
9 | scheduler.alpha.kubernetes.io/critical-pod: ''
10 | spec:
11 | selector:
12 | matchLabels:
13 | k8s-app: calico-kube-controllers
14 | # The controller can only have a single active instance.
15 | replicas: 1
16 | strategy:
17 | type: Recreate
18 | template:
19 | metadata:
20 | name: calico-kube-controllers
21 | namespace: kube-system
22 | labels:
23 | k8s-app: calico-kube-controllers
24 | spec:
25 | tolerations:
26 | # Mark the pod as a critical add-on for rescheduling.
27 | - key: CriticalAddonsOnly
28 | operator: Exists
29 | - key: node-role.kubernetes.io/master
30 | effect: NoSchedule
31 | serviceAccountName: calico-kube-controllers
32 | containers:
33 | - name: calico-kube-controllers
34 | image: <%= image_repository %>/kube-controllers:v<%= version %>
35 | env:
36 | # Choose which controllers to run.
37 | - name: ENABLED_CONTROLLERS
38 | value: node
39 | - name: DATASTORE_TYPE
40 | value: kubernetes
41 | readinessProbe:
42 | exec:
43 | command:
44 | - /usr/bin/check-status
45 | - -r
46 |
--------------------------------------------------------------------------------
/lib/pharos/kubeadm/init_config.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Kubeadm
5 | class InitConfig
6 | # @param config [Pharos::Config] cluster config
7 | # @param host [Pharos::Configuration::Host] master host-specific config
8 | def initialize(config, host)
9 | @config = config
10 | @host = host
11 | end
12 |
13 | # @return [Hash]
14 | def generate
15 | config = {
16 | 'apiVersion' => 'kubeadm.k8s.io/v1beta1',
17 | 'kind' => 'InitConfiguration',
18 | 'localAPIEndpoint' => {
19 | 'advertiseAddress' => advertise_address
20 | },
21 | 'nodeRegistration' => {
22 | 'name' => @host.hostname
23 | }
24 | }
25 |
26 | unless master_taint?
27 | config['nodeRegistration']['taints'] = []
28 | end
29 |
30 | config
31 | end
32 |
33 | # Used for internal k8s api access (aka address that works without cni/overlay)
34 | #
35 | # @return [String]
36 | def advertise_address
37 | @config.regions.size == 1 ? @host.peer_address : @host.address
38 | end
39 |
40 | # @return [Boolean]
41 | def master_taint?
42 | return true unless @host.taints
43 |
44 | # matching the taint used by kubeadm
45 | @host.taints.any?{ |taint| taint.key == 'node-role.kubernetes.io/master' && taint.effect == 'NoSchedule' }
46 | end
47 | end
48 | end
49 | end
50 |
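For orientation, a worked sketch of the payload `#generate` produces for a single-region master named `master-01` with peer address `10.80.4.139` (both values hypothetical) and no explicitly configured taints:

```
{
  'apiVersion' => 'kubeadm.k8s.io/v1beta1',
  'kind' => 'InitConfiguration',
  'localAPIEndpoint' => { 'advertiseAddress' => '10.80.4.139' },
  'nodeRegistration' => { 'name' => 'master-01' }
}
```

`nodeRegistration.taints` is only set to `[]` (clearing the default master taint) when the host defines taints and none of them matches kubeadm's `node-role.kubernetes.io/master:NoSchedule`.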
--------------------------------------------------------------------------------
/lib/pharos/phases/apply_manifests.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ApplyManifests < Pharos::Phase
6 | title "Apply configured kubernetes manifests"
7 |
8 | def call
9 | manifest_paths = config.manifests.to_a
10 | logger.info "Applying configured manifests: "
11 | manifest_paths.each do |path|
12 | logger.info " - #{path}"
13 | end
14 | manifests = []
15 | manifest_paths.each { |manifest_path| manifests += load_manifests(manifest_path) }
16 | stack = Pharos::Kube::Stack.new("pharos-manifests", manifests)
17 | stack.apply(kube_client)
18 | end
19 |
20 | # Load resources from path containing erb-templated YAML files
21 | #
22 | # @param path [String]
23 | # @return [Array]
24 | def load_manifests(path)
25 | path = Pathname.new(path).freeze
26 | files = if File.file?(path)
27 | [path]
28 | else
29 | Pathname.glob(path.join('*.{yml,yaml,yml.erb,yaml.erb}')).sort_by(&:to_s)
30 | end
31 | resources = files.flat_map do |file|
32 | Pharos::YamlFile.new(file).load_stream do |doc|
33 | K8s::Resource.new(doc)
34 | end
35 | end.select do |r|
36 |           # Keep only documents that are valid Kubernetes resources
37 | r.kind && r.apiVersion
38 | end
39 |
40 | resources
41 | end
42 | end
43 | end
44 | end
45 |
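The validity filter at the end of `#load_manifests` can be reproduced with a stdlib-only sketch (the sample documents are hypothetical):

```
require 'yaml'

docs = YAML.load_stream(<<~MANIFEST)
  ---
  apiVersion: v1
  kind: ConfigMap
  metadata: {name: example}
  ---
  # a comment-only document parses as nil
  ---
  {}
MANIFEST

# Keep only documents that declare both kind and apiVersion
docs.compact.select { |d| d['kind'] && d['apiVersion'] }.size # => 1
```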
--------------------------------------------------------------------------------
/lib/pharos/retry.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Retry
5 | DEFAULT_RETRY_ERRORS = [
6 | OpenSSL::SSL::SSLError,
7 | Excon::Error,
8 | K8s::Error
9 | ].freeze
10 |
11 | # @param seconds [Integer] seconds for how long the block will be retried
12 | # @param yield_object [Object] object to yield into block
13 | # @param wait [Integer] duration to wait between retries
14 | # @param logger [Logger] logger for errors
15 | # @param exceptions [Array] an array of exceptions to rescue from
16 | def self.perform(seconds = 600, yield_object: nil, wait: 2, logger: nil, exceptions: nil)
17 | start_time = Time.now
18 | retry_count = 0
19 | begin
20 | yield yield_object
21 | rescue *(exceptions || DEFAULT_RETRY_ERRORS) => exc
22 | if Time.now - start_time > seconds
23 | logger&.error "Retry time limit exceeded"
24 | raise
25 | end
26 |
27 | retry_count += 1
28 |
29 | if logger
30 | logger.warn "Retried 5 times, increasing verbosity" if retry_count == 5
31 | logger.send(retry_count >= 5 ? :error : :debug, exc)
32 | logger.warn { "Retrying after #{wait} second#{'s' if wait > 1} (##{retry_count}) ..." }
33 | end
34 |
35 | sleep wait
36 | retry
37 | rescue StandardError => exc
38 | logger&.debug "Unretriable exception, reraising"
39 | logger&.debug exc
40 | raise
41 | end
42 | end
43 | end
44 | end
45 |
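A minimal usage sketch (the block body and error list are illustrative):

```
Pharos::Retry.perform(300, wait: 5, logger: logger, exceptions: [K8s::Error]) do
  kube_client.apis(prefetch_resources: true)
end
```

Errors outside the given list fall through to the `StandardError` rescue and are re-raised immediately; listed errors are retried every 5 seconds until the 300-second budget runs out.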
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pharos Cluster
2 |
3 | [](https://travis-ci.org/kontena/pharos-cluster)
4 | [](https://join.slack.com/t/kontenacommunity/shared_invite/enQtOTc5NjAyNjYyOTk4LWU1NDQ0ZGFkOWJkNTRhYTc2YjVmZDdkM2FkNGM5MjhiYTRhMDU2NDQ1MzIyMDA4ZGZlNmExOTc0N2JmY2M3ZGI)
5 |
6 | Pharos Cluster is a [Kontena Pharos](https://pharos.sh) (Kubernetes distribution) management tool. It handles cluster bootstrapping, upgrades and other maintenance tasks over SSH and the Kubernetes API.
7 |
8 | ## Installation
9 |
10 | ### Download binaries
11 |
12 | The binary packages are available on the [releases](https://github.com/kontena/pharos-cluster/releases) page.
13 |
14 | ### Build and install Ruby gem
15 |
16 | You need Ruby version 2.5.
17 |
18 | ```
19 | $ gem build pharos-cluster.gemspec
20 | $ gem install pharos-cluster*.gem
21 | $ pharos --help
22 | ```
23 |
24 | ## Usage
25 |
26 | See [documentation](https://docs.k8spharos.dev/).
27 |
28 | ## Further Information
29 |
30 | - [Slack](https://kontenacommunity.slack.com) (get invite [here](https://join.slack.com/t/kontenacommunity/shared_invite/enQtOTc5NjAyNjYyOTk4LWU1NDQ0ZGFkOWJkNTRhYTc2YjVmZDdkM2FkNGM5MjhiYTRhMDU2NDQ1MzIyMDA4ZGZlNmExOTc0N2JmY2M3ZGI))
31 | - [Website](https://pharos.sh/)
32 |
33 | ## Contributing
34 |
35 | Bug reports and pull requests are welcome on GitHub at https://github.com/kontena/pharos-cluster.
36 |
--------------------------------------------------------------------------------
/lib/pharos/kubeconfig_command.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | class KubeconfigCommand < Pharos::Command
5 | options :load_config, :tf_json
6 |
7 | option ['-n', '--name'], 'NAME', 'overwrite cluster name', attribute_name: :new_name
8 | option ['-C', '--context'], 'CONTEXT', 'overwrite context name', attribute_name: :new_context
9 | option ['-m', '--merge'], '[FILE]', 'merge with existing configuration file', multivalued: true
10 |
11 | REMOTE_FILE = "/etc/kubernetes/admin.conf"
12 |
13 | def execute
14 | Dir.chdir(config_yaml.dirname) do
15 | transport.connect
16 |
17 | config = Pharos::Kube::Config.new(config_file_content)
18 | config.rename_cluster(new_name) if new_name
19 | config.rename_context(new_context) if new_context
20 | config.update_server_address(master_host.api_address)
21 | merge_list.each do |merge|
22 | merge_config = Pharos::Kube::Config.new(File.read(merge))
23 | config << merge_config
24 | end
25 | puts config
26 | end
27 | end
28 |
29 | private
30 |
31 | def config_file_content
32 | file = transport.file(REMOTE_FILE)
33 | signal_usage_error "Remote file #{REMOTE_FILE} not found" unless file.exist?
34 | file.read
35 | end
36 |
37 | def master_host
38 | @master_host ||= load_config.master_host
39 | end
40 |
41 |     # @return [Pharos::Transport::Base]
42 | def transport
43 | @transport ||= master_host.transport
44 | end
45 | end
46 | end
47 |
--------------------------------------------------------------------------------
/lib/pharos/phases/upgrade_check.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class UpgradeCheck < Pharos::Phase
6 | title "Check for Pharos upgrade"
7 |
8 | VERSION_URL = ENV["PHAROS_UPGRADE_CHECK_URL"] || 'https://get.pharos.sh/versions/latest'
9 |
10 | def call
11 | logger.info 'Checking for a new version ...'
12 | check_version
13 | end
14 |
15 | def check_version
16 | if latest_version > current_version
17 | logger.warn "There's a new version available: #{latest_version}."
18 | else
19 | logger.info 'Already at the latest version'
20 | end
21 | rescue StandardError => ex
22 | logger.debug { "Upgrade check encountered an error: #{ex} : #{ex.message}" }
23 | end
24 |
25 | private
26 |
27 | def current_version
28 | @current_version ||= Gem::Version.new(Pharos::VERSION)
29 | end
30 |
31 | def latest_version
32 | return @latest_version if @latest_version
33 |
34 | version = Excon.get(
35 | VERSION_URL,
36 | headers: { 'User-Agent' => "pharos/#{Pharos.version}" },
37 | query: channel_query
38 | ).body
39 |
40 | raise "Invalid version response format: #{version}" unless version.match?(/^\d+\.\d+\.\d+(\-\S+)?$/)
41 |
42 | @latest_version = Gem::Version.new(version.gsub(/\+.*/, ''))
43 | end
44 |
45 | def channel_query
46 | ENV['PHAROS_CHANNEL'] == 'pre' ? { pre: true } : {}
47 | end
48 | end
49 | end
50 | end
51 |
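`Gem::Version` is used instead of plain string comparison because it orders pre-releases correctly:

```
Gem::Version.new('2.4.0-rc.1') < Gem::Version.new('2.4.0') # => true
'2.4.0-rc.1' < '2.4.0'                                     # => false
```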
--------------------------------------------------------------------------------
/spec/pharos/host/ubuntu/ubuntu_xenial_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/config'
2 | require 'pharos/host/ubuntu/ubuntu_xenial'
3 |
4 | describe Pharos::Host::UbuntuXenial do
5 | let(:host) do
6 | host = Pharos::Configuration::Host.new(peer_address: '192.168.100')
7 | host.cpu_arch = Pharos::Configuration::CpuArch.new(id: 'amd64')
8 | host
9 | end
10 | let(:ssh) { instance_double(Pharos::Transport::SSH) }
11 | let(:cluster_config) { double(:cluster_config, image_repository: 'quay.io/kontena') }
12 | let(:subject) { described_class.new(host) }
13 | before do
14 | allow(host).to receive(:config).and_return(cluster_config)
15 | allow(host).to receive(:transport).and_return(ssh)
16 | end
17 |
18 | describe '#docker_version' do
19 | it 'returns correct version' do
20 | expect(subject.docker_version).to eq(Pharos::Host::UbuntuXenial::DOCKER_VERSION)
21 | end
22 | end
23 |
24 | describe '#configure_container_runtime' do
25 | context 'docker' do
26 | it 'configures docker' do
27 | allow(subject).to receive(:docker?).and_return(true)
28 | allow(subject).to receive(:insecure_registries)
29 | expect(subject).to receive(:exec_script).with('configure-docker.sh', anything)
30 | subject.configure_container_runtime
31 | end
32 | end
33 |
34 | context 'unknown' do
35 | it 'raises error' do
36 | allow(host).to receive(:container_runtime).and_return('moby')
37 | expect {
38 | subject.configure_container_runtime
39 | }.to raise_error(Pharos::Error)
40 | end
41 | end
42 | end
43 | end
44 |
--------------------------------------------------------------------------------
/examples/vagrant/centos7/Vagrantfile:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | # -*- mode: ruby -*-
4 | # vi: set ft=ruby :
5 |
6 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
7 | # configures the configuration version (we support older styles for
8 | # backwards compatibility). Please don't change it unless you know what
9 | # you're doing.
10 | Vagrant.configure("2") do |config|
11 | # The most common configuration options are documented and commented below.
12 | # For a complete reference, please see the online documentation at
13 | # https://docs.vagrantup.com.
14 |
15 | # Every Vagrant development environment requires a box. You can search for
16 | # boxes at https://atlas.hashicorp.com/search.
17 | config.vm.box = "centos/7"
18 |
19 | # Disable automatic box update checking. If you disable this, then
20 | # boxes will only be checked for updates when the user runs
21 | # `vagrant box outdated`. This is not recommended.
22 | config.vm.box_check_update = false
23 | config.vm.synced_folder '.', '/vagrant', disabled: true
24 |
25 |   3.times do |i|
26 |     vm_name = format("host-%<index>02d", index: i)
27 | config.vm.define(vm_name) do |host|
28 | host.vm.hostname = vm_name
29 | host.ssh.insert_key = false
30 | host.vm.provider :virtualbox do |vb|
31 | vb.gui = false
32 | vb.memory = "2048"
33 | vb.cpus = 1
34 | end
35 |
36 | host.vm.network "private_network", ip: "192.168.110.#{i + 100}"
37 | host.vm.provision("shell", path: 'proxy-only.sh') if ENV['VAGRANT_HTTP_PROXY']
38 | end
39 | end
40 | end
41 |
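The `%<index>02d` token used above is Ruby's named format directive with zero-padding:

```
format("host-%<index>02d", index: 7) # => "host-07"
```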
--------------------------------------------------------------------------------
/lib/pharos/phases/configure_client.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Phases
5 | class ConfigureClient < Pharos::Phase
6 | title "Configure kube client"
7 |
8 | REMOTE_FILE = "/etc/kubernetes/admin.conf"
9 |
10 | def call
11 | return unless kubeconfig?
12 |
13 | if host.local?
14 | cluster_context['kube_client'] = Pharos::Kube.client('localhost', k8s_config, 6443)
15 | else
16 | transport.close(cluster_context['kube_client'].transport.server[/:(\d+)/, 1].to_i) if cluster_context['kube_client']
17 | cluster_context['kube_client'] = Pharos::Kube.client('localhost', k8s_config, transport.forward('localhost', 6443))
18 | end
19 |
20 | client_prefetch
21 | end
22 |
23 | def kubeconfig
24 | @kubeconfig ||= transport.file(REMOTE_FILE)
25 | end
26 |
27 |       # @return [Boolean]
28 | def kubeconfig?
29 | kubeconfig.exist?
30 | end
31 |
32 | # @return [K8s::Config]
33 | def k8s_config
34 | logger.info { "Fetching kubectl config ..." }
35 | config = YAML.safe_load(kubeconfig.read)
36 |
37 | logger.debug { "New config: #{config}" }
38 | K8s::Config.new(config)
39 | end
40 |
41 | # prefetch client resources to warm up caches
42 | def client_prefetch
43 | logger.info "Populating client cache"
44 | kube_client.apis(prefetch_resources: true)
45 | rescue Excon::Error::Certificate
46 | logger.warn "Certificate validation failed"
47 | end
48 | end
49 | end
50 | end
51 |
--------------------------------------------------------------------------------
/lib/pharos/transport/command/ssh.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Transport
5 | module Command
6 | class SSH < Pharos::Transport::Command::Local
7 | attr_reader :cmd, :result
8 |
9 | def hostname
10 | @client.host.to_s
11 | end
12 |
13 | # @return [Pharos::Transport::CommandResult]
14 | def run
15 | @client.connect unless @client.connected?
16 |
17 | result.append(@source.nil? ? @cmd : "#{@cmd} < #{@source}", :cmd)
18 | response = @client.session.open_channel do |channel|
19 | channel.env('LC_ALL', 'C.UTF-8')
20 | channel.exec @cmd do |_, success|
21 | raise Pharos::ExecError, "Failed to exec #{cmd}" unless success
22 |
23 | channel.on_data do |_, data|
24 | result.append(data, :stdout)
25 | end
26 |
27 | channel.on_extended_data do |_c, _type, data|
28 | result.append(data, :stderr)
29 | end
30 |
31 | channel.on_request("exit-status") do |_, data|
32 | result.exit_status = data.read_long
33 | end
34 |
35 | if @stdin
36 | result.append(@stdin, :stdin)
37 | channel.send_data(@stdin)
38 | channel.eof!
39 | end
40 | end
41 | end
42 |
43 | response.wait
44 |
45 | result
46 | rescue IOError
47 | @client.disconnect
48 | retry
49 | end
50 | end
51 | end
52 | end
53 | end
54 |
--------------------------------------------------------------------------------
/spec/pharos/terraform/legacy_json_parser_spec.rb:
--------------------------------------------------------------------------------
1 | require 'pharos/terraform/legacy_json_parser'
2 |
3 | describe Pharos::Terraform::LegacyJsonParser do
4 |
5 | let(:subject) { described_class.new(fixture('terraform/tf.json')) }
6 |
7 | describe '#hosts' do
8 | it 'parses valid terraform json file' do
9 | hosts = subject.hosts
10 | expect(hosts.size).to eq(4)
11 | expect(hosts.select{ |h| h[:role] == 'master' }.size).to eq(1)
12 | expect(hosts.select{ |h| h[:role] == 'worker' }.size).to eq(3)
13 | master = hosts.select{ |h| h[:role] == 'master' }.first
14 | worker = hosts.select{ |h| h[:role] == 'worker' }.first
15 | expect(master[:user]).to eq('root')
16 | expect(worker[:user]).to eq('ubuntu')
17 | expect(worker[:environment]).to eq({ 'BAR' => 'baz' })
18 | end
19 |
20 | it 'raises error on invalid json' do
21 | subject = described_class.new('{"asdsds": "asdsdasd"')
22 | expect {
23 | subject.hosts
24 | }.to raise_error(Pharos::Terraform::ParserError)
25 | end
26 | end
27 |
28 | describe '#addons' do
29 | let(:subject) { described_class.new(fixture('terraform/with_addons.json')) }
30 |
31 | it 'parses valid terraform json file' do
32 | addons = subject.addons
33 | expect(addons.keys.size).to eq(1)
34 | expect(addons['addon1']).to eq({ "foo" => "bar", "bar" => "baz" })
35 | end
36 |
37 | it 'returns empty hash if no addons are defined' do
38 | subject = described_class.new(fixture('terraform/tf.json'))
39 | expect(subject.addons).to eq({})
40 | end
41 | end
42 | end
43 |
--------------------------------------------------------------------------------
/lib/pharos/terraform/base_command.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'open3'
4 |
5 | module Pharos
6 | module Terraform
7 | class BaseCommand < Pharos::Command
8 | options :yes?
9 |
10 | option "--workspace", "NAME", "terraform workspace", default: "default"
11 | option "--var", "VAR", "set a variable in the terraform configuration.", multivalued: true
12 | option "--var-file", "FILE", 'set variables in the terraform configuration from a file (default: terraform.tfvars or any .auto.tfvars)'
13 | option "--state", "PATH", "Path to the state file. Defaults to 'terraform.tfstate'."
14 |
15 | def tf_workspace
16 | return 0 if run_cmd("terraform workspace select #{workspace} 2> /dev/null")
17 |
18 | run_cmd("terraform workspace new #{workspace}")
19 | end
20 |
21 | def workspace_file
22 | ".#{workspace}.json"
23 | end
24 |
25 | def run_cmd!(cmd)
26 | success = run_cmd(cmd)
27 | signal_error "#{cmd} failed" unless success
28 | end
29 |
30 | # @param cmd [String]
31 | # @return [Boolean]
32 | def run_cmd(cmd)
33 | system(cmd)
34 | end
35 |
36 | # Returns common options for both apply and destroy commands.
37 | # @return [Array]
38 | def common_tf_options
39 | opts = []
40 | opts << "-auto-approve" if yes?
41 | opts << "-state #{state}" if state
42 | opts << "-var-file #{var_file}" if var_file
43 | opts += var_list.map { |var| "-var #{var}" } if var_list
44 |
45 | opts
46 | end
47 | end
48 | end
49 | end
50 |
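As an illustration, with hypothetical flags `--yes --state my.tfstate --var foo=bar` parsed by a subcommand, `common_tf_options` evaluates to:

```
["-auto-approve", "-state my.tfstate", "-var foo=bar"]
```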
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/ubuntu_focal.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'ubuntu'
4 |
5 | module Pharos
6 | module Host
7 | class UbuntuFocal < Ubuntu
8 | register_config 'ubuntu', '20.04'
9 |
10 | register_component(
11 | name: 'docker', version: DOCKER_VERSION, license: 'Apache License 2.0',
12 | enabled: proc { |c| c.hosts.any? { |h| h.container_runtime == 'docker' } }
13 | )
14 |
15 | register_component(
16 | name: 'containerd', version: CONTAINERD_VERSION, license: 'Apache License 2.0',
17 | enabled: proc { |c| c.hosts.any? { |h| h.container_runtime == 'containerd' } }
18 | )
19 |
20 | register_component(
21 | name: 'cfssl', version: CFSSL_VERSION, license: 'MIT',
22 | enabled: proc { |c| !c.etcd&.endpoints }
23 | )
24 |
25 | # @return [Array]
26 | def kubelet_args
27 | kubelet_args = super
28 |
29 | kubelet_args
30 | end
31 |
32 | def default_repositories
33 | [
34 | Pharos::Configuration::Repository.new(
35 | name: "pharos-kubernetes.list",
36 | key_url: "https://packages.cloud.google.com/apt/doc/apt-key.gpg",
37 | contents: "deb https://apt.kubernetes.io/ kubernetes-xenial main\n"
38 | ),
39 | Pharos::Configuration::Repository.new(
40 | name: "docker-ce.list",
41 | key_url: "https://download.docker.com/linux/ubuntu/gpg",
42 | contents: "deb https://download.docker.com/linux/ubuntu bionic stable\n"
43 | )
44 | ]
45 | end
46 | end
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/lib/pharos/host/ubuntu/ubuntu_bionic.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require_relative 'ubuntu'
4 |
5 | module Pharos
6 | module Host
7 | class UbuntuBionic < Ubuntu
8 | register_config 'ubuntu', '18.04'
9 |
10 | register_component(
11 | name: 'docker', version: DOCKER_VERSION, license: 'Apache License 2.0',
12 | enabled: proc { |c| c.hosts.any? { |h| h.container_runtime == 'docker' } }
13 | )
14 |
15 | register_component(
16 | name: 'containerd', version: CONTAINERD_VERSION, license: 'Apache License 2.0',
17 | enabled: proc { |c| c.hosts.any? { |h| h.container_runtime == 'containerd' } }
18 | )
19 |
20 | register_component(
21 | name: 'cfssl', version: CFSSL_VERSION, license: 'MIT',
22 | enabled: proc { |c| !c.etcd&.endpoints }
23 | )
24 |
25 | # @return [Array]
26 | def kubelet_args
27 | kubelet_args = super
28 |
29 | kubelet_args
30 | end
31 |
32 | def default_repositories
33 | [
34 | Pharos::Configuration::Repository.new(
35 | name: "pharos-kubernetes.list",
36 | key_url: "https://packages.cloud.google.com/apt/doc/apt-key.gpg",
37 | contents: "deb https://apt.kubernetes.io/ kubernetes-xenial main\n"
38 | ),
39 | Pharos::Configuration::Repository.new(
40 | name: "docker-ce.list",
41 | key_url: "https://download.docker.com/linux/ubuntu/gpg",
42 | contents: "deb https://download.docker.com/linux/ubuntu bionic stable\n"
43 | )
44 | ]
45 | end
46 | end
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/spec/pharos/kubeadm/kubeproxy_config_spec.rb:
--------------------------------------------------------------------------------
1 | require "pharos/phases/configure_master"
2 |
3 | describe Pharos::Kubeadm::KubeProxyConfig do
4 | let(:master) { Pharos::Configuration::Host.new(address: 'test', private_address: 'private', role: 'master') }
5 | let(:config_hosts_count) { 1 }
6 |
7 | let(:config) { Pharos::Config.new(
8 | hosts: (1..config_hosts_count).map { |i| Pharos::Configuration::Host.new(role: 'worker') },
9 | network: {
10 | service_cidr: '1.2.3.4/16',
11 | pod_network_cidr: '10.0.0.0/16'
12 | },
13 | addons: {},
14 | etcd: {}
15 | ) }
16 |
17 | subject { described_class.new(config, master) }
18 |
19 | describe '#generate' do
20 | context 'with kube-proxy ipvs configuration' do
21 | let(:config) { Pharos::Config.new(
22 | hosts: (1..config_hosts_count).map { |i| Pharos::Configuration::Host.new() },
23 | network: {},
24 | kube_proxy: {
25 | mode: 'ipvs',
26 | }
27 | ) }
28 |
29 | it 'configures kube-proxy' do
30 | config = subject.generate
31 | expect(config['mode']).to eq('ipvs')
32 | end
33 | end
34 |
35 | context 'with kube-proxy iptables configuration' do
36 | let(:config) { Pharos::Config.new(
37 | hosts: (1..config_hosts_count).map { |i| Pharos::Configuration::Host.new() },
38 | network: {},
39 | kube_proxy: {
40 | mode: 'iptables',
41 | }
42 | ) }
43 |
44 | it 'configures kube-proxy' do
45 | config = subject.generate
46 | expect(config['mode']).to eq('iptables')
47 | end
48 | end
49 | end
50 | end
51 |
--------------------------------------------------------------------------------
/lib/pharos/command_options/filtered_hosts.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module CommandOptions
5 | module FilteredHosts
6 | def self.included(base)
7 | base.prepend(InstanceMethods)
8 | base.options :load_config
9 | base.option ['-r', '--role'], 'ROLE', 'select a host by role'
10 | base.option ['-l', '--label'], 'LABEL=VALUE', 'select a host by label, can be specified multiple times', multivalued: true do |pair|
11 | key, value = pair.split('=', 2)
12 | signal_usage_error "invalid syntax for label : #{pair}, see --help for usage" if value.nil?
13 | { key: key, value: value }
14 | end
15 | base.option ['-a', '--address'], 'ADDRESS', 'select a host by public address'
16 |
17 | base.option ['-f', '--first'], :flag, 'only perform on the first matching host'
18 | end
19 |
20 | module InstanceMethods
21 | private
22 |
23 | def filtered_hosts
24 | @filtered_hosts ||= Array(
25 | load_config.hosts.send(first? ? :find : :select) do |host|
26 | next if role && host.role != role
27 | next if address && host.address != address
28 |
29 | unless label_list.empty?
30 | next if host.labels.nil?
31 | next unless label_list.all? { |l| host.labels[l[:key]] == l[:value] }
32 | end
33 |
34 | true
35 | end
36 | ).tap do |result|
37 | signal_usage_error 'no host matched in configuration' if result.empty?
38 | end
39 | end
40 | end
41 | end
42 | end
43 | end
44 |
--------------------------------------------------------------------------------
/lib/pharos/resources/pharos/20-deployment.yml.erb:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | k8s-app: pharos-cloud-controller
7 | name: pharos-cloud-controller
8 | namespace: kube-system
9 | spec:
10 | strategy:
11 | type: Recreate
12 | replicas: 1
13 | selector:
14 | matchLabels:
15 | k8s-app: pharos-cloud-controller
16 | template:
17 | metadata:
18 | labels:
19 | k8s-app: pharos-cloud-controller
20 | spec:
21 | serviceAccountName: cloud-controller-manager
22 | hostNetwork: true
23 | containers:
24 | - name: pharos-cloud-controller
25 | image: <%= image_repository %>/pharos-cloud-controller:0.1.0
26 | args:
27 | - --cloud-provider=pharos
28 | - --node-status-update-frequency=60s
29 | - --allow-untagged-cloud
30 | - --leader-elect=true
31 | - --use-service-account-credentials
32 | resources:
33 | limits:
34 | cpu: 100m
35 | memory: 50Mi
36 | requests:
37 | cpu: 50m
38 | memory: 30Mi
39 | tolerations:
40 | # this is required so CCM can bootstrap itself
41 | - key: node.cloudprovider.kubernetes.io/uninitialized
42 | value: "true"
43 | effect: NoSchedule
44 | # this is to have the daemonset runnable on master nodes
45 | # the taint may vary depending on your cluster setup
46 | - key: node-role.kubernetes.io/master
47 | effect: NoSchedule
48 | # this is to restrict CCM to only run on master nodes
49 | nodeSelector:
50 | node-role.kubernetes.io/master: ""
--------------------------------------------------------------------------------
/lib/pharos/configuration/route.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | require 'ipaddr'
4 |
5 | module Pharos
6 | module Configuration
7 | class Route < Pharos::Configuration::Struct
8 |       ROUTE_REGEXP = %r(^((?<type>\S+)\s+)?(?<prefix>default|[0-9./]+)(\s+via (?<via>\S+))?(\s+dev (?<dev>\S+))?(\s+proto (?<proto>\S+))?(\s+(?<options>.+))?$).freeze
9 |
10 | # @param line [String]
11 | # @return [Pharos::Configuration::Route]
12 | # @raise [RuntimeError] invalid route
13 | def self.parse(line)
14 | fail "Unmatched ip route: #{line.inspect}" unless match = ROUTE_REGEXP.match(line.strip)
15 |
16 | captures = Hash[match.named_captures.map{ |k, v| [k.to_sym, v] }.reject{ |_k, v| v.nil? }]
17 |
18 | new(raw: line.strip, **captures)
19 | end
20 |
21 | attribute :raw, Pharos::Types::Strict::String
22 | attribute :type, Pharos::Types::Strict::String.optional
23 | attribute :prefix, Pharos::Types::Strict::String
24 | attribute :via, Pharos::Types::Strict::String.optional
25 | attribute :dev, Pharos::Types::Strict::String.optional
26 | attribute :proto, Pharos::Types::Strict::String.optional
27 | attribute :options, Pharos::Types::Strict::String.optional
28 |
29 | def to_s
30 | @raw
31 | end
32 |
33 | # @return [Boolean]
34 | def overlaps?(cidr)
35 | # special-case the default route and ignore it
36 | return nil if prefix == 'default'
37 |
38 | route_prefix = IPAddr.new(prefix)
39 | cidr = IPAddr.new(cidr)
40 |
41 | route_prefix.include?(cidr) || cidr.include?(route_prefix)
42 | end
43 | end
44 | end
45 | end
46 |
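A usage sketch against typical `ip route` output (the route lines are illustrative):

```
route = Pharos::Configuration::Route.parse("10.32.0.0/12 dev weave proto kernel")
route.prefix                    # => "10.32.0.0/12"
route.dev                       # => "weave"
route.overlaps?("10.32.1.0/24") # => true

# The default route is special-cased and never reported as overlapping
Pharos::Configuration::Route.parse("default via 192.168.1.1 dev eth0").overlaps?("10.0.0.0/8") # => nil
```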
--------------------------------------------------------------------------------
/lib/pharos/host/debian/debian.rb:
--------------------------------------------------------------------------------
1 | # frozen_string_literal: true
2 |
3 | module Pharos
4 | module Host
5 | class Debian < Configurer
6 | def install_essentials
7 | exec_script(
8 | 'configure-essentials.sh'
9 | )
10 | end
11 |
12 | def configure_netfilter
13 | exec_script('configure-netfilter.sh')
14 | end
15 |
16 | def configure_cfssl
17 | exec_script(
18 | 'configure-cfssl.sh',
19 | IMAGE: "docker.io/jakolehm/cfssl:0.1.1"
20 | )
21 | end
22 |
23 | def ensure_kubelet(args)
24 | exec_script(
25 | 'ensure-kubelet.sh',
26 | args
27 | )
28 | end
29 |
30 | def install_kube_packages(args)
31 | exec_script(
32 | 'install-kube-packages.sh',
33 | args
34 | )
35 | end
36 |
37 | def upgrade_kubeadm(version)
38 | exec_script(
39 | "upgrade-kubeadm.sh",
40 | VERSION: version,
41 | ARCH: host.cpu_arch.name
42 | )
43 | end
44 |
45 | def configure_firewalld
46 | exec_script("configure-firewalld.sh")
47 | end
48 |
49 | def reset
50 | exec_script("reset.sh")
51 | end
52 |
53 | def configure_repos
54 | host_repositories.each do |repo|
55 | repo_path = "/etc/apt/sources.list.d/#{repo.name}"
56 | transport.exec!("curl -fsSL #{repo.key_url} | sudo apt-key add -") if repo.key_url
57 | transport.file(repo_path).write(repo.contents)
58 | end
59 | transport.exec!("DEBIAN_FRONTEND=noninteractive sudo -E apt-get update -y")
60 | end
61 | end
62 | end
63 | end
64 |
--------------------------------------------------------------------------------