├── docs
├── guide
│ ├── gke
│ │ ├── account-setup.md
│ │ └── cluster.md
│ ├── backup
│ │ └── backup.md
│ ├── bare-metal
│ │ ├── cluster.md
│ │ └── cluster-manager.md
│ ├── gcp
│ │ ├── cluster-manager.md
│ │ ├── cluster.md
│ │ └── account-setup.md
│ ├── triton
│ │ ├── cluster-manager.md
│ │ └── cluster.md
│ ├── building-cli.md
│ ├── aws
│ │ ├── cluster-manager.md
│ │ └── cluster.md
│ ├── azure
│ │ ├── cluster-manager.md
│ │ ├── cluster-manager-ha.md
│ │ └── cluster.md
│ ├── installing-cli.md
│ ├── release-process.md
│ └── vSphere
│ │ └── cluster.md
└── imgs
│ ├── Triton-Kubernetes.jpg
│ ├── Triton-Kubernetes.pdf
│ └── Triton-Kubernetes.png
├── terraform
├── modules
│ ├── triton-rancher-k8s-host
│ │ ├── outputs.tf
│ │ ├── versions.tf
│ │ ├── files
│ │ │ └── install_rancher_agent.sh.tpl
│ │ ├── main.tf
│ │ └── variables.tf
│ ├── aws-rancher
│ │ ├── files
│ │ │ ├── rancher_server.sh
│ │ │ ├── setup_rancher.sh.tpl
│ │ │ ├── install_docker_rancher.sh.tpl
│ │ │ └── install_rancher_master.sh.tpl
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ ├── azure-rke
│ │ ├── files
│ │ │ ├── rancher_server.sh
│ │ │ ├── install_docker_rancher.sh.tpl
│ │ │ ├── wait_for_docker_install.sh
│ │ │ └── setup_rancher.sh.tpl
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ ├── gcp-rancher
│ │ ├── files
│ │ │ ├── rancher_server.sh
│ │ │ ├── setup_rancher.sh.tpl
│ │ │ ├── install_docker_rancher.sh.tpl
│ │ │ └── install_rancher_master.sh.tpl
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ ├── azure-rancher
│ │ ├── files
│ │ │ ├── rancher_server.sh
│ │ │ ├── setup_rancher.sh.tpl
│ │ │ ├── install_docker_rancher.sh.tpl
│ │ │ └── install_rancher_master.sh.tpl
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ ├── triton-rancher
│ │ ├── files
│ │ │ ├── rancher_server.sh
│ │ │ ├── setup_rancher.sh.tpl
│ │ │ ├── install_docker_rancher.sh.tpl
│ │ │ └── install_rancher_master.sh.tpl
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ ├── bare-metal-rancher
│ │ ├── files
│ │ │ ├── rancher_server.sh
│ │ │ ├── setup_rancher.sh.tpl
│ │ │ ├── install_docker_rancher.sh.tpl
│ │ │ └── install_rancher_master.sh.tpl
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ ├── aks-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── main.tf
│ ├── aws-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ ├── gcp-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── main.tf
│ ├── gke-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── main.tf
│ ├── k8s-backup-manta
│ │ ├── versions.tf
│ │ ├── variables.tf
│ │ ├── main.tf
│ │ └── files
│ │ │ └── minio-manta-deployment.yaml
│ ├── k8s-backup-s3
│ │ ├── versions.tf
│ │ ├── variables.tf
│ │ └── main.tf
│ ├── aws-rancher-k8s-host
│ │ ├── versions.tf
│ │ ├── files
│ │ │ └── install_rancher_agent.sh.tpl
│ │ ├── main.tf
│ │ └── variables.tf
│ ├── azure-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ ├── variables.tf
│ │ └── main.tf
│ ├── gcp-rancher-k8s-host
│ │ ├── versions.tf
│ │ ├── files
│ │ │ └── install_rancher_agent.sh.tpl
│ │ ├── main.tf
│ │ └── variables.tf
│ ├── vsphere-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ ├── main.tf
│ │ └── variables.tf
│ ├── azure-rancher-k8s-host
│ │ ├── versions.tf
│ │ ├── files
│ │ │ └── install_rancher_agent.sh.tpl
│ │ └── variables.tf
│ ├── bare-metal-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ ├── main.tf
│ │ └── variables.tf
│ ├── vsphere-rancher-k8s-host
│ │ ├── versions.tf
│ │ ├── files
│ │ │ └── install_rancher_agent.sh.tpl
│ │ ├── variables.tf
│ │ └── main.tf
│ ├── bare-metal-rancher-k8s-host
│ │ ├── versions.tf
│ │ ├── files
│ │ │ └── install_rancher_agent.sh.tpl
│ │ ├── main.tf
│ │ └── variables.tf
│ ├── triton-rancher-k8s
│ │ ├── versions.tf
│ │ ├── outputs.tf
│ │ ├── main.tf
│ │ └── variables.tf
│ └── files
│ │ ├── install_rancher_master.sh.tpl
│ │ ├── rancher_server.sh
│ │ ├── install_docker_rancher.sh.tpl
│ │ └── setup_rancher.sh.tpl
└── README.md
├── scripts
└── docker
│ └── README.md
├── shell
├── shell_options.go
└── run_shell_cmd.go
├── static
└── logos.tar.gz
├── main.go
├── examples
├── apps
│ ├── ingress
│ │ ├── ingress.yaml
│ │ └── README.md
│ └── guestbook
│ │ └── README.md
└── silent-install
│ ├── gcp
│ ├── manager-on-gcp.yaml
│ └── cluster-gcp-ha.yaml
│ └── triton
│ ├── manager-on-triton.yaml
│ └── cluster-triton-ha.yaml
├── packer
├── variables
│ ├── triton.yaml
│ ├── triton-us-east-1.yaml
│ └── triton-us-west-1.yaml
├── rancher-host.yaml
├── README.md
├── rancher-server.yaml
├── builders
│ ├── triton-kvm-ubuntu.yaml
│ ├── triton-kvm-rancheragent.yaml
│ └── triton-kvm-rancherserver.yaml
└── rancher-agent.yaml
├── test_pkg
├── t.go
└── alter.go
├── cmd
├── version.go
├── version_test.go
├── get.go
├── create.go
├── destroy.go
└── root.go
├── .gitignore
├── backend
├── backend.go
└── mocks
│ └── Backend.go
├── util
├── confirm_prompt.go
├── ssh_utils.go
└── backend_prompt_test.go
├── create
├── cluster_bare_metal.go
└── node_test.go
├── .travis.yml
├── get
├── manager_test.go
├── manager.go
└── cluster.go
├── destroy
├── manager_test.go
├── manager.go
└── cluster_test.go
├── go.mod
├── README.md
└── Makefile
/docs/guide/gke/account-setup.md:
--------------------------------------------------------------------------------
1 | ../gcp/account-setup.md
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s-host/outputs.tf:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/scripts/docker/README.md:
--------------------------------------------------------------------------------
1 | Files from github.com/rancher/install-docker
2 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher/files/rancher_server.sh:
--------------------------------------------------------------------------------
1 | ../../files/rancher_server.sh
--------------------------------------------------------------------------------
/terraform/modules/azure-rke/files/rancher_server.sh:
--------------------------------------------------------------------------------
1 | ../../files/rancher_server.sh
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher/files/rancher_server.sh:
--------------------------------------------------------------------------------
1 | ../../files/rancher_server.sh
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher/files/setup_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/setup_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher/files/rancher_server.sh:
--------------------------------------------------------------------------------
1 | ../../files/rancher_server.sh
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher/files/setup_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/setup_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher/files/rancher_server.sh:
--------------------------------------------------------------------------------
1 | ../../files/rancher_server.sh
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher/files/setup_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/setup_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher/files/rancher_server.sh:
--------------------------------------------------------------------------------
1 | ../../files/rancher_server.sh
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher/files/setup_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/setup_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher/files/setup_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/setup_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rke/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/shell/shell_options.go:
--------------------------------------------------------------------------------
1 | package shell
2 |
3 | type ShellOptions struct {
4 | WorkingDir string
5 | }
6 |
--------------------------------------------------------------------------------
/static/logos.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TritonDataCenter/triton-kubernetes/HEAD/static/logos.tar.gz
--------------------------------------------------------------------------------
/terraform/modules/aks-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher/files/install_docker_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_docker_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher/files/install_rancher_master.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_rancher_master.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher/files/install_docker_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_docker_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher/files/install_rancher_master.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_rancher_master.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rke/files/install_docker_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_docker_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher/files/install_docker_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_docker_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher/files/install_rancher_master.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_rancher_master.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/gke-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/k8s-backup-manta/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/k8s-backup-s3/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher-k8s-host/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher/files/install_docker_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_docker_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher/files/install_rancher_master.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_rancher_master.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s-host/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher/files/install_docker_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_docker_rancher.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher/files/install_rancher_master.sh.tpl:
--------------------------------------------------------------------------------
1 | ../../files/install_rancher_master.sh.tpl
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher-k8s-host/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s-host/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s-host/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.12"
4 | }
5 |
--------------------------------------------------------------------------------
/docs/imgs/Triton-Kubernetes.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TritonDataCenter/triton-kubernetes/HEAD/docs/imgs/Triton-Kubernetes.jpg
--------------------------------------------------------------------------------
/docs/imgs/Triton-Kubernetes.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TritonDataCenter/triton-kubernetes/HEAD/docs/imgs/Triton-Kubernetes.pdf
--------------------------------------------------------------------------------
/docs/imgs/Triton-Kubernetes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/TritonDataCenter/triton-kubernetes/HEAD/docs/imgs/Triton-Kubernetes.png
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import "github.com/joyent/triton-kubernetes/cmd"
4 |
5 | func main() {
6 | cmd.Execute()
7 | }
8 |
--------------------------------------------------------------------------------
/examples/apps/ingress/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: basic-ingress
5 | spec:
6 | backend:
7 | serviceName: web
8 | servicePort: 8080
9 |
--------------------------------------------------------------------------------
/packer/variables/triton.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | triton_url: https://us-east-1.api.joyent.com
3 | triton_account: fayazg
4 | triton_key_material: /Users/fayaz.ghiasy/.ssh/id_rsa
5 | triton_key_id: 2c:53:bc:63:97:9e:79:3f:91:35:5e:f4:c8:23:88:37
6 |
--------------------------------------------------------------------------------
/packer/variables/triton-us-east-1.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | triton_url: https://us-east-1.api.joyent.com
3 | triton_account: fayazg
4 | triton_key_material: /Users/fayaz.ghiasy/.ssh/id_rsa
5 | triton_key_id: 2c:53:bc:63:97:9e:79:3f:91:35:5e:f4:c8:23:88:37
6 |
--------------------------------------------------------------------------------
/packer/variables/triton-us-west-1.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | triton_url: https://us-west-1.api.joyent.com
3 | triton_account: fayazg
4 | triton_key_material: /Users/fayaz.ghiasy/.ssh/id_rsa
5 | triton_key_id: 2c:53:bc:63:97:9e:79:3f:91:35:5e:f4:c8:23:88:37
6 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.13"
3 |
4 | required_providers {
5 | triton = {
6 | source = "joyent/triton"
7 | version = "0.8.1"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s-host/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = ">= 0.13"
3 |
4 | required_providers {
5 | triton = {
6 | source = "joyent/triton"
7 | version = "0.8.1"
8 | }
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher/versions.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.13"
4 |
5 | required_providers {
6 | triton = {
7 | source = "joyent/triton"
8 | version = "0.8.1"
9 | }
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rke/files/wait_for_docker_install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Wait for docker to be installed
4 | printf 'Waiting for docker to be installed'
5 | while [ -z "$(command -v docker)" ]; do
6 | printf '.'
7 | sleep 5
8 | done
9 |
10 | # Let things settle
11 | # TODO Figure out why this is needed
12 | sleep 30
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_url" {
2 | value = "https://${local.rancher_master_ip}"
3 | }
4 |
5 | output "rancher_access_key" {
6 | value = data.external.rancher_server.result["name"]
7 | }
8 |
9 | output "rancher_secret_key" {
10 | value = data.external.rancher_server.result["token"]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_url" {
2 | value = "https://${local.rancher_master_ip}"
3 | }
4 |
5 | output "rancher_access_key" {
6 | value = data.external.rancher_server.result["name"]
7 | }
8 |
9 | output "rancher_secret_key" {
10 | value = data.external.rancher_server.result["token"]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_url" {
2 | value = "https://${local.rancher_master_ip}"
3 | }
4 |
5 | output "rancher_access_key" {
6 | value = data.external.rancher_server.result["name"]
7 | }
8 |
9 | output "rancher_secret_key" {
10 | value = data.external.rancher_server.result["token"]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_url" {
2 | value = "https://${local.rancher_master_ip}"
3 | }
4 |
5 | output "rancher_access_key" {
6 | value = data.external.rancher_server.result["name"]
7 | }
8 |
9 | output "rancher_secret_key" {
10 | value = data.external.rancher_server.result["token"]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_url" {
2 | value = "https://${local.rancher_master_ip}"
3 | }
4 |
5 | output "rancher_access_key" {
6 | value = lookup(data.external.rancher_server.result, "name")
7 | }
8 |
9 | output "rancher_secret_key" {
10 | value = lookup(data.external.rancher_server.result, "token")
11 | }
12 |
--------------------------------------------------------------------------------
/packer/rancher-host.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | variables:
3 | - !include variables/triton.yaml
4 |
5 | builders:
6 | - !include builders/triton-kvm-ubuntu.yaml
7 |
8 | provisioners:
9 | - type: shell
10 | inline:
11 | - sudo curl "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh" | sh
12 | - sudo apt-get install jq -y
13 |
--------------------------------------------------------------------------------
/packer/README.md:
--------------------------------------------------------------------------------
1 | k8s-triton-supervisor packer
2 | =========================
3 |
4 | Build
5 | 1. Create a symbolic link, inside `variables/`, for the variable file you want to use: `ln -s triton-us-west-1.yaml triton.yaml`
6 | 1. For each of the yaml files:
7 | 1. Convert the yaml into packer json `./packer-config rancher-host.yaml > rancher-host.json`
8 | 1. Build `packer build rancher-host.json` (a consolidated sequence for all three images is sketched below)
9 |
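10 | A consolidated sketch of the steps above, assuming the `packer-config` helper referenced earlier is available in this directory, `packer` is on your `PATH`, and you run from `packer/`:
11 | 
12 | ```bash
13 | # Point variables/triton.yaml at the region you want (-f overwrites an existing link)
14 | ln -sf triton-us-west-1.yaml variables/triton.yaml
15 | 
16 | # Convert each yaml into packer json, then build it
17 | for target in rancher-host rancher-server rancher-agent; do
18 |     ./packer-config "$target.yaml" > "$target.json"
19 |     packer build "$target.json"
20 | done
21 | ```
22 | 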
--------------------------------------------------------------------------------
/terraform/modules/aks-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = data.external.rancher_cluster.result["cluster_id"]
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = data.external.rancher_cluster.result["registration_token"]
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = data.external.rancher_cluster.result["ca_checksum"]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/terraform/modules/gke-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = data.external.rancher_cluster.result["cluster_id"]
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = data.external.rancher_cluster.result["registration_token"]
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = data.external.rancher_cluster.result["ca_checksum"]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = data.external.rancher_cluster.result["cluster_id"]
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = data.external.rancher_cluster.result["registration_token"]
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = data.external.rancher_cluster.result["ca_checksum"]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/packer/rancher-server.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | variables:
3 | - !include variables/triton.yaml
4 |
5 | builders:
6 | - !include builders/triton-kvm-rancherserver.yaml
7 |
8 | provisioners:
9 | - type: shell
10 | inline:
11 | - sudo curl "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh" | sh
12 | - sudo apt-get install jq -y
13 | - sudo docker pull rancher/server:v1.6.14
14 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = lookup(data.external.rancher_cluster.result, "cluster_id")
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = lookup(data.external.rancher_cluster.result, "registration_token")
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = lookup(data.external.rancher_cluster.result, "ca_checksum")
11 | }
12 |
--------------------------------------------------------------------------------
/terraform/README.md:
--------------------------------------------------------------------------------
1 | triton-kubernetes terraform
2 | ============================
3 |
4 | Overview
5 | - A set of Terraform modules that provision Rancher masters and hosts.
6 |
7 | Getting Started
8 | - Check out this project
9 | - Modify create-rancher.tf or create your own
10 | - `terraform init`
11 | - `terraform apply -target module.create_rancher` # Create Rancher Cluster
12 | - `terraform apply -target module.triton_example` # Create Rancher Environment (the full sequence is shown below)
13 |
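14 | The same steps as a single shell session (a sketch; `create-rancher.tf` and the module names above are placeholders for the module blocks you define in your own configuration):
15 | 
16 | ```bash
17 | git clone https://github.com/joyent/triton-kubernetes.git
18 | cd triton-kubernetes/terraform
19 | # edit create-rancher.tf (or add your own .tf) with your cloud credentials and module blocks
20 | terraform init
21 | terraform apply -target module.create_rancher   # create the Rancher cluster manager
22 | terraform apply -target module.triton_example   # create the Rancher environment
23 | ```
24 | 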
--------------------------------------------------------------------------------
/shell/run_shell_cmd.go:
--------------------------------------------------------------------------------
1 | package shell
2 |
3 | import (
4 | "os"
5 | "os/exec"
6 | )
7 |
8 | func runShellCommand(options *ShellOptions, command string, args ...string) error {
9 | cmd := exec.Command(command, args...)
10 | cmd.Stdin = os.Stdin
11 | cmd.Stdout = os.Stdout
12 | cmd.Stderr = os.Stderr
13 |
14 | if options != nil {
15 | cmd.Dir = options.WorkingDir
16 | }
17 |
18 | err := cmd.Start()
19 | if err != nil {
20 | return err
21 | }
22 |
23 | err = cmd.Wait()
24 | if err != nil {
25 | return err
26 | }
27 |
28 | return nil
29 | }
30 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rke/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_url" {
2 | value = "https://${var.fqdn}"
3 | }
4 |
5 | output "rancher_access_key" {
6 | value = data.external.rancher_server.result["name"]
7 | }
8 |
9 | output "rancher_secret_key" {
10 | value = data.external.rancher_server.result["token"]
11 | }
12 |
13 | output "rke_cluster_yaml" {
14 | sensitive = true
15 | value = rke_cluster.cluster[0].rke_cluster_yaml
16 | }
17 |
18 | output "kube_config_yaml" {
19 | sensitive = true
20 | value = rke_cluster.cluster[0].kube_config_yaml
21 | }
22 |
23 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = data.external.rancher_cluster.result["cluster_id"]
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = data.external.rancher_cluster.result["registration_token"]
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = data.external.rancher_cluster.result["ca_checksum"]
11 | }
12 |
13 | output "gcp_compute_network_name" {
14 | value = google_compute_network.default.name
15 | }
16 |
17 | output "gcp_compute_firewall_host_tag" {
18 | value = "${var.name}-nodes"
19 | }
20 |
21 |
--------------------------------------------------------------------------------
/terraform/modules/files/install_rancher_master.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Wait for docker to be installed
4 | printf 'Waiting for docker to be installed'
5 | while [ -z "$(command -v docker)" ]; do
6 | printf '.'
7 | sleep 5
8 | done
9 |
10 | # Wait for rancher_server_image to finish downloading
11 | printf 'Waiting for Rancher Server Image to download\n'
12 | while [ -z "$(sudo docker images -q ${rancher_server_image})" ]; do
13 | printf '.'
14 | sleep 5
15 | done
16 |
17 | # Run Rancher docker container
18 | sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 ${rancher_server_image}
19 |
--------------------------------------------------------------------------------
/test_pkg/t.go:
--------------------------------------------------------------------------------
1 | package test_pkg
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | "testing"
7 | )
8 |
9 | type T struct {
10 | t *testing.T
11 | }
12 |
13 | func NewT(t *testing.T) T {
14 | return T{t: t}
15 | }
16 |
17 | func (t *T) Fatal(context string, expected, actual interface{}) {
18 | divider := strings.Repeat("=", len(context))
19 |
20 | output := fmt.Sprintf(
21 | "\n\n%v\n%v\n\nexpected:\n\n\t%v\n\ngot:\n\n\t%v\n",
22 | context,
23 | divider,
24 | expected,
25 | actual,
26 | )
27 |
28 | t.t.Fatal(output)
29 | }
30 |
31 | func (t *T) Logf(fmt string, args ...interface{}) {
32 | t.t.Logf(fmt, args...)
33 | }
34 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = data.external.rancher_cluster.result["cluster_id"]
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = data.external.rancher_cluster.result["registration_token"]
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = data.external.rancher_cluster.result["ca_checksum"]
11 | }
12 |
13 | output "aws_subnet_id" {
14 | value = aws_subnet.public.id
15 | }
16 |
17 | output "aws_security_group_id" {
18 | value = aws_security_group.rke_ports.id
19 | }
20 |
21 | output "aws_key_name" {
22 | value = var.aws_key_name
23 | }
24 |
25 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "rancher_cluster" {
2 | program = ["bash", "${path.module}/files/rancher_cluster.sh"]
3 |
4 | query = {
5 | rancher_api_url = var.rancher_api_url
6 | rancher_access_key = var.rancher_access_key
7 | rancher_secret_key = var.rancher_secret_key
8 | name = var.name
9 | k8s_version = var.k8s_version
10 | k8s_network_provider = var.k8s_network_provider
11 | k8s_registry = var.k8s_registry
12 | k8s_registry_username = var.k8s_registry_username
13 | k8s_registry_password = var.k8s_registry_password
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/cmd/version.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/spf13/cobra"
7 | )
8 |
9 | func init() {
10 | rootCmd.AddCommand(versionCmd)
11 | }
12 |
13 | var cliVersion string
14 |
15 | var versionCmd = &cobra.Command{
16 | Use: "version",
17 | Short: "Print the version number of triton-kubernetes",
18 | Long: `All software has versions. This is triton-kubernetes's version.`,
19 | Run: func(cmd *cobra.Command, args []string) {
20 | if cliVersion == "" {
21 | fmt.Print("no version set for this build... ")
22 | cliVersion = "local"
23 | }
24 | fmt.Printf("triton-kubernetes 1.0.1-pre1 (%s)\n", cliVersion)
25 | },
26 | }
27 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "rancher_cluster" {
2 | program = ["bash", "${path.module}/files/rancher_cluster.sh"]
3 |
4 | query = {
5 | rancher_api_url = var.rancher_api_url
6 | rancher_access_key = var.rancher_access_key
7 | rancher_secret_key = var.rancher_secret_key
8 | name = var.name
9 | k8s_version = var.k8s_version
10 | k8s_network_provider = var.k8s_network_provider
11 | k8s_registry = var.k8s_registry
12 | k8s_registry_username = var.k8s_registry_username
13 | k8s_registry_password = var.k8s_registry_password
14 | }
15 | }
16 |
17 |
--------------------------------------------------------------------------------
/packer/builders/triton-kvm-ubuntu.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: triton-kvm-ubuntu
3 |
4 | type: triton
5 | triton_url: '{{user `triton_url`}}'
6 | triton_account: '{{user `triton_account`}}'
7 | triton_key_material: '{{user `triton_key_material`}}'
8 | triton_key_id: '{{user `triton_key_id`}}'
9 |
10 | source_machine_name: packer-builder-ubuntu-16.04-docker17.03-{{timestamp}}
11 | source_machine_image: 342045ce-6af1-4adf-9ef1-e5bfaf9de28c # ubuntu-certified-16.04/20170619.1
12 | source_machine_package: k4-highcpu-kvm-1.75G
13 |
14 | communicator: ssh
15 | ssh_username: ubuntu
16 | ssh_private_key_file: '{{user `triton_key_material`}}'
17 |
18 | image_name: ubuntu-16.04-docker17.03
19 | image_version: 1.0.0
20 |
--------------------------------------------------------------------------------
/packer/builders/triton-kvm-rancheragent.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: triton-kvm-ubuntu
3 |
4 | type: triton
5 | triton_url: '{{user `triton_url`}}'
6 | triton_account: '{{user `triton_account`}}'
7 | triton_key_material: '{{user `triton_key_material`}}'
8 | triton_key_id: '{{user `triton_key_id`}}'
9 |
10 | source_machine_name: packer-builder-ubuntu-16.04-rancheragent-{{timestamp}}
11 | source_machine_image: 342045ce-6af1-4adf-9ef1-e5bfaf9de28c # ubuntu-certified-16.04/20170619.1
12 | source_machine_package: k4-highcpu-kvm-1.75G
13 |
14 | communicator: ssh
15 | ssh_username: ubuntu
16 | ssh_private_key_file: '{{user `triton_key_material`}}'
17 |
18 | image_name: ubuntu-16.04-rancheragent
19 | image_version: 1.0.0
20 |
--------------------------------------------------------------------------------
/packer/builders/triton-kvm-rancherserver.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | name: triton-kvm-ubuntu
3 |
4 | type: triton
5 | triton_url: '{{user `triton_url`}}'
6 | triton_account: '{{user `triton_account`}}'
7 | triton_key_material: '{{user `triton_key_material`}}'
8 | triton_key_id: '{{user `triton_key_id`}}'
9 |
10 | source_machine_name: packer-builder-ubuntu-16.04-rancherserver-{{timestamp}}
11 | source_machine_image: 342045ce-6af1-4adf-9ef1-e5bfaf9de28c # ubuntu-certified-16.04/20170619.1
12 | source_machine_package: k4-highcpu-kvm-1.75G
13 |
14 | communicator: ssh
15 | ssh_username: ubuntu
16 | ssh_private_key_file: '{{user `triton_key_material`}}'
17 |
18 | image_name: ubuntu-16.04-rancherserver
19 | image_version: 1.0.0
20 |
--------------------------------------------------------------------------------
/docs/guide/backup/backup.md:
--------------------------------------------------------------------------------
1 | ## Backup
2 |
3 | `triton-kubernetes` can back up deployments to Manta, or to any S3-compatible storage, using Minio.
4 |
5 | Here is a demo of running a backup against the [demo bare-metal cluster](https://github.com/joyent/triton-kubernetes/blob/master/docs/guide/bare-metal/cluster.md):
6 | [asciinema recording](https://asciinema.org/a/9O7U5UgUtaMZDnARV8KoDahsq)
7 |
8 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode; an example is sketched below. To read about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
9 |
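10 | For example, a cluster manager could be created non-interactively from one of the bundled configs. This is a sketch: the `create manager` subcommand spelling is an assumption based on the CLI's `create` command, so check `triton-kubernetes --help` for the exact form.
11 | 
12 | ```bash
13 | # Hypothetical silent-mode invocation; the config path points at one of the bundled examples
14 | triton-kubernetes create manager --config examples/silent-install/triton/manager-on-triton.yaml
15 | ```
16 | 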
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = data.external.rancher_cluster.result["cluster_id"]
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = data.external.rancher_cluster.result["registration_token"]
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = data.external.rancher_cluster.result["ca_checksum"]
11 | }
12 |
13 | output "azure_resource_group_name" {
14 | value = azurerm_resource_group.resource_group.name
15 | }
16 |
17 | output "azure_network_security_group_id" {
18 | value = azurerm_network_security_group.firewall.id
19 | }
20 |
21 | output "azure_subnet_id" {
22 | value = azurerm_subnet.subnet.id
23 | }
24 |
25 |
--------------------------------------------------------------------------------
/test_pkg/alter.go:
--------------------------------------------------------------------------------
1 | package test_pkg
2 |
3 | import (
4 | "io/ioutil"
5 | "os"
6 | )
7 |
8 | func AlterStdout(f func()) (<-chan []byte, <-chan error) {
9 | errch := make(chan error)
10 | outch := make(chan []byte)
11 |
12 | oldstdout := os.Stdout
13 |
14 | r, w, err := os.Pipe()
15 | if err != nil {
16 | go func() { errch <- err }()
17 | return outch, errch
18 | }
19 |
20 | os.Stdout = w
21 |
22 | f()
23 |
24 | os.Stdout = oldstdout
25 |
26 | go func() {
27 | alteredStdout, err := ioutil.ReadAll(r)
28 | defer r.Close()
29 | if err != nil {
30 | errch <- err
31 | return
32 | }
33 |
34 | outch <- alteredStdout
35 | }()
36 |
37 | w.Close()
38 |
39 | return outch, errch
40 | }
41 |
--------------------------------------------------------------------------------
/examples/silent-install/gcp/manager-on-gcp.yaml:
--------------------------------------------------------------------------------
1 | # This sample config file will create a Cluster Manager which will be running on Google Cloud Platform
2 | backend_provider: local
3 | name: manager-on-gcp
4 | manager_cloud_provider: gcp
5 | private_registry: ""
6 | private_registry_username: ""
7 | private_registry_password: ""
8 | rancher_server_image: ""
9 | rancher_agent_image: ""
10 | gcp_path_to_credentials: ~/gcp.json
11 | gcp_compute_region: us-east1
12 | gcp_instance_zone: us-east1-c
13 | gcp_machine_type: n1-standard-1
14 | gcp_image: ubuntu-1604-xenial-v20180424
15 | gcp_public_key_path: ~/.ssh/id_rsa.pub
16 | gcp_private_key_path: ~/.ssh/id_rsa
17 | gcp_ssh_user: root
18 | rancher_admin_password: admin
19 |
--------------------------------------------------------------------------------
/terraform/modules/k8s-backup-s3/variables.tf:
--------------------------------------------------------------------------------
1 | variable "rancher_api_url" {
2 | }
3 |
4 | variable "rancher_access_key" {
5 | }
6 |
7 | variable "rancher_secret_key" {
8 | }
9 |
10 | variable "rancher_cluster_id" {
11 | }
12 |
13 | variable "aws_access_key" {
14 | default = ""
15 | description = "AWS access key"
16 | }
17 |
18 | variable "aws_secret_key" {
19 | default = ""
20 | description = "AWS secret key"
21 | }
22 |
23 | variable "aws_region" {
24 | default = ""
25 | description = "AWS region where the Heptio ARK backup will be stored."
26 | }
27 |
28 | variable "aws_s3_bucket" {
29 | default = ""
30 | description = "Name of the AWS bucket where the Heptio ARK backup will be stored."
31 | }
32 |
33 |
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s/outputs.tf:
--------------------------------------------------------------------------------
1 | output "rancher_cluster_id" {
2 | value = data.external.rancher_cluster.result["cluster_id"]
3 | }
4 |
5 | output "rancher_cluster_registration_token" {
6 | value = data.external.rancher_cluster.result["registration_token"]
7 | }
8 |
9 | output "rancher_cluster_ca_checksum" {
10 | value = data.external.rancher_cluster.result["ca_checksum"]
11 | }
12 |
13 | output "vsphere_datacenter_name" {
14 | value = var.vsphere_datacenter_name
15 | }
16 |
17 | output "vsphere_datastore_name" {
18 | value = var.vsphere_datastore_name
19 | }
20 |
21 | output "vsphere_resource_pool_name" {
22 | value = var.vsphere_resource_pool_name
23 | }
24 |
25 | output "vsphere_network_name" {
26 | value = var.vsphere_network_name
27 | }
28 |
29 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | bin/terraform*
2 | */terraform.tfstate*
3 | terraform/.terraform*
4 | .terraform*
5 | build/
6 |
7 | triton-kubernetes
8 | main.tf.json
9 |
10 |
11 | # Created by https://www.gitignore.io/api/macos
12 |
13 | ### macOS ###
14 | *.DS_Store
15 | .AppleDouble
16 | .LSOverride
17 |
18 | # Icon must end with two \r
19 | Icon
20 |
21 | # Thumbnails
22 | ._*
23 |
24 | # Files that might appear in the root of a volume
25 | .DocumentRevisions-V100
26 | .fseventsd
27 | .Spotlight-V100
28 | .TemporaryItems
29 | .Trashes
30 | .VolumeIcon.icns
31 | .com.apple.timemachine.donotpresent
32 |
33 | # Directories potentially created on remote AFP share
34 | .AppleDB
35 | .AppleDesktop
36 | Network Trash Folder
37 | Temporary Items
38 | .apdisk
39 |
40 |
41 | # End of https://www.gitignore.io/api/macos
--------------------------------------------------------------------------------
/examples/apps/guestbook/README.md:
--------------------------------------------------------------------------------
1 | # Example: Guestbook application on Kubernetes
2 |
3 | This example is based on the [PHP Guestbook application](https://github.com/kubernetes/examples/tree/master/guestbook) managed by Kubernetes org.
4 |
5 | ## Changes
6 |
7 | To run this example, make sure you have copied and set up your `~/.kube/config`.
8 |
9 | ```bash
10 | # clone the repo
11 | git clone https://github.com/kubernetes/examples.git
12 | # modify the yaml to expose the app
13 | sed -i -- 's/# type: LoadBalancer/type: LoadBalancer/g' examples/guestbook/all-in-one/guestbook-all-in-one.yaml
14 | # run the app
15 | kubectl create -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml
16 | ```
17 |
18 | > This example uses a Service of type LoadBalancer, which might not be available on all clusters.
--------------------------------------------------------------------------------
/examples/silent-install/triton/manager-on-triton.yaml:
--------------------------------------------------------------------------------
1 | # This sample config file will create a Cluster Manager on Joyent's Public Cloud (triton)
2 | backend_provider: local
3 | name: manager-on-triton
4 | manager_cloud_provider: triton
5 | private_registry: ""
6 | private_registry_username: ""
7 | private_registry_password: ""
8 | rancher_server_image: ""
9 | rancher_agent_image: ""
10 | triton_account: fayazg
11 | triton_key_path: ~/.ssh/id_rsa
12 | # triton_key_id: 2c:53:bc:63:97:9e:79:3f:91:35:5e:f4:c8:23:88:37
13 | triton_url: https://us-east-1.api.joyent.com
14 | triton_network_names:
15 | - sdc_nat
16 | triton_image_name: ubuntu-certified-18.04
17 | triton_image_version: 20190627.1.1
18 | triton_ssh_user: ubuntu
19 | master_triton_machine_package: sample-bhyve-flexible-1G
20 | rancher_admin_password: admin
21 |
--------------------------------------------------------------------------------
/terraform/modules/k8s-backup-manta/variables.tf:
--------------------------------------------------------------------------------
1 | variable "rancher_api_url" {
2 | }
3 |
4 | variable "rancher_access_key" {
5 | }
6 |
7 | variable "rancher_secret_key" {
8 | }
9 |
10 | variable "rancher_cluster_id" {
11 | }
12 |
13 | variable "triton_key_path" {
14 | default = ""
15 | description = "The path to a private key that is authorized to communicate with the triton_ssh_host."
16 | }
17 |
18 | variable "triton_account" {
19 | description = "The Triton account name, usually the username of your root user."
20 | }
21 |
22 | variable "triton_key_id" {
23 | description = "The md5 fingerprint of the key at triton_key_path. Obtained by running `ssh-keygen -E md5 -lf ~/path/to.key`"
24 | }
25 |
26 | variable "manta_subuser" {
27 | default = ""
28 | description = "The Manta subuser"
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/backend/backend.go:
--------------------------------------------------------------------------------
1 | package backend
2 |
3 | import (
4 | "github.com/joyent/triton-kubernetes/state"
5 | )
6 |
7 | type Backend interface {
8 | // State returns the current state.
9 | //
10 | // If the named state doesn't exist it will be created.
11 | State(name string) (state.State, error)
12 |
13 | // DeleteState removes the named state if it exists.
14 | //
15 | // DeleteState does not prevent deleting a state that is in use.
16 | DeleteState(name string) error
17 |
18 | // PersistState persists the given state.
19 | PersistState(state state.State) error
20 |
21 | // States returns a list of configured named states.
22 | States() ([]string, error)
23 |
24 | // StateTerraformConfig returns the path and object that
25 | // represents a terraform backend configuration
26 | StateTerraformConfig(name string) (string, interface{})
27 | }
28 |
--------------------------------------------------------------------------------
/util/confirm_prompt.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/manifoldco/promptui"
7 | )
8 |
9 | // PromptForConfirmation returns true if the user selects 'Yes', false if 'No'.
10 | func PromptForConfirmation(label, selected string) (bool, error) {
11 | confirmOptions := []struct {
12 | Name string
13 | Value bool
14 | }{
15 | {"Yes", true},
16 | {"No", false},
17 | }
18 | confirmPrompt := promptui.Select{
19 | Label: label,
20 | Items: confirmOptions,
21 | Templates: &promptui.SelectTemplates{
22 | Label: "{{ . }}?",
23 | Active: fmt.Sprintf("%s {{ .Name | underline }}", promptui.IconSelect),
24 | Inactive: " {{.Name}}",
25 | Selected: fmt.Sprintf(" %s? {{.Name}}", selected),
26 | },
27 | }
28 |
29 | i, _, err := confirmPrompt.Run()
30 | if err != nil {
31 | return false, err
32 | }
33 |
34 | return confirmOptions[i].Value, nil
35 | }
36 |
--------------------------------------------------------------------------------
/terraform/modules/aks-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = ""
11 | }
12 |
13 | variable "rancher_secret_key" {
14 | description = ""
15 | }
16 |
17 | variable "azure_subscription_id" {
18 | }
19 |
20 | variable "azure_client_id" {
21 | }
22 |
23 | variable "azure_client_secret" {
24 | }
25 |
26 | variable "azure_tenant_id" {
27 | }
28 |
29 | variable "azure_environment" {
30 | default = "public"
31 | }
32 |
33 | variable "azure_location" {
34 | }
35 |
36 | variable "azure_size" {
37 | }
38 |
39 | variable "azure_ssh_user" {
40 | default = "root"
41 | }
42 |
43 | variable "azure_public_key_path" {
44 | default = "~/.ssh/id_rsa.pub"
45 | }
46 |
47 | variable "k8s_version" {
48 | default = "1.9.6"
49 | }
50 |
51 | variable "node_count" {
52 | }
53 |
54 |
--------------------------------------------------------------------------------
/terraform/modules/files/rancher_server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # SSH into a remote box and cat a file that contains the rancher server information.
4 |
5 | # Exit if any of the intermediate steps fail
6 | set -e
7 |
8 | # Extract arguments from the input into shell variables.
9 | # jq will ensure that the values are properly quoted
10 | # and escaped for consumption by the shell.
11 | eval "$(jq -r '@sh "id=\(.id) ssh_host=\(.ssh_host) ssh_user=\(.ssh_user) key_path=\(.key_path) file_path=\(.file_path)"')"
12 |
13 | result=$(ssh -o UserKnownHostsFile=/dev/null \
14 | -o StrictHostKeyChecking=no \
15 | -i $key_path \
16 | $ssh_user@$ssh_host \
17 | "cat $file_path")
18 |
19 | name=""
20 | token=""
21 | if [ "$result" != "" ]; then
22 | name=$(echo $result | jq -r .name)
23 | token=$(echo $result | jq -r .token | cut -d: -f2)
24 | fi
25 |
26 | # Safely produce a JSON object containing the result value.
27 | # jq will ensure that the value is properly quoted
28 | # and escaped to produce a valid JSON string.
29 | jq -n --arg name "$name" \
30 | --arg token "$token" \
31 | '{"name":$name,"token":$token}'
32 |
--------------------------------------------------------------------------------
/create/cluster_bare_metal.go:
--------------------------------------------------------------------------------
1 | package create
2 |
3 | import (
4 | "github.com/joyent/triton-kubernetes/backend"
5 | "github.com/joyent/triton-kubernetes/state"
6 | )
7 |
8 | const (
9 | bareMetalRancherKubernetesTerraformModulePath = "terraform/modules/bare-metal-rancher-k8s"
10 | )
11 |
12 | // This struct represents the definition of a Terraform .tf file.
13 | // Marshalled into json this struct can be passed directly to Terraform.
14 | type bareMetalClusterTerraformConfig struct {
15 | baseClusterTerraformConfig
16 | }
17 |
18 | // newBareMetalCluster adds the new cluster to the current state and returns the name of the cluster that was created.
19 | func newBareMetalCluster(remoteBackend backend.Backend, currentState state.State) (string, error) {
20 | baseConfig, err := getBaseClusterTerraformConfig(bareMetalRancherKubernetesTerraformModulePath)
21 | if err != nil {
22 | return "", err
23 | }
24 |
25 | cfg := bareMetalClusterTerraformConfig{
26 | baseClusterTerraformConfig: baseConfig,
27 | }
28 |
29 | // Add new cluster to terraform config
30 | err = currentState.AddCluster("baremetal", cfg.Name, &cfg)
31 | if err != nil {
32 | return "", err
33 | }
34 |
35 | return cfg.Name, nil
36 | }
37 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s-host/files/install_rancher_agent.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This script just wraps https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh
3 | # It disables firewalld on CentOS.
4 | # TODO: Replace firewalld with iptables.
5 |
6 | if [ -n "$(command -v firewalld)" ]; then
7 | sudo systemctl stop firewalld.service
8 | sudo systemctl disable firewalld.service
9 | fi
10 |
11 | sudo curl ${docker_engine_install_url} | sh
12 | sudo service docker stop
13 | sudo bash -c 'echo "{
14 | \"storage-driver\": \"overlay2\"
15 | }" > /etc/docker/daemon.json'
16 | sudo service docker restart
17 |
18 | # Run docker login if requested
19 | if [ "${rancher_registry_username}" != "" ]; then
20 | sudo docker login -u ${rancher_registry_username} -p ${rancher_registry_password} ${rancher_registry}
21 | fi
22 |
23 | # Run Rancher agent container
24 | sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run ${rancher_agent_image} --server ${rancher_api_url} --token ${rancher_cluster_registration_token} --ca-checksum ${rancher_cluster_ca_checksum} --${rancher_node_role}
25 |
--------------------------------------------------------------------------------
/cmd/version_test.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "regexp"
5 | "testing"
6 |
7 | "github.com/joyent/triton-kubernetes/test_pkg"
8 | )
9 |
10 | func TestVersion(t *testing.T) {
11 | tCase := test_pkg.NewT(t)
12 | cliVersion = "beta"
13 |
14 | outch, errch := test_pkg.AlterStdout(func() {
15 | versionCmd.Run(versionCmd, []string{})
16 | })
17 |
18 | expected := "triton-kubernetes 1.0.1-pre1 (beta)\n"
19 |
20 | select {
21 | case err := <-errch:
22 | tCase.Fatal("altering output", nil, err)
23 | case actual := <-outch:
24 | if expected != string(actual) {
25 | tCase.Fatal("output", expected, string(actual))
26 | }
27 | }
28 | }
29 |
30 | func TestMissingVersion(t *testing.T) {
31 | tCase := test_pkg.NewT(t)
32 |
33 | cliVersion = ""
34 |
35 | outch, errch := test_pkg.AlterStdout(func() {
36 | versionCmd.Run(versionCmd, []string{})
37 | })
38 |
39 | match := "no version set for this build"
40 |
41 | select {
42 | case err := <-errch:
43 | tCase.Fatal("altering output", nil, err)
44 | case output := <-outch:
45 | if match, err := regexp.Match(match, output); !match || err != nil {
46 | tCase.Fatal("output contents", match, string(output))
47 | }
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/util/ssh_utils.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "crypto/md5"
5 | "fmt"
6 | "io/ioutil"
7 |
8 | "github.com/manifoldco/promptui"
9 | "golang.org/x/crypto/ssh"
10 | )
11 |
12 | // GetPublicKeyFingerprintFromPrivateKey takes in location of a private key and returns the md5 fingerprint
13 | func GetPublicKeyFingerprintFromPrivateKey(privateKeyPath string) (string, error) {
14 | var fingerprint string
15 | var err error
16 |
17 | key, err := ioutil.ReadFile(privateKeyPath)
18 | if err != nil {
19 | return "", fmt.Errorf("Unable to read private key: %v", err)
20 | }
21 | signer, err := ssh.ParsePrivateKey(key)
22 | if err != nil {
23 | prompt := promptui.Prompt{
24 | Label: "Private Key Password",
25 | Mask: '*',
26 | }
27 | password, _ := prompt.Run()
28 | signer, err = ssh.ParsePrivateKeyWithPassphrase(key, []byte(password))
29 | if err != nil {
30 | return "", fmt.Errorf("Unable to parse private key: %v", err)
31 | }
32 | }
33 | h := md5.New()
34 | h.Write(signer.PublicKey().Marshal())
35 | for i, b := range h.Sum(nil) {
36 | fingerprint += fmt.Sprintf("%02x", b)
37 | if i < len(h.Sum(nil))-1 {
38 | fingerprint += ":"
39 | }
40 | }
41 | return fingerprint, err
42 | }
43 |
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s-host/files/install_rancher_agent.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This script just wraps https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh
3 | # It disables firewalld on CentOS.
4 | # TODO: Replace firewalld with iptables.
5 |
6 | if [ -n "$(command -v firewalld)" ]; then
7 | sudo systemctl stop firewalld.service
8 | sudo systemctl disable firewalld.service
9 | fi
10 |
11 | sudo curl ${docker_engine_install_url} | sh
12 | sudo service docker stop
13 | sudo bash -c 'echo "{
14 | \"storage-driver\": \"overlay2\"
15 | }" > /etc/docker/daemon.json'
16 | sudo service docker restart
17 |
18 | sudo hostnamectl set-hostname ${hostname}
19 |
20 | # Run docker login if requested
21 | if [ "${rancher_registry_username}" != "" ]; then
22 | sudo docker login -u ${rancher_registry_username} -p ${rancher_registry_password} ${rancher_registry}
23 | fi
24 |
25 | # Run Rancher agent container
26 | sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run ${rancher_agent_image} --server ${rancher_api_url} --token ${rancher_cluster_registration_token} --ca-checksum ${rancher_cluster_ca_checksum} --${rancher_node_role}
27 |
--------------------------------------------------------------------------------
/terraform/modules/files/install_docker_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Docker
4 | sudo curl "${docker_engine_install_url}" | sh
5 |
6 | # Needed on CentOS, TODO: Replace firewalld with iptables.
7 | sudo service firewalld stop
8 |
9 | sudo service docker stop
10 | DOCKER_SERVICE=$(systemctl status docker.service --no-pager | grep Loaded | sed 's~\(.*\)loaded (\(.*\)docker.service\(.*\)$~\2docker.service~g')
11 | sed 's~ExecStart=/usr/bin/dockerd -H\(.*\)~ExecStart=/usr/bin/dockerd --graph="/mnt/docker" -H\1~g' $DOCKER_SERVICE > /home/ubuntu/docker.conf && sudo mv /home/ubuntu/docker.conf $DOCKER_SERVICE
12 | sudo mkdir /mnt/docker
13 | sudo bash -c "mv /var/lib/docker/* /mnt/docker/"
14 | sudo rm -rf /var/lib/docker
15 | sudo bash -c 'echo "{
16 | \"storage-driver\": \"overlay2\"
17 | }" > /etc/docker/daemon.json'
18 | sudo systemctl daemon-reload
19 | sudo systemctl restart docker
20 |
21 | sudo adduser ubuntu docker
22 |
23 | # Run docker login if requested
24 | if [ "${rancher_registry_username}" != "" ]; then
25 | sudo docker login -u ${rancher_registry_username} -p ${rancher_registry_password} ${rancher_registry}
26 | fi
27 |
28 | # Pull the rancher_server_image in preparation of running it
29 | sudo docker pull ${rancher_server_image}
--------------------------------------------------------------------------------
/terraform/modules/gke-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = ""
11 | }
12 |
13 | variable "rancher_secret_key" {
14 | description = ""
15 | }
16 |
17 | variable "gcp_path_to_credentials" {
18 | description = "Location of GCP JSON credentials file."
19 | }
20 |
21 | variable "gcp_project_id" {
22 | description = "GCP project ID that will be running the instances and managing the network"
23 | }
24 |
25 | variable "gcp_compute_region" {
26 | description = "GCP region to host your network"
27 | }
28 |
29 | variable "gcp_zone" {
30 | description = "Zone to deploy GKE cluster in"
31 | }
32 |
33 | variable "gcp_additional_zones" {
34 | type = list(string)
35 | description = "Zones to deploy GKE cluster nodes in"
36 | }
37 |
38 | variable "gcp_machine_type" {
39 | default = "n1-standard-1"
40 | description = "GCP machine type to launch the instance with"
41 | }
42 |
43 | variable "k8s_version" {
44 | default = "1.8.8-gke.0"
45 | }
46 |
47 | variable "node_count" {
48 | }
49 |
50 | variable "password" {
51 | }
52 |
53 |
--------------------------------------------------------------------------------
/docs/guide/bare-metal/cluster.md:
--------------------------------------------------------------------------------
1 | ## Cluster
2 |
3 | A cluster is a group of physical (or virtual) computers that share resources to accomplish tasks as if they were a single system.
4 | The `create cluster` command allows you to create dedicated nodes for the etcd, worker, and control roles. Creating a cluster requires a cluster manager to be running already.
5 |
6 | To check if a cluster manager exists, run the following:
7 |
8 | ```
9 | $ triton-kubernetes get manager
10 | ```
11 |
12 | Below is a demo of how to create a bare metal cluster:
13 | [](https://asciinema.org/a/HAIwMBxNDk2yylCLXk488zAal)
14 |
15 | To destroy a cluster, run the following:
16 |
17 | ```
18 | $ triton-kubernetes destroy cluster
19 | ✔ Backend Provider: Local
20 | ✔ Cluster Manager: dev-manager
21 | ✔ Cluster: dev-cluster
22 | Destroy "dev-cluster"? Yes
23 | ```
24 |
25 | To get cluster details, run the following:
26 |
27 | ```
28 | $ triton-kubernetes get cluster
29 | ```
30 |
31 |
32 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode. To read more about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
33 |
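For example, creating a cluster in silent mode looks something like the following (the config file name is only an illustration; the fields it needs are described in the silent-install documentation linked above):

```
$ triton-kubernetes create cluster --config cluster.yaml
```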
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = ""
11 | }
12 |
13 | variable "rancher_secret_key" {
14 | description = ""
15 | }
16 |
17 | variable "k8s_version" {
18 | default = "v1.18.12-rancher1-1"
19 | }
20 |
21 | variable "k8s_network_provider" {
22 | default = "flannel"
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for Rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "k8s_registry" {
41 | default = ""
42 | description = "The docker registry to use for Kubernetes images"
43 | }
44 |
45 | variable "k8s_registry_username" {
46 | default = ""
47 | description = "The username to login as."
48 | }
49 |
50 | variable "k8s_registry_password" {
51 | default = ""
52 | description = "The password to use."
53 | }
54 |
55 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 | go:
3 | - 1.9.x
4 | before_script:
5 | - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
6 | script:
7 | - dep ensure
8 | - go test ./...
9 | before_deploy:
10 | - sudo apt-get install ruby ruby-dev build-essential rpm
11 | - gem install --no-ri --no-rdoc fpm
12 | - make build VERSION=`echo ${TRAVIS_TAG//v/}`
13 | deploy:
14 | provider: releases
15 | api-key:
16 | secure: "IsRXeSnyLrj2wc5SeCKMtlKZ+IcHoCkQnAd9aA5dB2n0tEux1Ixy2B48iGvGnEpViaPK+fyyZooumhVdq7xM+zofHdetWtngi8X49CprTAG4jgnbclEO9f45G0DA3nZFw/SJVhmssGqLUoPYOaN2h3dEKC9aylEoou0H8WAo3lRXPy0Rt+yleTDz+/9zTqiqTEOUxL1LQONYcE9VSVM1Lb85pzLYafIhqmys5UgwA7B9/zebdv9kFduy1iAQhGxJFVBk3tSvWMv3Ped+iMVC8h+VLpEywaLGwEw+1JhpYVzqFzBqEkzzPsLwo0HPOK7JWRT9QPSa4xbd8hu3wc5HdhN87qM6NsXhkbeVvksMqaUYJja/k+V705cgkuVOUskHwaEyi3o1WxpTTLRxfppm6zl6A4byIq6YyceoPz8Nzp12Lc3QWCj0d31iZgxGJwtXg/7Waj1ErVe3mXk6HICSc0jaa0nLLI90Gk0lr2NPFlprNCfatXFu/K2zqpekmJho//QU8/fbIlhje0I94Nm/+TWViuEqMP8fAS7DRHCPIlkJ9pNGFxOJdhCD25iUzVhWEq0lkh8beNDeHXpI6rsBXduyQHup8SmJWLP0b4QOiF46eo3zMF3ToVF+y21SFMXKDfwKw0fVxOQiQr/pPwZO9oKrZRtIOu6T8vflo/enq7o="
17 | file_glob: true
18 | file: build/*
19 | skip_cleanup: true
20 | on:
21 | tags: true
22 | condition: $TRAVIS_TAG =~ ^v[0-9]+.[0-9]+.[0-9]+
23 | # whitelist
24 | branches:
25 | only:
26 | - master
27 |
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "rancher_cluster" {
2 | program = ["bash", "${path.module}/files/rancher_cluster.sh"]
3 |
4 | query = {
5 | rancher_api_url = var.rancher_api_url
6 | rancher_access_key = var.rancher_access_key
7 | rancher_secret_key = var.rancher_secret_key
8 | name = var.name
9 | k8s_version = var.k8s_version
10 | k8s_network_provider = var.k8s_network_provider
11 | k8s_registry = var.k8s_registry
12 | k8s_registry_username = var.k8s_registry_username
13 | k8s_registry_password = var.k8s_registry_password
14 | }
15 | }
16 |
17 | provider "vsphere" {
18 | user = var.vsphere_user
19 | password = var.vsphere_password
20 | vsphere_server = var.vsphere_server
21 |
22 | allow_unverified_ssl = "true"
23 | }
24 |
25 | data "vsphere_datacenter" "dc" {
26 | name = var.vsphere_datacenter_name
27 | }
28 |
29 | data "vsphere_datastore" "datastore" {
30 | name = var.vsphere_datastore_name
31 | datacenter_id = data.vsphere_datacenter.dc.id
32 | }
33 |
34 | data "vsphere_resource_pool" "pool" {
35 | name = var.vsphere_resource_pool_name
36 | datacenter_id = data.vsphere_datacenter.dc.id
37 | }
38 |
39 | data "vsphere_network" "network" {
40 | name = var.vsphere_network_name
41 | datacenter_id = data.vsphere_datacenter.dc.id
42 | }
43 |
44 |
--------------------------------------------------------------------------------
/get/manager_test.go:
--------------------------------------------------------------------------------
1 | package get
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/joyent/triton-kubernetes/backend/mocks"
7 | "github.com/spf13/viper"
8 | )
9 |
10 | func TestNoClusterManager(t *testing.T) {
11 |
12 | localBackend := &mocks.Backend{}
13 | localBackend.On("States").Return([]string{}, nil)
14 |
15 | expected := "No cluster managers."
16 |
17 | err := GetManager(localBackend)
18 | if expected != err.Error() {
19 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
20 | }
21 | }
22 |
23 | func TestMissingClusterManager(t *testing.T) {
24 | viper.Set("non-interactive", true)
25 |
26 | localBackend := &mocks.Backend{}
27 | localBackend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
28 |
29 | expected := "cluster_manager must be specified"
30 |
31 | err := GetManager(localBackend)
32 | if expected != err.Error() {
33 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
34 | }
35 | }
36 |
37 | func TestClusterManagerNotExists(t *testing.T) {
38 | viper.Set("cluster_manager", "prod-cluster")
39 |
40 | localBackend := &mocks.Backend{}
41 | localBackend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
42 |
43 | expected := "Selected cluster manager 'prod-cluster' does not exist."
44 |
45 | err := GetManager(localBackend)
46 | if expected != err.Error() {
47 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/docs/guide/bare-metal/cluster-manager.md:
--------------------------------------------------------------------------------
1 | ## Cluster Manager
2 |
3 | Cluster Managers can manage multiple clusters across regions/data-centers and/or clouds. They can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
4 |
5 | To create a cluster manager, run the following:
6 | ```
7 | $ triton-kubernetes create manager
8 | ✔ Backend Provider: Local
9 | ✔ Cloud Provider: BareMetal
10 | ✔ Cluster Manager Name: test
11 | ✔ Private Registry: None
12 | ✔ Rancher Server Image: Default
13 | ✔ Rancher Agent Image: Default
14 | ✔ Set UI Admin Password: ****
15 | ✔ Host/IP for cluster manager: 10.25.65.44
16 | ✔ SSH User: ubuntu
17 | ✔ Bastion Host: None
18 | ✔ Key Path: ~/.ssh/id_rsa
19 | Proceed? Yes
20 | ```
21 |
22 | To destroy a cluster manager, run the following:
23 |
24 | ```
25 | $ triton-kubernetes destroy manager
26 | ✔ Backend Provider: Local
27 | ✔ Cluster Manager: dev-manager
28 | Destroy "dev-manager"? Yes
29 | ```
30 | > Note: Destroying a cluster manager will destroy all your clusters and nodes attached to it.
31 |
32 | To get cluster manager details, run the following:
33 |
34 | ```
35 | $ triton-kubernetes get manager
36 | ```
37 |
38 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode. To read about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
39 |
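For example, creating a cluster manager in silent mode looks something like this (the config file name is only a placeholder; the expected fields are covered in the silent-install documentation linked above):

```
$ triton-kubernetes create manager --config manager.yaml
```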
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s-host/main.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | rancher_node_role = element(keys(var.rancher_host_labels), 0)
3 | }
4 |
5 | data "template_file" "install_rancher_agent" {
6 | template = file("${path.module}/files/install_rancher_agent.sh.tpl")
7 |
8 | vars = {
9 | docker_engine_install_url = var.docker_engine_install_url
10 | rancher_api_url = var.rancher_api_url
11 | rancher_cluster_registration_token = var.rancher_cluster_registration_token
12 | rancher_cluster_ca_checksum = var.rancher_cluster_ca_checksum
13 | rancher_node_role = local.rancher_node_role == "control" ? "controlplane" : local.rancher_node_role
14 | rancher_agent_image = var.rancher_agent_image
15 | rancher_registry = var.rancher_registry
16 | rancher_registry_username = var.rancher_registry_username
17 | rancher_registry_password = var.rancher_registry_password
18 | }
19 | }
20 |
21 | resource "null_resource" "install_rancher_agent" {
22 | triggers = {
23 | host = var.host
24 | }
25 |
26 | connection {
27 | type = "ssh"
28 | user = var.ssh_user
29 | bastion_host = var.bastion_host
30 | host = var.host
31 | private_key = file(var.key_path)
32 | }
33 |
34 | provisioner "remote-exec" {
35 | inline = [
36 | data.template_file.install_rancher_agent.rendered,
37 | ]
38 | }
39 | }
21 | {[]string{"foo", "bar"}, "test", 3, []string{"test-1", "test-2", "test-3"}},
22 | {[]string{"test"}, "test", 3, []string{"test-1", "test-2", "test-3"}},
23 | {[]string{"test-1", "test-2", "bar-3", "bar-4"}, "test", 3, []string{"test-3", "test-4", "test-5"}},
24 | }
25 |
26 | func TestGetNewHostnames(t *testing.T) {
27 | for _, tc := range getNewHostnamesTestCases {
28 | output := getNewHostnames(tc.ExistingNames, tc.NodeName, tc.NodesToAdd)
29 | if !isEqual(tc.Expected, output) {
30 | msg := fmt.Sprintf("\nInput: (%q, %q, %d)\n", tc.ExistingNames, tc.NodeName, tc.NodesToAdd)
31 | msg += fmt.Sprintf("Output: %q\n", output)
32 | msg += fmt.Sprintf("Expected: %q\n", tc.Expected)
33 | t.Error(msg)
34 | }
35 | }
36 | }
37 |
38 | func isEqual(expected, actual []string) bool {
39 | if len(expected) != len(actual) {
40 | return false
41 | }
42 | for index, expectedItem := range expected {
43 | if actual[index] != expectedItem {
44 | return false
45 | }
46 | }
47 | return true
48 | }
49 |
--------------------------------------------------------------------------------
/examples/silent-install/gcp/cluster-gcp-ha.yaml:
--------------------------------------------------------------------------------
1 | # This example config file will create an HA Kubernetes cluster on GCP attached to the manager-on-triton Cluster Manager
2 | cluster_manager: manager-on-triton
3 | backend_provider: local
4 | name: gcp-ha
5 | cluster_cloud_provider: gcp
6 | k8s_version: v1.17.6-rancher2-1
7 | k8s_network_provider: calico
8 | private_registry: ""
9 | private_registry_username: ""
10 | private_registry_password: ""
11 | k8s_registry: ""
12 | k8s_registry_username: ""
13 | k8s_registry_password: ""
14 | gcp_path_to_credentials: ~/gcp.json
15 | gcp_compute_region: us-east1
16 | nodes:
17 | - node_count: 3
18 | rancher_host_label: etcd
19 | hostname: gcp-ha-e
20 | gcp_instance_zone: us-east1-c
21 | gcp_machine_type: n1-standard-1
22 | gcp_image: ubuntu-1604-xenial-v20180424
23 | gcp_public_key_path: ~/.ssh/id_rsa.pub
24 | gcp_private_key_path: ~/.ssh/id_rsa
25 | gcp_ssh_user: root
26 | - node_count: 3
27 | rancher_host_label: control
28 | hostname: gcp-ha-c
29 | gcp_instance_zone: us-east1-c
30 | gcp_machine_type: n1-standard-1
31 | gcp_image: ubuntu-1604-xenial-v20180424
32 | gcp_public_key_path: ~/.ssh/id_rsa.pub
33 | gcp_private_key_path: ~/.ssh/id_rsa
34 | gcp_ssh_user: root
35 | - node_count: 4
36 | rancher_host_label: worker
37 | hostname: gcp-ha-w
38 | gcp_instance_zone: us-east1-c
39 | gcp_machine_type: n1-standard-1
40 | gcp_image: ubuntu-1604-xenial-v20180424
41 | gcp_public_key_path: ~/.ssh/id_rsa.pub
42 | gcp_private_key_path: ~/.ssh/id_rsa
43 | gcp_ssh_user: root
44 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = ""
11 | }
12 |
13 | variable "rancher_secret_key" {
14 | description = ""
15 | }
16 |
17 | variable "k8s_version" {
18 | default = "v1.18.12-rancher1-1"
19 | }
20 |
21 | variable "k8s_network_provider" {
22 | default = "flannel"
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for Rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "k8s_registry" {
41 | default = ""
42 | description = "The docker registry to use for Kubernetes images"
43 | }
44 |
45 | variable "k8s_registry_username" {
46 | default = ""
47 | description = "The username to login as."
48 | }
49 |
50 | variable "k8s_registry_password" {
51 | default = ""
52 | description = "The password to use."
53 | }
54 |
55 | variable "gcp_path_to_credentials" {
56 | description = "Location of GCP JSON credentials file."
57 | }
58 |
59 | variable "gcp_compute_region" {
60 | description = "GCP region to host your network"
61 | }
62 |
63 | variable "gcp_project_id" {
64 | description = "GCP project ID that will be running the instances and managing the network"
65 | }
66 |
67 |
--------------------------------------------------------------------------------
/destroy/manager_test.go:
--------------------------------------------------------------------------------
1 | package destroy
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/joyent/triton-kubernetes/backend/mocks"
7 | "github.com/spf13/viper"
8 | )
9 |
10 | func TestDeleteManagerNoClusterManager(t *testing.T) {
11 |
12 | localBackend := &mocks.Backend{}
13 | localBackend.On("States").Return([]string{}, nil)
14 |
15 | expected := "No cluster managers, please create a cluster manager before creating a kubernetes cluster."
16 |
17 | err := DeleteManager(localBackend)
18 | if expected != err.Error() {
19 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
20 | }
21 | }
22 |
23 | func TestDeleteManagerMissingClusterManager(t *testing.T) {
24 | viper.Reset()
25 | viper.Set("non-interactive", true)
26 |
27 | localBackend := &mocks.Backend{}
28 | localBackend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
29 |
30 | expected := "cluster_manager must be specified"
31 |
32 | err := DeleteManager(localBackend)
33 | if expected != err.Error() {
34 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
35 | }
36 | }
37 |
38 | func TestDeleteManagerNotExist(t *testing.T) {
39 | viper.Reset()
40 | viper.Set("non-interactive", true)
41 | viper.Set("cluster_manager", "prod-cluster")
42 |
43 | localBackend := &mocks.Backend{}
44 | localBackend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
45 |
46 | expected := "Selected cluster manager 'prod-cluster' does not exist."
47 |
48 | err := DeleteManager(localBackend)
49 | if expected != err.Error() {
50 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
51 | }
52 | }
53 |
--------------------------------------------------------------------------------
/docs/guide/gcp/cluster-manager.md:
--------------------------------------------------------------------------------
1 | ## Cluster Manager
2 |
3 | Cluster Managers can manage multiple clusters across regions/data-centers and/or clouds. They can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
4 |
5 | To create a cluster manager, run the following:
6 | ```
7 | $ triton-kubernetes create manager
8 | ✔ Backend Provider: Local
9 | ✔ Cloud Provider: GCP
10 | ✔ Cluster Manager Name: dev-manager
11 | ✔ Private Registry: None
12 | ✔ Rancher Server Image: Default
13 | ✔ Rancher Agent Image: Default
14 | ✔ Set UI Admin Password: *****
15 | ✔ Path to Google Cloud Platform Credentials File: ~/gcp.json
16 | ✔ GCP Compute Region: us-east1
17 | ✔ GCP Instance Zone: us-east1-c
18 | ✔ GCP Machine Type: n1-standard-1
19 | ✔ GCP Image: ubuntu-1604-xenial-v20180424
20 | ✔ GCP Public Key Path: ~/.ssh/id_rsa.pub
21 | ✔ GCP Private Key Path: ~/.ssh/id_rsa
22 | ✔ GCP SSH User: root
23 | Proceed? Yes
24 | ```
25 |
26 | To destroy a cluster manager, run the following:
27 |
28 | ```
29 | $ triton-kubernetes destroy manager
30 | ✔ Backend Provider: Local
31 | ✔ Cluster Manager: dev-manager
32 | Destroy "dev-manager"? Yes
33 | ```
34 | > Note: Destroying a cluster manager will destroy all your clusters and nodes attached to it.
35 |
36 | To get cluster manager details, run the following:
37 |
38 | ```
39 | $ triton-kubernetes get manager
40 | ```
41 |
42 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode. To read about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
--------------------------------------------------------------------------------
/get/manager.go:
--------------------------------------------------------------------------------
1 | package get
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 |
7 | "github.com/joyent/triton-kubernetes/backend"
8 | "github.com/joyent/triton-kubernetes/shell"
9 |
10 | "github.com/manifoldco/promptui"
11 | "github.com/spf13/viper"
12 | )
13 |
14 | func GetManager(remoteBackend backend.Backend) error {
15 | nonInteractiveMode := viper.GetBool("non-interactive")
16 | clusterManagers, err := remoteBackend.States()
17 | if err != nil {
18 | return err
19 | }
20 |
21 | if len(clusterManagers) == 0 {
22 | return fmt.Errorf("No cluster managers.")
23 | }
24 |
25 | selectedClusterManager := ""
26 | if viper.IsSet("cluster_manager") {
27 | selectedClusterManager = viper.GetString("cluster_manager")
28 | } else if nonInteractiveMode {
29 | return errors.New("cluster_manager must be specified")
30 | } else {
31 | prompt := promptui.Select{
32 | Label: "Cluster Manager",
33 | Items: clusterManagers,
34 | }
35 |
36 | _, value, err := prompt.Run()
37 | if err != nil {
38 | return err
39 | }
40 |
41 | selectedClusterManager = value
42 | }
43 |
44 | // Verify selected cluster manager exists
45 | found := false
46 | for _, clusterManager := range clusterManagers {
47 | if selectedClusterManager == clusterManager {
48 | found = true
49 | break
50 | }
51 | }
52 | if !found {
53 | return fmt.Errorf("Selected cluster manager '%s' does not exist.", selectedClusterManager)
54 | }
55 |
56 | state, err := remoteBackend.State(selectedClusterManager)
57 | if err != nil {
58 | return err
59 | }
60 |
61 | err = shell.RunTerraformOutputWithState(state, "cluster-manager")
62 | if err != nil {
63 | return err
64 | }
65 |
66 | return nil
67 | }
68 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher-k8s-host/variables.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | description = ""
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_cluster_registration_token" {
10 | }
11 |
12 | variable "rancher_cluster_ca_checksum" {
13 | }
14 |
15 | variable "rancher_host_labels" {
16 | type = map(string)
17 | description = "A map of key/value pairs that get passed to the rancher agent on the host."
18 | }
19 |
20 | variable "rancher_agent_image" {
21 | default = "rancher/rancher-agent:v2.4.11"
22 | description = "The Rancher Agent image to use. This can be a URL to a private registry; leverage the docker_login_* variables to authenticate to the registry."
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "docker_engine_install_url" {
41 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
42 | description = "The URL to the shell script to install the docker engine."
43 | }
44 |
45 | variable "ssh_user" {
46 | default = "ubuntu"
47 | description = ""
48 | }
49 |
50 | variable "host" {
51 | description = ""
52 | }
53 |
54 | variable "bastion_host" {
55 | default = ""
56 | description = ""
57 | }
58 |
59 | variable "key_path" {
60 | default = "~/.ssh/id_rsa"
61 | description = ""
62 | }
63 |
64 |
--------------------------------------------------------------------------------
/docs/guide/triton/cluster-manager.md:
--------------------------------------------------------------------------------
1 | ## Cluster Manager
2 |
3 | Cluster Managers can manage multiple clusters across regions/data-centers and/or clouds. They can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
4 |
5 | To create a cluster manager, run the following:
6 | ```
7 | $ triton-kubernetes create manager
8 | ✔ Backend Provider: Local
9 | ✔ Cloud Provider: Triton
10 | ✔ Cluster Manager Name: dev-manager
11 | ✔ Private Registry: None
12 | ✔ Rancher Server Image: Default
13 | ✔ Rancher Agent Image: Default
14 | ✔ Set UI Admin Password: ****
15 | ✔ Triton Account Name: [changeme]
16 | ✔ Triton Key Path: ~/.ssh/id_rsa
17 | ✔ Triton URL: https://us-east-1.api.joyent.com
18 | ✔ Triton Networks: Joyent-SDC-Public
19 | Attach another? No
20 | ✔ Triton Image: ubuntu-certified-16.04@20180222
21 | ✔ Triton SSH User: ubuntu
22 | ✔ Rancher Master Triton Machine Package: k4-highcpu-kvm-1.75G
23 | Proceed? Yes
24 | ```
25 |
26 | To destroy a cluster manager, run the following:
27 |
28 | ```
29 | $ triton-kubernetes destroy manager
30 | ✔ Backend Provider: Local
31 | ✔ Cluster Manager: dev-manager
32 | Destroy "dev-manager"? Yes
33 | ```
34 | > Note: Destroying a cluster manager will destroy all your clusters and nodes attached to it.
35 |
36 | To get cluster manager details, run the following:
37 |
38 | ```
39 | $ triton-kubernetes get manager
40 | ```
41 |
42 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode. To read about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
43 |
--------------------------------------------------------------------------------
/docs/guide/building-cli.md:
--------------------------------------------------------------------------------
1 | # Build and install `triton-kubernetes`
2 |
3 | ## Install Go
4 |
5 | - [Download and Install](https://github.com/golang/go#download-and-install)
6 |
7 | ## Setting `GOPATH`
8 |
9 | `GOPATH` can be any directory on your system. In Unix examples, we will set it to `$HOME/go`. Another common setup is to set `GOPATH=$HOME`.
10 |
11 | ### Bash
12 |
13 | Edit your `~/.bash_profile` to add the following line:
14 |
15 | ```bash
16 | export GOPATH=$HOME/go
17 | ```
18 |
19 | Save and exit your editor. Then, source your `~/.bash_profile`.
20 |
21 | ```bash
22 | source ~/.bash_profile
23 | ```
24 |
25 | Note: Set the `GOBIN` path so that `go install` places the generated binary there.
26 |
27 | ```bash
28 | export GOBIN=$HOME/go/bin
29 | ```
30 |
31 | ### Zsh
32 |
33 | Edit your `~/.zshrc` file to add the following line:
34 |
35 | ```bash
36 | export GOPATH=$HOME/go
37 | ```
38 |
39 | Save and exit your editor. Then, source your `~/.zshrc`.
40 |
41 | ```bash
42 | source ~/.zshrc
43 | ```
44 |
45 | ### Build binary via Makefile
46 |
47 | To build binaries for `osx`, `linux`, `linux-rpm`, and `linux-debian` under the `build` folder, run the following:
48 |
49 | ```bash
50 | make build
51 | ```
52 |
53 | ### Install and Run
54 |
55 | - Clone this repository into `$GOPATH/src/github.com/joyent/triton-kubernetes`
56 | - Run `go get` and `go install` from that directory
57 |
58 | This will build the `triton-kubernetes` binary into `$GOBIN`.
59 |
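Putting those steps together, a typical build-from-source session looks like this; the exact paths assume the `GOPATH` and `GOBIN` setup described above:

```bash
# Clone the repository into the expected GOPATH location
mkdir -p $GOPATH/src/github.com/joyent
git clone https://github.com/joyent/triton-kubernetes.git $GOPATH/src/github.com/joyent/triton-kubernetes
cd $GOPATH/src/github.com/joyent/triton-kubernetes

# Fetch dependencies and install the binary into GOBIN
go get
go install
```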
60 | You can now run the CLI in your terminal as shown below:
61 |
62 | ```bash
63 | triton-kubernetes --help
64 | ```
65 |
66 | Note: To build the project with an embedded git hash:
67 |
68 | ```bash
69 | go build -ldflags "-X main.GitHash=$(git rev-list -1 HEAD)"
70 | ```
71 |
--------------------------------------------------------------------------------
/examples/silent-install/triton/cluster-triton-ha.yaml:
--------------------------------------------------------------------------------
1 | # This example config file will create an HA Kubernetes cluster on Joyent Cloud (Triton) attached to manager-on-triton Cluster Manager
2 | cluster_manager: manager-on-triton
3 | backend_provider: local
4 | name: triton-ha
5 | cluster_cloud_provider: triton
6 | k8s_version: v1.18.12-rancher1-1
7 | k8s_network_provider: flannel
8 | private_registry: ""
9 | private_registry_username: ""
10 | private_registry_password: ""
11 | k8s_registry: ""
12 | k8s_registry_username: ""
13 | k8s_registry_password: ""
14 | triton_account: fayazg
15 | triton_key_path: ~/.ssh/id_rsa
16 | # triton_key_id: 2c:53:bc:63:97:9e:79:3f:91:35:5e:f4:c8:23:88:37
17 | triton_url: https://us-east-1.api.joyent.com
18 | nodes:
19 | - node_count: 3
20 | rancher_host_label: etcd
21 | hostname: triton-ha-e
22 | triton_network_names:
23 | - Joyent-SDC-Public
24 | triton_image_name: ubuntu-certified-18.04
25 | triton_image_version: 20190627.1.1
26 | triton_ssh_user: ubuntu
27 | triton_machine_package: sample-bhyve-flexible-1G
28 | - node_count: 3
29 | rancher_host_label: control
30 | hostname: triton-ha-c
31 | triton_network_names:
32 | - Joyent-SDC-Public
33 | triton_image_name: ubuntu-certified-18.04
34 | triton_image_version: 20190627.1.1
35 | triton_ssh_user: ubuntu
36 | triton_machine_package: sample-bhyve-flexible-1G
37 | - node_count: 4
38 | rancher_host_label: worker
39 | hostname: triton-ha-w
40 | triton_network_names:
41 | - Joyent-SDC-Public
42 | triton_image_name: ubuntu-certified-18.04
43 | triton_image_version: 20190627.1.1
44 | triton_ssh_user: ubuntu
45 | triton_machine_package: sample-bhyve-flexible-1G
46 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s-host/files/install_rancher_agent.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This script just wraps https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh
3 | # It disables firewalld on CentOS.
4 | # TODO: Replace firewalld with iptables.
5 |
6 | if [ -n "$(command -v firewalld)" ]; then
7 | sudo systemctl stop firewalld.service
8 | sudo systemctl disable firewalld.service
9 | fi
10 |
11 | sudo curl ${docker_engine_install_url} | sh
12 |
13 | sudo service docker stop
14 | DOCKER_SERVICE=$(systemctl status docker.service --no-pager | grep Loaded | sed 's~\(.*\)loaded (\(.*\)docker.service\(.*\)$~\2docker.service~g')
15 | sed 's~ExecStart=/usr/bin/dockerd -H\(.*\)~ExecStart=/usr/bin/dockerd --graph="/mnt/docker" -H\1~g' $DOCKER_SERVICE > /home/ubuntu/docker.conf && sudo mv /home/ubuntu/docker.conf $DOCKER_SERVICE
16 | sudo mkdir /mnt/docker
17 | sudo bash -c "mv /var/lib/docker/* /mnt/docker/"
18 | sudo rm -rf /var/lib/docker
19 | sudo bash -c 'echo "{
20 | \"storage-driver\": \"overlay2\"
21 | }" > /etc/docker/daemon.json'
22 | sudo systemctl daemon-reload
23 | sudo systemctl restart docker
24 |
25 | sudo hostnamectl set-hostname ${hostname}
26 |
27 | # Run docker login if requested
28 | if [ "${rancher_registry_username}" != "" ]; then
29 | sudo docker login -u ${rancher_registry_username} -p ${rancher_registry_password} ${rancher_registry}
30 | fi
31 |
32 | # Run Rancher agent container
33 | sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run ${rancher_agent_image} --server ${rancher_api_url} --token ${rancher_cluster_registration_token} --ca-checksum ${rancher_cluster_ca_checksum} --${rancher_node_role}
34 |
--------------------------------------------------------------------------------
/terraform/modules/bare-metal-rancher/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_admin_password" {
6 | description = "The Rancher admin password"
7 | }
8 |
9 | variable "docker_engine_install_url" {
10 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
11 | description = "The URL to the shell script to install the docker engine."
12 | }
13 |
14 | variable "rancher_server_image" {
15 | default = "rancher/rancher:v2.4.11"
16 | description = "The Rancher Server image to use. This can be a URL to a private registry; leverage the docker_login_* variables to authenticate to the registry."
17 | }
18 |
19 | variable "rancher_agent_image" {
20 | default = "rancher/rancher-agent:v2.4.11"
21 | description = "The Rancher Agent image to use. This can be a URL to a private registry; leverage the docker_login_* variables to authenticate to the registry."
22 | }
23 |
24 | variable "rancher_registry" {
25 | default = ""
26 | description = "The docker registry to use for rancher server and agent images"
27 | }
28 |
29 | variable "rancher_registry_username" {
30 | default = ""
31 | description = "The username to login as."
32 | }
33 |
34 | variable "rancher_registry_password" {
35 | default = ""
36 | description = "The password to use."
37 | }
38 |
39 | variable "ssh_user" {
40 | default = "ubuntu"
41 | description = ""
42 | }
43 |
44 | variable "host" {
45 | description = ""
46 | }
47 |
48 | variable "bastion_host" {
49 | default = ""
50 | description = ""
51 | }
52 |
53 | variable "key_path" {
54 | default = "~/.ssh/id_rsa"
55 | description = ""
56 | }
57 |
58 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "rancher_cluster" {
2 | program = ["bash", "${path.module}/files/rancher_cluster.sh"]
3 |
4 | query = {
5 | rancher_api_url = var.rancher_api_url
6 | rancher_access_key = var.rancher_access_key
7 | rancher_secret_key = var.rancher_secret_key
8 | name = var.name
9 | k8s_version = var.k8s_version
10 | k8s_network_provider = var.k8s_network_provider
11 | k8s_registry = var.k8s_registry
12 | k8s_registry_username = var.k8s_registry_username
13 | k8s_registry_password = var.k8s_registry_password
14 | }
15 | }
16 |
17 | provider "google" {
18 | credentials = file(var.gcp_path_to_credentials)
19 | project = var.gcp_project_id
20 | region = var.gcp_compute_region
21 | }
22 |
23 | resource "google_compute_network" "default" {
24 | name = var.name
25 | auto_create_subnetworks = "true"
26 | }
27 |
28 | # Firewall requirements taken from:
29 | # https://rancher.com/docs/rancher/v2.0/en/quick-start-guide/
30 | resource "google_compute_firewall" "rke_ports" {
31 | name = "${var.name}-rke-ports"
32 | network = google_compute_network.default.name
33 | source_tags = ["${var.name}-nodes"]
34 |
35 | allow {
36 | protocol = "tcp"
37 |
38 | ports = [
39 | "22", # SSH
40 | "80", # Canal
41 | "443", # Canal
42 | "6443", # Kubernetes API server
43 | "2379-2380", # etcd server client API
44 | "10250", # kubelet API
45 | "10251", # scheduler
46 | "10252", # controller
47 | "10256", # kubeproxy
48 | "30000-32767", # NodePort Services
49 | ]
50 | }
51 | }
52 |
53 |
--------------------------------------------------------------------------------
/docs/guide/aws/cluster-manager.md:
--------------------------------------------------------------------------------
1 | ## Cluster Manager
2 |
3 | Cluster Managers can manage multiple clusters across regions/data-centers and/or clouds. They can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
4 |
5 | To create a cluster manager, run the following:
6 | ```
7 | $ triton-kubernetes create manager
8 | ✔ Backend Provider: Local
9 | ✔ Cloud Provider: AWS
10 | ✔ Cluster Manager Name: dev-manager
11 | ✔ Private Registry: None
12 | ✔ Rancher Server Image: Default
13 | ✔ Rancher Agent Image: Default
14 | ✔ Set UI Admin Password: ****
15 | ✔ AWS Access Key: [changeme]
16 | ✔ AWS Secret Key: [changeme]
17 | ✔ AWS Region: us-east-1
18 | ✔ Name for new aws public key: triton-kubernetes_public_key
19 | ✔ AWS Public Key Path: ~/.ssh/id_rsa.pub
20 | ✔ AWS Private Key Path: ~/.ssh/id_rsa
21 | ✔ AWS SSH User: ubuntu
22 | ✔ AWS VPC CIDR: 10.0.0.0/16
23 | ✔ AWS Subnet CIDR: 10.0.2.0/24
24 | ✔ AWS AMI: ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20180405
25 | ✔ AWS Instance Type: t2.micro
26 | Proceed? Yes
27 | ```
28 |
29 | To destroy a cluster manager, run the following:
30 |
31 | ```
32 | $ triton-kubernetes destroy manager
33 | ✔ Backend Provider: Local
34 | ✔ Cluster Manager: dev-manager
35 | Destroy "dev-manager"? Yes
36 | ```
37 | > Note: Destroying a cluster manager will destroy all your clusters and nodes attached to it.
38 |
39 | To get cluster manager details, run the following:
40 |
41 | ```
42 | $ triton-kubernetes get manager
43 | ```
44 |
45 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode. To read about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
--------------------------------------------------------------------------------
/docs/guide/azure/cluster-manager.md:
--------------------------------------------------------------------------------
1 | ## Cluster Manager
2 |
3 | Cluster Managers can manage multiple clusters across regions/data-centers and/or clouds. They can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
4 |
5 | To create a cluster manager, run the following:
6 | ```
7 | $ triton-kubernetes create manager
8 | ✔ Backend Provider: Local
9 | ✔ Cloud Provider: Azure
10 | ✔ Cluster Manager Name: dev-manager
11 | ✔ Private Registry: None
12 | ✔ Rancher Server Image: Default
13 | ✔ Rancher Agent Image: Default
14 | ✔ Set UI Admin Password: *****
15 | ✔ Azure Subscription ID: 0535d7cf-a52e-491b-b7bc-37f674787ab8
16 | ✔ Azure Client ID: 22520959-c5bb-499a-b3d0-f97e8849385e
17 | ✔ Azure Client Secret: a19ed50f-f7c1-4ef4-9862-97bc880d2536
18 | ✔ Azure Tenant ID: 324e4a5e-53a9-4be4-a3a5-fcd3e79f2c5b
19 | ✔ Azure Environment: public
20 | ✔ Azure Location: West US
21 | ✔ Azure Size: Standard_B1ms
22 | ✔ Azure SSH User: ubuntu
23 | ✔ Azure Public Key Path: ~/.ssh/id_rsa.pub
24 | ✔ Azure Private Key Path: ~/.ssh/id_rsa
25 | Proceed? Yes
26 |
27 | ```
28 |
29 | To destroy a cluster manager, run the following:
30 |
31 | ```
32 | $ triton-kubernetes destroy manager
33 | ✔ Backend Provider: Local
34 | ✔ Cluster Manager: dev-manager
35 | Destroy "dev-manager"? Yes
36 | ```
37 | > Note: Destroying a cluster manager will destroy all your clusters and nodes attached to it.
38 |
39 | To get cluster manager details, run the following:
40 |
41 | ```
42 | $ triton-kubernetes get manager
43 | ```
44 |
45 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode. To read about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
--------------------------------------------------------------------------------
/docs/guide/gke/cluster.md:
--------------------------------------------------------------------------------
1 | ## Cluster
2 |
3 | A cluster is a group of physical (or virtual) computers that share resources to accomplish tasks as if they were a single system. Creating a cluster requires a cluster manager to be running already.
4 |
5 | To check if a cluster manager exists, run the following:
6 |
7 | ```
8 | $ triton-kubernetes get manager
9 | ```
10 |
11 | > NOTE: GCP Service Account must have the following permissions:
12 | > - compute.regions.list
13 | > - iam.serviceAccountActor
14 | > - container.clusterRoleBindings.create
15 |
16 | To create a cluster on GKE, run the following:
17 |
18 | ```
19 | $ triton-kubernetes create cluster
20 | ✔ Backend Provider: Local
21 | ✔ Cluster Manager: dev-manager
22 | ✔ Cloud Provider: GKE
23 | ✔ Cluster Name: dev-cluster
24 | ✔ Path to Google Cloud Platform Credentials File: /Users/fayazg/fayazg-5b46508599f1.json
25 | ✔ GCP Compute Region: us-central1
26 | ✔ GCP Zone: us-central1-a
27 | ✔ GCP Additional Zones: us-central1-b
28 | Add another? No
29 | ✔ GCP Machine Type: n1-standard-1
30 | ✔ Kubernetes Version: v1.9.7
31 | ✔ Number of nodes to create: 3
32 | ✔ Kubernetes Master Password: ***************************
33 | ```
34 |
35 | To destroy a cluster, run the following:
36 |
37 | ```
38 | $ triton-kubernetes destroy cluster
39 | ✔ Backend Provider: Local
40 | ✔ Cluster Manager: dev-manager
41 | ✔ Cluster: dev-cluster
42 | Destroy "dev-cluster"? Yes
43 | ```
44 |
45 | To get cluster details, run the following:
46 |
47 | ```
48 | $ triton-kubernetes get cluster
49 | ```
50 |
51 |
52 | The `triton-kubernetes` CLI can take a YAML configuration file via the `--config` option to run in silent mode. To read about the YAML arguments, see the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s-host/files/install_rancher_agent.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This script just wraps https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh
3 | # It disables firewalld on CentOS.
4 | # TODO: Replace firewalld with iptables.
5 |
6 | if [ -n "$(command -v firewalld)" ]; then
7 | sudo systemctl stop firewalld.service
8 | sudo systemctl disable firewalld.service
9 | fi
10 |
11 | sudo curl ${docker_engine_install_url} | sh
12 | sudo service docker stop
13 | sudo bash -c 'echo "{
14 | \"storage-driver\": \"overlay2\"
15 | }" > /etc/docker/daemon.json'
16 | sudo service docker restart
17 |
18 | sudo hostnamectl set-hostname ${hostname}
19 |
20 | # Run docker login if requested
21 | if [ "${rancher_registry_username}" != "" ]; then
22 | sudo docker login -u ${rancher_registry_username} -p ${rancher_registry_password} ${rancher_registry}
23 | fi
24 |
25 | # Mounting the Volume
26 | MOUNT_PATH='${disk_mount_path}'
27 | if [ "$$MOUNT_PATH" != '' ]; then
28 | # For GCP Ubuntu instances, the device name for the GCP disk starts with 'sdb'.
29 | if [ -b /dev/sdb ]; then
30 | INSTANCE_STORE_BLOCK_DEVICE=/dev/sdb
31 | fi
32 |
33 | echo $${INSTANCE_STORE_BLOCK_DEVICE}
34 |
35 | if [ -b $${INSTANCE_STORE_BLOCK_DEVICE} ]; then
36 | sudo mke2fs -F -E nodiscard -L $$MOUNT_PATH -j $${INSTANCE_STORE_BLOCK_DEVICE} &&
37 | sudo tune2fs -r 0 $${INSTANCE_STORE_BLOCK_DEVICE} &&
38 | echo "LABEL=$$MOUNT_PATH $$MOUNT_PATH ext4 defaults,noatime 1 1" | sudo tee /etc/fstab > /dev/null &&
39 | sudo mkdir $$MOUNT_PATH &&
40 | sudo mount $$MOUNT_PATH
41 | fi
42 | fi
43 |
44 | sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run ${rancher_agent_image} --server ${rancher_api_url} --token ${rancher_cluster_registration_token} --ca-checksum ${rancher_cluster_ca_checksum} --${rancher_node_role}
45 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rke/files/setup_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # HA setup requires the use of an FQDN. However, DNS for the FQDN might not be set up yet, so we temporarily map it to localhost in /etc/hosts.
4 | sudo sed -i '$ a 127.0.0.1 ${fqdn}' /etc/hosts
5 |
6 | # Wait for Rancher UI to boot
7 | printf 'Waiting for Rancher to start'
8 | until $(curl --output /dev/null --silent --head --fail ${rancher_host}); do
9 | printf '.'
10 | sleep 5
11 | done
12 |
13 | sudo apt-get install jq -y || sudo yum install jq -y
14 |
15 | # Login as default admin user
16 | login_response=$(curl -X POST \
17 | -d '{"description":"Initial Token", "password":"admin", "ttl": 60000, "username":"admin"}' \
18 | '${rancher_host}/v3-public/localProviders/local?action=login')
19 | initial_token=$(echo $login_response | jq -r '.token')
20 |
21 | # Create token
22 | token_response=$(curl -X POST \
23 | -u $initial_token \
24 | -H 'Accept: application/json' \
25 | -H 'Content-Type: application/json' \
26 | -d '{"expired":false,"isDerived":false,"ttl":0,"type":"token","description":"Managed by Terraform","name":"triton-kubernetes"}' \
27 | '${rancher_host}/v3/token')
28 | echo $token_response > ~/rancher_api_key
29 | access_key=$(echo $token_response | jq -r '.name')
30 | secret_key=$(echo $token_response | jq -r '.token' | cut -d: -f2)
31 |
32 | # Change default admin password
33 | curl -X POST \
34 | -u $access_key:$secret_key \
35 | -H 'Accept: application/json' \
36 | -H 'Content-Type: application/json' \
37 | -d '{"currentPassword":"admin","newPassword":"${rancher_admin_password}"}' \
38 | '${rancher_host}/v3/users?action=changepassword'
39 |
40 | # Setup server url
41 | curl -X PUT \
42 | -u $access_key:$secret_key \
43 | -H 'Accept: application/json' \
44 | -H 'Content-Type: application/json' \
45 | -d '{"baseType": "setting", "id": "server-url", "name": "server-url", "type": "setting", "value": "${rancher_host}" }' \
46 | '${rancher_host}/v3/settings/server-url'
47 |
48 | # Remove FQDN reference to localhost
49 | sudo sed -i '$ d' /etc/hosts
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher-k8s-host/files/install_rancher_agent.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This script just wraps https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh
3 | # It disables firewalld on CentOS.
4 | # TODO: Replace firewalld with iptables.
5 |
6 | if [ -n "$(command -v firewalld)" ]; then
7 | sudo systemctl stop firewalld.service
8 | sudo systemctl disable firewalld.service
9 | fi
10 |
11 | sudo curl ${docker_engine_install_url} | sh
12 | sudo service docker stop
13 | sudo bash -c 'echo "{
14 | \"storage-driver\": \"overlay2\"
15 | }" > /etc/docker/daemon.json'
16 | sudo service docker restart
17 |
18 | sudo hostnamectl set-hostname ${hostname}
19 | sudo bash -c 'echo "127.0.0.1 ${hostname}" >> /etc/hosts'
20 |
21 | # Run docker login if requested
22 | if [ "${rancher_registry_username}" != "" ]; then
23 | sudo docker login -u ${rancher_registry_username} -p ${rancher_registry_password} ${rancher_registry}
24 | fi
25 |
26 | # Mounting the Volume
27 | MOUNT_PATH='${disk_mount_path}'
28 | if [ "$$MOUNT_PATH" != '' ]; then
29 | # For Azure instances, the mounted volume's block device name is /dev/sdc since this is the storage data disk
30 | if [ -b /dev/sdc ]; then
31 | INSTANCE_STORE_BLOCK_DEVICE=/dev/sdc
32 | fi
33 |
34 | echo $${INSTANCE_STORE_BLOCK_DEVICE}
35 |
36 | if [ -b $${INSTANCE_STORE_BLOCK_DEVICE} ]; then
37 | sudo mke2fs -F -E nodiscard -L $$MOUNT_PATH -j $${INSTANCE_STORE_BLOCK_DEVICE} &&
38 | sudo tune2fs -r 0 $${INSTANCE_STORE_BLOCK_DEVICE} &&
39 | echo "LABEL=$$MOUNT_PATH $$MOUNT_PATH ext4 defaults,noatime 1 1" | sudo tee /etc/fstab > /dev/null &&
40 | sudo mkdir $$MOUNT_PATH &&
41 | sudo mount $$MOUNT_PATH
42 | fi
43 | fi
44 |
45 | sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run ${rancher_agent_image} --server ${rancher_api_url} --token ${rancher_cluster_registration_token} --ca-checksum ${rancher_cluster_ca_checksum} --${rancher_node_role}
46 |
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = ""
11 | }
12 |
13 | variable "rancher_secret_key" {
14 | description = ""
15 | }
16 |
17 | variable "rancher_registry" {
18 | default = ""
19 | description = "The docker registry to use for rancher images"
20 | }
21 |
22 | variable "rancher_registry_username" {
23 | default = ""
24 | description = "The username to login as."
25 | }
26 |
27 | variable "rancher_registry_password" {
28 | default = ""
29 | description = "The password to use."
30 | }
31 |
32 | variable "k8s_registry" {
33 | default = ""
34 | description = "The docker registry to use for k8s images"
35 | }
36 |
37 | variable "k8s_registry_username" {
38 | default = ""
39 | description = "The username to login as."
40 | }
41 |
42 | variable "k8s_registry_password" {
43 | default = ""
44 | description = "The password to use."
45 | }
46 |
47 | variable "k8s_version" {
48 | default = "v1.18.12-rancher1-1"
49 | }
50 |
51 | variable "k8s_network_provider" {
52 | default = "flannel"
53 | }
54 |
55 | variable "vsphere_user" {
56 | description = "The username of the vCenter Server user."
57 | }
58 |
59 | variable "vsphere_password" {
60 | description = "The password of the vCenter Server user."
61 | }
62 |
63 | variable "vsphere_server" {
64 | description = "The IP address or FQDN of the vCenter Server."
65 | }
66 |
67 | variable "vsphere_datacenter_name" {
68 | description = "Name of the datacenter to use."
69 | }
70 |
71 | variable "vsphere_datastore_name" {
72 | description = "Name of the datastore to use."
73 | }
74 |
75 | variable "vsphere_resource_pool_name" {
76 | description = "Name of the resource pool to use."
77 | }
78 |
79 | variable "vsphere_network_name" {
80 | description = "Name of the network to use."
81 | }
82 |
83 |
--------------------------------------------------------------------------------
/cmd/get.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 |
8 | "github.com/joyent/triton-kubernetes/get"
9 | "github.com/joyent/triton-kubernetes/util"
10 |
11 | "github.com/spf13/cobra"
12 | )
13 |
14 | // getCmd represents the get command
15 | var getCmd = &cobra.Command{
16 | Use: "get [manager or cluster]",
17 | Short: "Display resource information",
18 | Long: `Get allows you to get cluster manager or cluster details.`,
19 | ValidArgs: []string{"manager", "cluster"},
20 | Args: func(cmd *cobra.Command, args []string) error {
21 | if len(args) != 1 {
22 | return errors.New(`"triton-kubernetes get" requires one argument`)
23 | }
24 |
25 | for _, validArg := range cmd.ValidArgs {
26 | if validArg == args[0] {
27 | return nil
28 | }
29 | }
30 |
31 | return fmt.Errorf(`invalid argument "%s" for "triton-kubernetes get"`, args[0])
32 | },
33 | Run: getCmdFunc,
34 | }
35 |
36 | func getCmdFunc(cmd *cobra.Command, args []string) {
37 | remoteBackend, err := util.PromptForBackend()
38 | if err != nil {
39 | fmt.Println(err)
40 | os.Exit(1)
41 | }
42 |
43 | getType := args[0]
44 | switch getType {
45 | case "manager":
46 | fmt.Println("get manager called")
47 | err := get.GetManager(remoteBackend)
48 | if err != nil {
49 | fmt.Println(err)
50 | os.Exit(1)
51 | }
52 | case "cluster":
53 | fmt.Println("get cluster called")
54 | err := get.GetCluster(remoteBackend)
55 | if err != nil {
56 | fmt.Println(err)
57 | os.Exit(1)
58 | }
59 | }
60 | }
61 |
62 | func init() {
63 | rootCmd.AddCommand(getCmd)
64 |
65 | // Here you will define your flags and configuration settings.
66 |
67 | // Cobra supports Persistent Flags which will work for this command
68 | // and all subcommands, e.g.:
69 | // getCmd.PersistentFlags().String("foo", "", "A help for foo")
70 |
71 | // Cobra supports local flags which will only run when this command
72 | // is called directly, e.g.:
73 | // getCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
74 |
75 | }
76 |
--------------------------------------------------------------------------------
/docs/guide/installing-cli.md:
--------------------------------------------------------------------------------
1 | # Installing `triton-kubernetes` CLI
2 |
3 | Each [release on github](https://github.com/joyent/triton-kubernetes/releases) has associated binaries built which can be used to easily install `triton-kubernetes` CLI.
4 |
5 | ## Install on Linux
6 |
7 | There are three packages available for Linux: an RPM, a DEB, and a standalone binary.
8 |
9 | ### Linux install using RPM package
10 |
11 | Download the `triton-kubernetes` [rpm package](https://github.com/joyent/triton-kubernetes/releases).
12 |
13 | From the directory where the RPM package was downloaded, run the following command:
14 |
15 | ```bash
16 | rpm -i triton-kubernetes_v0.9.0_linux-amd64.rpm
17 | ```
18 |
19 | > Replace `triton-kubernetes_v0.9.0_linux-amd64.rpm` with the package name that was downloaded.
20 |
21 | ### Linux install using DEB package
22 |
23 | Download the `triton-kubernetes` [deb package](https://github.com/joyent/triton-kubernetes/releases).
24 |
25 | From the directory where the DEB package was downloaded, run the following command:
26 |
27 | ```bash
28 | dpkg -i triton-kubernetes_v0.9.0_linux-amd64.deb
29 | apt-get install -f
30 | ```
31 |
32 | > Replace `triton-kubernetes_v0.9.0_linux-amd64.deb` with the package name that was downloaded.
33 |
34 | ### Linux install using standalone binary
35 |
36 | Triton Multi-Cloud Kubernetes CLI has a standalone Linux binary available.
37 | Download the `triton-kubernetes` [Linux binary](https://github.com/joyent/triton-kubernetes/releases).
38 | Move the binary to `/usr/local/bin/` or somewhere in your `$PATH`.
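
For example, a download and install might look like the following. The asset name below is a placeholder; use the exact filename shown on the releases page.

```bash
# Placeholder asset name -- substitute the Linux binary filename from the releases page
curl -LO https://github.com/joyent/triton-kubernetes/releases/download/v0.9.0/triton-kubernetes_v0.9.0_linux-amd64
chmod +x triton-kubernetes_v0.9.0_linux-amd64
sudo mv triton-kubernetes_v0.9.0_linux-amd64 /usr/local/bin/triton-kubernetes
```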
39 |
40 | ## Install on OSX
41 |
42 | Triton Multi-Cloud Kubernetes CLI has a standalone OSX binary available.
43 | Download the `triton-kubernetes` [OSX binary](https://github.com/joyent/triton-kubernetes/releases).
44 | Move the binary to `/usr/local/bin/` or somewhere in your `$PATH`.
45 |
46 | `triton-kubernetes` CLI can also be installed using _Homebrew_.
47 | To install the latest version:
48 |
49 | ```bash
50 | brew tap joyent/tap
51 | brew install triton-kubernetes
52 | ```
53 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/joyent/triton-kubernetes
2 |
3 | go 1.14
4 |
5 | require (
6 | github.com/Azure/azure-sdk-for-go v16.2.1+incompatible
7 | github.com/Azure/go-autorest v10.8.1+incompatible
8 | github.com/Jeffail/gabs v1.0.0
9 | github.com/aws/aws-sdk-go v1.12.61
10 | github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
11 | github.com/chzyer/logex v1.1.10 // indirect
12 | github.com/chzyer/readline v0.0.0-20171208011716-f6d7a1f6fbf3 // indirect
13 | github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect
14 | github.com/go-ini/ini v1.32.0 // indirect
15 | github.com/hashicorp/go-cleanhttp v0.0.0-20171218145408-d5fe4b57a186 // indirect
16 | github.com/hashicorp/go-getter v0.0.0-20180809191950-4bda8fa99001
17 | github.com/hashicorp/go-safetemp v0.0.0-20180326211150-b1a1dbde6fdc // indirect
18 | github.com/hashicorp/go-version v0.0.0-20180716215031-270f2f71b1ee // indirect
19 | github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
20 | github.com/joyent/triton-go v1.8.4
21 | github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
22 | github.com/lunixbochs/vtclean v0.0.0-20170504063817-d14193dfc626 // indirect
23 | github.com/manifoldco/promptui v0.2.2-0.20180102185639-4baa1188b83a
24 | github.com/mattn/go-colorable v0.0.9 // indirect
25 | github.com/mitchellh/go-homedir v1.1.0
26 | github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 // indirect
27 | github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
28 | github.com/smartystreets/goconvey v1.6.4 // indirect
29 | github.com/spf13/cobra v0.0.5
30 | github.com/spf13/viper v1.4.0
31 | github.com/stretchr/testify v1.3.0
32 | github.com/ulikunitz/xz v0.5.4 // indirect
33 | golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
34 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
35 | google.golang.org/api v0.0.0-20180112000342-37df4fabefb0
36 | gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
37 | gopkg.in/ini.v1 v1.56.0 // indirect
38 | )
39 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Triton Kubernetes is a multi-cloud Kubernetes solution. It has a global cluster manager (control plane) which can run and manage Kubernetes environments on any cloud - Public, Private or Bare Metal.
2 |
3 | The cluster manager manages environments running in any region. AWS, Azure, Google and Triton (public and private) are supported. If you don't want to use a cloud, environments on bare metal servers and VMware are supported as well.
4 |
5 | View the [Quick Start Guide](docs/guide/) for installation instructions.
6 |
7 | ## Using The CLI
8 |
9 | Triton Kubernetes allows you to create and destroy global cluster managers, kubernetes environments and individual cluster nodes. You can also get information on a cluster manager or kubernetes environment.
10 |
11 | For help with a command, use the --help flag. For example:
12 |
13 | ```bash
14 | $ triton-kubernetes --help
15 | This is a multi-cloud Kubernetes solution. Triton Kubernetes has a global
16 | cluster manager which can manage multiple clusters across regions/data-centers and/or clouds.
17 | Cluster manager can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
18 | For an example set up, look at the How-To section.
19 |
20 | Usage:
21 | triton-kubernetes [command]
22 |
23 | Available Commands:
24 | create Create resources
25 | destroy Destroy cluster managers, kubernetes clusters or individual kubernetes cluster nodes.
26 | get Display resource information
27 | help Help about any command
28 | version Print the version number of triton-kubernetes
29 |
30 | Flags:
31 | --config string config file (default is $HOME/.triton-kubernetes.yaml)
32 | -h, --help help for triton-kubernetes
33 | --non-interactive Prevent interactive prompts
34 | --terraform-configuration Create terraform configuration only
35 | -t, --toggle Help message for toggle
36 |
37 | Use "triton-kubernetes [command] --help" for more information about a command.
38 | ```
39 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s-host/main.tf:
--------------------------------------------------------------------------------
1 | provider "triton" {
2 | account = var.triton_account
3 | key_material = file(var.triton_key_path)
4 | key_id = var.triton_key_id
5 | url = var.triton_url
6 | }
7 |
8 | data "triton_network" "networks" {
9 | count = length(var.triton_network_names)
10 | name = element(var.triton_network_names, count.index)
11 | }
12 |
13 | data "triton_image" "image" {
14 | name = var.triton_image_name
15 | version = var.triton_image_version
16 | }
17 |
18 | locals {
19 | rancher_node_role = element(keys(var.rancher_host_labels), 0)
20 | }
21 |
22 | data "template_file" "install_rancher_agent" {
23 | template = file("${path.module}/files/install_rancher_agent.sh.tpl")
24 |
25 | vars = {
26 | hostname = var.hostname
27 | docker_engine_install_url = var.docker_engine_install_url
28 |
29 | rancher_api_url = var.rancher_api_url
30 | rancher_cluster_registration_token = var.rancher_cluster_registration_token
31 | rancher_cluster_ca_checksum = var.rancher_cluster_ca_checksum
32 | rancher_node_role = local.rancher_node_role == "control" ? "controlplane" : local.rancher_node_role
33 | rancher_agent_image = var.rancher_agent_image
34 |
35 | rancher_registry = var.rancher_registry
36 | rancher_registry_username = var.rancher_registry_username
37 | rancher_registry_password = var.rancher_registry_password
38 | }
39 | }
40 |
41 | resource "triton_machine" "host" {
42 | package = var.triton_machine_package
43 | image = data.triton_image.image.id
44 | name = var.hostname
45 |
46 | user_script = data.template_file.install_rancher_agent.rendered
47 |
48 | networks = data.triton_network.networks[*].id
49 |
50 | cns {
51 | disable = true
52 | # services = ["${element(keys(var.rancher_host_labels), 0)}.${var.hostname}"]
53 | }
54 |
55 | affinity = ["role!=~${element(keys(var.rancher_host_labels), 0)}"]
56 |
57 | tags = {
58 | role = element(keys(var.rancher_host_labels), 0)
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = ""
11 | }
12 |
13 | variable "rancher_secret_key" {
14 | description = ""
15 | }
16 |
17 | variable "k8s_version" {
18 | default = "v1.18.12-rancher1-1"
19 | }
20 |
21 | variable "k8s_network_provider" {
22 | default = "flannel"
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for Rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "k8s_registry" {
41 | default = ""
42 | description = "The docker registry to use for Kubernetes images"
43 | }
44 |
45 | variable "k8s_registry_username" {
46 | default = ""
47 | description = "The username to login as."
48 | }
49 |
50 | variable "k8s_registry_password" {
51 | default = ""
52 | description = "The password to use."
53 | }
54 |
55 | variable "aws_access_key" {
56 | description = "AWS access key"
57 | }
58 |
59 | variable "aws_secret_key" {
60 | description = "AWS secret access key"
61 | }
62 |
63 | variable "aws_region" {
64 | description = "AWS region to host your network"
65 | }
66 |
67 | variable "aws_vpc_cidr" {
68 | description = "CIDR for VPC"
69 | default = "10.0.0.0/16"
70 | }
71 |
72 | variable "aws_subnet_cidr" {
73 | description = "CIDR for subnet"
74 | default = "10.0.2.0/24"
75 | }
76 |
77 | variable "aws_ami_id" {
78 | description = "Base AMI to launch the instances with"
79 | # default="ami-08a099fcfc36dff3f"
80 | default = ""
81 | }
82 |
83 | variable "aws_public_key_path" {
84 | description = "Path to a public key. If set, a key_pair will be made in AWS named aws_key_name"
85 | default = ""
86 | }
87 |
88 | variable "aws_key_name" {
89 | description = "Name of the public key to be used for provisioning"
90 | }
91 |
92 |
--------------------------------------------------------------------------------
/examples/apps/ingress/README.md:
--------------------------------------------------------------------------------
1 | # Setting up HTTP Load Balancing with Ingress
2 |
3 | This example shows how to run a web application behind an HTTP load balancer by configuring the Ingress resource.
4 |
5 | ## Deploy a web application
6 |
7 | Create a Deployment using the sample web application container image that listens on an HTTP server on port 8080:
8 |
9 | ```
10 | $ kubectl run web --image=gcr.io/google-samples/hello-app:1.0 --port=8080
11 | ```
19 |
20 | ## Expose your Deployment as a Service internally
21 |
22 | Create a Service resource to make the web deployment reachable within your container cluster:
23 |
24 | ```
25 | kubectl expose deployment web --target-port=8080 --type=NodePort
26 | ```
27 |
28 | Verify the Service was created and a node port was allocated:
29 | ```
30 | kubectl get service web
31 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
32 | web NodePort 10.96.135.94 8080:32640/TCP 2m
33 | ```
34 | In the sample output above, the node port for the web Service is 32640. Also, note that there is no external IP allocated for this Service. Since the Kubernetes Engine nodes are not externally accessible by default, creating this Service does not make your application accessible from the Internet.
35 |
36 | To make your HTTP(S) web server application publicly accessible, you need to create an Ingress resource.
37 |
38 | ## Create an Ingress resource
39 |
40 | To deploy this Ingress resource run:
41 | ```
42 | kubectl apply -f examples/apps/ingress/ingress.yaml
43 | ```
44 |
45 | ## Visit your application
46 |
47 | Find out the external IP address of the load balancer serving your application by running:
48 |
49 | ```
50 | $ kubectl get ingress basic-ingress
51 | 
52 | NAME            HOSTS     ADDRESS         PORTS     AGE
53 | basic-ingress   *         165.225.128.1   80        7m
54 | ```
55 | To see all the external IP addresses run:
56 |
57 | ```
58 | kubectl get ingress basic-ingress -o yaml
59 | ```
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = "Rancher API url"
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = "Rancher API access key"
11 | }
12 |
13 | variable "rancher_secret_key" {
14 |   description = "Rancher API secret key."
15 | }
16 |
17 | variable "k8s_version" {
18 | default = "v1.18.12-rancher1-1"
19 | }
20 |
21 | variable "k8s_network_provider" {
22 | default = "flannel"
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for Rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "k8s_registry" {
41 | default = ""
42 | description = "The docker registry to use for Kubernetes images"
43 | }
44 |
45 | variable "k8s_registry_username" {
46 | default = ""
47 | description = "The username to login as."
48 | }
49 |
50 | variable "k8s_registry_password" {
51 | default = ""
52 | description = "The password to use."
53 | }
54 |
55 | variable "triton_account" {
56 | default = ""
57 | description = "The Triton account name, usually the username of your root user."
58 | }
59 |
60 | variable "triton_key_path" {
61 | default = ""
62 | description = "The path to a private key that is authorized to communicate with the Triton API."
63 | }
64 |
65 | variable "triton_key_id" {
66 | default = ""
67 | description = "The md5 fingerprint of the key at triton_key_path. Obtained by running `ssh-keygen -E md5 -lf ~/path/to.key`"
68 | }
69 |
70 | variable "triton_url" {
71 | default = ""
72 | description = "The CloudAPI endpoint URL. e.g. https://us-west-1.api.joyent.com"
73 | }
74 |
75 | variable "docker_engine_install_url" {
76 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
77 | description = "The URL to the shell script to install the docker engine."
78 | }
79 |
--------------------------------------------------------------------------------
/cmd/create.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/joyent/triton-kubernetes/create"
8 | "github.com/joyent/triton-kubernetes/util"
9 |
10 | "github.com/spf13/cobra"
11 | )
12 |
13 | // createCmd represents the create command
14 | var createCmd = &cobra.Command{
15 | Use: "create",
16 | Short: "Create resources",
17 | Long: `Create allows you to create resources that triton-kubernetes can manage.`,
18 | }
19 |
20 | var createManagerCmd = &cobra.Command{
21 | Use: "manager",
22 | Short: "Create Manager",
23 | Run: func(cmd *cobra.Command, args []string) {
24 | remoteBackend, err := util.PromptForBackend()
25 | if err != nil {
26 | fmt.Println(err)
27 | os.Exit(1)
28 | }
29 |
30 | err = create.NewManager(remoteBackend)
31 | if err != nil {
32 | fmt.Println(err)
33 | os.Exit(1)
34 | }
35 | },
36 | }
37 |
38 | var createClusterCmd = &cobra.Command{
39 | Use: "cluster",
40 | Short: "Create Kubernetes Cluster",
41 | Run: func(cmd *cobra.Command, args []string) {
42 | remoteBackend, err := util.PromptForBackend()
43 | if err != nil {
44 | fmt.Println(err)
45 | os.Exit(1)
46 | }
47 |
48 | err = create.NewCluster(remoteBackend)
49 | if err != nil {
50 | fmt.Println(err)
51 | os.Exit(1)
52 | }
53 | },
54 | }
55 |
56 | var createNodeCmd = &cobra.Command{
57 | Use: "node",
58 | Short: "Create Node",
59 | Run: func(cmd *cobra.Command, args []string) {
60 | remoteBackend, err := util.PromptForBackend()
61 | if err != nil {
62 | fmt.Println(err)
63 | os.Exit(1)
64 | }
65 |
66 | err = create.NewNode(remoteBackend)
67 | if err != nil {
68 | fmt.Println(err)
69 | os.Exit(1)
70 | }
71 | },
72 | }
73 |
74 | var createBackupCmd = &cobra.Command{
75 | Use: "backup",
76 | Short: "Create Cluster Backup",
77 | Run: func(cmd *cobra.Command, args []string) {
78 | remoteBackend, err := util.PromptForBackend()
79 | if err != nil {
80 | fmt.Println(err)
81 | os.Exit(1)
82 | }
83 |
84 | err = create.NewBackup(remoteBackend)
85 | if err != nil {
86 | fmt.Println(err)
87 | os.Exit(1)
88 | }
89 | },
90 | }
91 |
92 | func init() {
93 | rootCmd.AddCommand(createCmd)
94 |
95 | createCmd.AddCommand(createManagerCmd, createClusterCmd, createNodeCmd, createBackupCmd)
96 | }
97 |
--------------------------------------------------------------------------------
/packer/rancher-agent.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | variables:
3 | - !include variables/triton.yaml
4 |
5 | builders:
6 | - !include builders/triton-kvm-rancheragent.yaml
7 |
8 | provisioners:
9 | - type: shell
10 | inline:
11 | - sudo curl "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh" | sh
12 | - sudo apt-get install jq -y
13 | - sudo docker pull rancher/healthcheck:v0.3.3
14 | - sudo docker pull rancher/net:v0.13.7
15 | - sudo docker pull rancher/net:holder
16 | - sudo docker pull rancher/k8s:v1.8.5-rancher4
17 | - sudo docker pull rancher/etcd:v2.3.7-13
18 | - sudo docker pull rancher/etc-host-updater:v0.0.3
19 | - sudo docker pull rancher/dns:v0.15.3
20 | - sudo docker pull rancher/agent:v1.2.9
21 | - sudo docker pull busybox
22 | - sudo docker pull rancher/kubectld:v0.8.5
23 | - sudo docker pull rancher/lb-service-rancher:v0.7.17
24 | - sudo docker pull rancher/kubernetes-agent:v0.6.6
25 | - sudo docker pull rancher/kubernetes-auth:v0.0.8
26 | - sudo docker pull rancher/metadata:v0.9.5
27 | - sudo docker pull rancher/network-manager:v0.7.19
28 | - sudo docker pull gcr.io/kubernetes-helm/tiller@sha256:6b561c3bb9fed1b028520cce3852e6c9a6a91161df9b92ca0c3a20ebecc0581a
29 | - sudo docker pull gcr.io/google_containers/pause-amd64:3.0
30 | - sudo docker pull gcr.io/google_containers/heapster-amd64@sha256:b77cebeff2180d03e21cc9f9c6b69a0d9710caa9f6263e675eab7938019631ef
31 | - sudo docker pull gcr.io/google_containers/heapster-grafana-amd64@sha256:4a472eb4df03f4f557d80e7c6b903d9c8fe31493108b99fbd6da6540b5448d70
32 | - sudo docker pull gcr.io/google_containers/heapster-influxdb-amd64@sha256:f433e331c1865ad87bc5387589965528b78cd6b1b2f61697e589584d690c1edd
33 | - sudo docker pull gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64@sha256:46b933bb70270c8a02fa6b6f87d440f6f1fce1a5a2a719e164f83f7b109f7544
34 | - sudo docker pull gcr.io/google_containers/k8s-dns-kube-dns-amd64@sha256:1a3fc069de481ae690188f6f1ba4664b5cc7760af37120f70c86505c79eea61d
35 | - sudo docker pull gcr.io/google_containers/k8s-dns-sidecar-amd64@sha256:9aab42bf6a2a068b797fe7d91a5d8d915b10dbbc3d6f2b10492848debfba6044
36 | - sudo docker pull gcr.io/google_containers/kubernetes-dashboard-amd64@sha256:71a0de5c6a21cb0c2fbcad71a4fef47acd3e61cd78109822d35e1742f9d8140d
37 |
--------------------------------------------------------------------------------
/terraform/modules/k8s-backup-manta/main.tf:
--------------------------------------------------------------------------------
1 | data "template_file" "minio_manta_deployment" {
2 | template = file("${path.module}/files/minio-manta-deployment.yaml")
3 |
4 | vars = {
5 | triton_account = var.triton_account
6 | triton_key_id = var.triton_key_id
7 | triton_key_material = indent(12, file(var.triton_key_path)) // indent to keep yaml multi line compliance
8 | manta_subuser = var.manta_subuser
9 | }
10 | }
11 |
12 | resource "null_resource" "setup_ark_backup" {
13 | provisioner "local-exec" {
14 | command = "curl -LO https://github.com/heptio/ark/releases/download/v0.7.1/ark-v0.7.1-linux-arm64.tar.gz"
15 | }
16 |
17 | provisioner "local-exec" {
18 | command = "tar xvf ark-v0.7.1-linux-arm64.tar.gz"
19 | }
20 |
21 | provisioner "local-exec" {
22 | command = "curl -LO https://github.com/heptio/ark/archive/v0.7.1.tar.gz"
23 | }
24 |
25 | provisioner "local-exec" {
26 | command = "tar xvf v0.7.1.tar.gz"
27 | }
28 |
29 | provisioner "local-exec" {
30 | # Get kubernetes config yaml from Rancher, write it to disk
31 | command = < kubeconfig.yaml
39 |
40 | EOT
41 |
42 | }
43 |
44 | provisioner "local-exec" {
45 | # Write minio_manta_deployment.yaml to disk
46 | command = format(
47 | "cat << EOF > minio_manta_deployment.yaml \n%s\nEOF",
48 | data.template_file.minio_manta_deployment.rendered,
49 | )
50 | }
51 |
52 | provisioner "local-exec" {
53 | command = "kubectl apply -f ark-0.7.1/examples/common/00-prereqs.yaml --kubeconfig=kubeconfig.yaml"
54 | }
55 |
56 | provisioner "local-exec" {
57 | command = "kubectl apply -f minio_manta_deployment.yaml --kubeconfig=kubeconfig.yaml"
58 | }
59 |
60 | provisioner "local-exec" {
61 | command = "kubectl apply -f ark-0.7.1/examples/common/10-deployment.yaml --kubeconfig=kubeconfig.yaml"
62 | }
63 |
64 | provisioner "local-exec" {
65 | command = "rm -rf ark ark-* minio_manta_deployment.yaml kubeconfig.yaml v0.7.1.tar.gz"
66 | }
67 | }
68 |
69 |
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s-host/variables.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | description = ""
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_cluster_registration_token" {
10 | }
11 |
12 | variable "rancher_cluster_ca_checksum" {
13 | }
14 |
15 | variable "rancher_host_labels" {
16 | type = map(string)
17 | description = "A map of key/value pairs that get passed to the rancher agent on the host."
18 | }
19 |
20 | variable "rancher_agent_image" {
21 | default = "rancher/rancher-agent:v2.4.11"
22 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "docker_engine_install_url" {
41 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
42 | description = "The URL to the shell script to install the docker engine."
43 | }
44 |
45 | variable "vsphere_user" {
46 | description = "The username of the vCenter Server user."
47 | }
48 |
49 | variable "vsphere_password" {
50 | description = "The password of the vCenter Server user."
51 | }
52 |
53 | variable "vsphere_server" {
54 | description = "The IP address or FQDN of the vCenter Server."
55 | }
56 |
57 | variable "vsphere_datacenter_name" {
58 | description = "Name of the datacenter to use."
59 | }
60 |
61 | variable "vsphere_datastore_name" {
62 | description = "Name of the datastore to use."
63 | }
64 |
65 | variable "vsphere_resource_pool_name" {
66 | description = "Name of the resource pool to use."
67 | }
68 |
69 | variable "vsphere_network_name" {
70 | description = "Name of the network to use."
71 | }
72 |
73 | variable "vsphere_template_name" {
74 | description = "VM template to use."
75 | }
76 |
77 | variable "ssh_user" {
78 | default = "ubuntu"
79 | description = ""
80 | }
81 |
82 | variable "key_path" {
83 | default = "~/.ssh/id_rsa"
84 | description = ""
85 | }
86 |
87 |
--------------------------------------------------------------------------------
/terraform/modules/gke-rancher-k8s/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "rancher_cluster" {
2 | program = ["bash", "${path.module}/files/rancher_cluster_import.sh"]
3 |
4 | query = {
5 | rancher_api_url = var.rancher_api_url
6 | rancher_access_key = var.rancher_access_key
7 | rancher_secret_key = var.rancher_secret_key
8 | name = var.name
9 | }
10 | }
11 |
12 | provider "google" {
13 | credentials = file(var.gcp_path_to_credentials)
14 | project = var.gcp_project_id
15 | region = var.gcp_compute_region
16 | }
17 |
18 | resource "google_container_cluster" "primary" {
19 | name = var.name
20 | zone = var.gcp_zone
21 | initial_node_count = var.node_count
22 |
23 | min_master_version = var.k8s_version
24 | node_version = var.k8s_version
25 |
26 | additional_zones = var.gcp_additional_zones
27 |
28 | master_auth {
29 | username = "admin"
30 | password = var.password
31 | }
32 |
33 | node_config {
34 | oauth_scopes = [
35 | "https://www.googleapis.com/auth/compute",
36 | "https://www.googleapis.com/auth/devstorage.read_only",
37 | "https://www.googleapis.com/auth/logging.write",
38 | "https://www.googleapis.com/auth/monitoring",
39 | ]
40 | }
41 | }
42 |
43 | locals {
44 | kube_config_path = "./${var.name}_config"
45 | }
46 |
47 | # Bootstrap rancher in gke environment
48 | resource "null_resource" "import_rancher" {
49 | triggers = {
50 | cluster = google_container_cluster.primary.endpoint
51 | }
52 |
53 | provisioner "local-exec" {
54 | command = "gcloud auth activate-service-account --key-file ${var.gcp_path_to_credentials}"
55 | }
56 |
57 | provisioner "local-exec" {
58 | command = "gcloud container clusters get-credentials ${var.name} --zone ${var.gcp_zone} --project ${var.gcp_project_id}"
59 |
60 | environment = {
61 | KUBECONFIG = local.kube_config_path
62 | }
63 | }
64 |
65 | provisioner "local-exec" {
66 | command = "curl --insecure -sfL ${var.rancher_api_url}/v3/import/${data.external.rancher_cluster.result.registration_token}.yaml | kubectl apply -f -"
67 |
68 | environment = {
69 | KUBECONFIG = local.kube_config_path
70 | }
71 | }
72 |
73 | provisioner "local-exec" {
74 | command = "rm ${local.kube_config_path}"
75 | }
76 |
77 | provisioner "local-exec" {
78 | command = "gcloud auth revoke"
79 | }
80 | }
81 |
82 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher-k8s-host/files/install_rancher_agent.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # This script just wraps https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh
3 | # It disables firewalld on CentOS.
4 | # TODO: Replace firewalld with iptables.
5 |
6 | if [ -n "$(command -v firewalld)" ]; then
7 | sudo systemctl stop firewalld.service
8 | sudo systemctl disable firewalld.service
9 | fi
10 |
11 | sudo curl ${docker_engine_install_url} | sh
12 | sudo service docker stop
13 | sudo bash -c 'echo "{
14 | \"storage-driver\": \"overlay2\"
15 | }" > /etc/docker/daemon.json'
16 | sudo service docker restart
17 |
18 | sudo hostnamectl set-hostname ${hostname}
19 |
20 | # Run docker login if requested
21 | if [ "${rancher_registry_username}" != "" ]; then
22 | sudo docker login -u ${rancher_registry_username} -p ${rancher_registry_password} ${rancher_registry}
23 | fi
24 |
25 | # Mounting the Volume
26 | # The device name that the user entered. (not necessarily the one that the OS is using)
27 | # This is assumed to have the format /dev/sd[f-p] (e.g. /dev/sdf, /dev/sdp)
28 | DEVICE_NAME_INPUT='${volume_device_name}'
29 |
30 | if [ "$$DEVICE_NAME_INPUT" != '' ]; then
31 | MOUNT_PATH='${volume_mount_path}'
32 |
33 | # Extract the last character of the device name
34 | LAST_CHAR=$$(echo -n $$DEVICE_NAME_INPUT | tail -c 1)
35 |
36 | # Finding the device name the OS is using the last character of the device name
37 | # This assumes the OS will map the device name to a format such as "/dev/xvd?"
38 | # where '?' is the last character of the device name chosen by the user
39 | if [ -b /dev/xvd$$LAST_CHAR ]; then
40 | INSTANCE_STORE_BLOCK_DEVICE=/dev/xvd$$LAST_CHAR
41 | fi
42 |
43 | echo $${INSTANCE_STORE_BLOCK_DEVICE}
44 |
45 | if [ -b $${INSTANCE_STORE_BLOCK_DEVICE} ]; then
46 | sudo mke2fs -E nodiscard -L $$MOUNT_PATH -j $${INSTANCE_STORE_BLOCK_DEVICE} &&
47 | sudo tune2fs -r 0 $${INSTANCE_STORE_BLOCK_DEVICE} &&
48 | echo "LABEL=$$MOUNT_PATH $$MOUNT_PATH ext4 defaults,noatime 1 1" >> /etc/fstab &&
49 | sudo mkdir $$MOUNT_PATH &&
50 | sudo mount $$MOUNT_PATH
51 | fi
52 | fi
53 |
54 | sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run ${rancher_agent_image} --server ${rancher_api_url} --token ${rancher_cluster_registration_token} --ca-checksum ${rancher_cluster_ca_checksum} --${rancher_node_role}
55 |
--------------------------------------------------------------------------------
/docs/guide/azure/cluster-manager-ha.md:
--------------------------------------------------------------------------------
1 | ## Cluster Manager
2 |
3 | Cluster Managers can manage multiple clusters across regions/data-centers and/or clouds. They can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
4 |
5 | Creating a highly available cluster manager requires a TLS certificate before you run triton-kubernetes.
6 |
7 | You can create a TLS certificate by leveraging [certbot](https://certbot.eff.org/docs/install.html).
8 | ```
9 | sudo certbot certonly --manual -d test.example.com --preferred-challenges dns
10 | ```
11 | Or you may provide your own certificate and key. The only requirement is for both to be PEM encoded.
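
If you only need a certificate for testing, a self-signed PEM pair can be generated with `openssl`; the domain and output filenames below are placeholders matching the example prompts:

```
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
  -subj "/CN=test.example.com" \
  -keyout privkey.pem -out fullchain.pem
```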
12 |
13 | To create a cluster manager, run the following:
14 | ```
15 | $ triton-kubernetes create manager
16 | ✔ Backend Provider: Local
17 | ✔ Cloud Provider: Azure
18 | ✔ Cluster Manager Name: dev-manager
19 | Highly Available? Yes
20 | ✔ Fully Qualified Domain Name: test.example.com
21 | ✔ TLS Private Key Path: ~/Documents/privkey.pem
22 | ✔ TLS Certificate Path: ~/Documents/fullchain.pem
23 | ✔ Private Registry: None
24 | ✔ Rancher Server Image: Default
25 | ✔ Rancher Agent Image: Default
26 | ✔ Set UI Admin Password: *****
27 | ✔ Azure Subscription ID: 0535d7cf-a52e-491b-b7bc-37f674787ab8
28 | ✔ Azure Client ID: 22520959-c5bb-499a-b3d0-f97e8849385e
29 | ✔ Azure Client Secret: a19ed50f-f7c1-4ef4-9862-97bc880d2536
30 | ✔ Azure Tenant ID: 324e4a5e-53a9-4be4-a3a5-fcd3e79f2c5b
31 | ✔ Azure Environment: public
32 | ✔ Azure Location: West US
33 | ✔ Azure Size: Standard_B1ms
34 | ✔ Azure SSH User: ubuntu
35 | ✔ Azure Public Key Path: ~/.ssh/id_rsa.pub
36 | ✔ Azure Private Key Path: ~/.ssh/id_rsa
37 | Proceed? Yes
38 |
39 | ```
40 |
41 | To destroy cluster manager, run the following:
42 |
43 | ```
44 | $ triton-kubernetes destroy manager
45 | ✔ Backend Provider: Local
46 | ✔ Cluster Manager: dev-manager
47 | Destroy "dev-manager"? Yes
48 | ```
49 | > Note: Destroying the cluster manager will destroy all clusters and nodes attached to it.
50 |
51 | To get cluster manager, run the following:
52 |
53 | ```
54 | $ triton-kubernetes get manager
55 | ```
56 |
57 | The `triton-kubernetes` CLI can take a configuration file (YAML) with the `--config` option to run in silent mode. To read about the YAML arguments, look at the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
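
For example (the configuration filename here is a placeholder; see the silent-install documentation for the supported keys):

```
triton-kubernetes create manager --config manager-config.yaml --non-interactive
```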
--------------------------------------------------------------------------------
/cmd/destroy.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 |
8 | "github.com/joyent/triton-kubernetes/destroy"
9 | "github.com/joyent/triton-kubernetes/util"
10 |
11 | "github.com/spf13/cobra"
12 | )
13 |
14 | // destroyCmd represents the destroy command
15 | var destroyCmd = &cobra.Command{
16 | Use: "destroy [manager or cluster or node]",
17 | Short: "Destroy cluster managers, kubernetes clusters or individual kubernetes cluster nodes.",
18 | Long: `Destroy allows you to destroy an existing cluster manager or a kubernetes cluster or an individual kubernetes cluster node.`,
19 | ValidArgs: []string{"manager", "cluster", "node"},
20 | Args: func(cmd *cobra.Command, args []string) error {
21 | if len(args) != 1 {
22 | return errors.New(`"triton-kubernetes destroy" requires one argument`)
23 | }
24 |
25 | for _, validArg := range cmd.ValidArgs {
26 | if validArg == args[0] {
27 | return nil
28 | }
29 | }
30 |
31 | return fmt.Errorf(`invalid argument "%s" for "triton-kubernetes destroy"`, args[0])
32 | },
33 | Run: destroyCmdFunc,
34 | }
35 |
36 | func destroyCmdFunc(cmd *cobra.Command, args []string) {
37 | remoteBackend, err := util.PromptForBackend()
38 | if err != nil {
39 | fmt.Println(err)
40 | os.Exit(1)
41 | }
42 |
43 | destroyType := args[0]
44 | switch destroyType {
45 | case "manager":
46 | fmt.Println("destroy manager called")
47 | err := destroy.DeleteManager(remoteBackend)
48 | if err != nil {
49 | fmt.Println(err)
50 | os.Exit(1)
51 | }
52 | case "cluster":
53 | fmt.Println("destroy cluster called")
54 | err := destroy.DeleteCluster(remoteBackend)
55 | if err != nil {
56 | fmt.Println(err)
57 | os.Exit(1)
58 | }
59 | case "node":
60 | fmt.Println("destroy node called")
61 | err := destroy.DeleteNode(remoteBackend)
62 | if err != nil {
63 | fmt.Println(err)
64 | os.Exit(1)
65 | }
66 | }
67 | }
68 |
69 | func init() {
70 | rootCmd.AddCommand(destroyCmd)
71 |
72 | // Here you will define your flags and configuration settings.
73 |
74 | // Cobra supports Persistent Flags which will work for this command
75 | // and all subcommands, e.g.:
76 | // destroyCmd.PersistentFlags().String("foo", "", "A help for foo")
77 |
78 | // Cobra supports local flags which will only run when this command
79 | // is called directly, e.g.:
80 | // destroyCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
81 |
82 | }
83 |
--------------------------------------------------------------------------------
/docs/guide/release-process.md:
--------------------------------------------------------------------------------
1 | # The Release Process
2 |
3 | The release process involves two steps: publishing binaries to Github releases (using Travis CI) and adding the new release to the homebrew registry.
4 |
5 | To start building and releasing, Travis CI must be configured for the triton-kubernetes repo. To let Travis CI upload Github releases, an encrypted Github API Key must be provided in the .travis.yml file.
6 |
7 | 1. Create a Github API Key and save the API Key locally (https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line)
8 | 2. Install the travis cli https://github.com/travis-ci/travis.rb#installation
9 | 3. Run `travis encrypt your-github-api-key-here`
10 | 4. Enter the encrypted text in the .travis.yml file at deploy.api-key.secure
11 |
12 | ## Creating a Release
13 |
14 | To create a release, you just need to create and push a git tag with the proper name (e.g. v0.0.1). Once the tag is set, Travis CI will begin the release process.
15 |
16 | 1. Using the git cli, checkout the commit you would like to release
17 | 2. Run `git tag v0.0.0`, where `v0.0.0` is the desired version number
18 | 3. Run `git push origin v0.0.0`
19 | 4. TravisCI will begin building the binaries using the commit that was tagged.
20 | 5. After Travis CI is done, check the Github releases page to verify that the binaries have been uploaded.
21 |
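For example, steps 1-3 above amount to the following (the commit reference and version are placeholders):

```bash
git checkout <commit-to-release>   # the commit you want to release
git tag v0.0.0                     # replace v0.0.0 with the desired version
git push origin v0.0.0
```
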
22 | ## Creating a Release Manually
23 |
24 | To build the binaries locally, you will need to install the following on your machine:
25 |
26 | - rpmbuild (For OS X, you can run `brew install rpm`)
27 | - [fpm](https://github.com/jordansissel/fpm)
28 |
29 | Then run `make build VERSION=0.0.0` where 0.0.0 is replaced with the desired version number.
30 |
31 | ## Updating the homebrew registry
32 |
33 | The github repository at: https://github.com/Homebrew/homebrew-core serves as the homebrew registry. To submit a pull request to that repo:
34 |
35 | 1. Fork the homebrew-core repository
36 | 2. In the Formula folder, add a brew formula for triton-kubernetes (triton-kubernetes.rb). Requires:
37 | - SHA256 checksum of the .tar.gz file from the Github release (use shasum command)
38 | - new version number
39 | - See example at https://github.com/cg50x/homebrew-test/blob/master/Formula/triton-kubernetes.rb
40 | 3. Submit a pull request to the homebrew-core repository. Once it is merged, you can verify that it works by running `brew update` and then `brew install triton-kubernetes`.
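
For step 2, the SHA256 checksum can be computed with `shasum`; the archive name below is a placeholder for the actual .tar.gz asset from the Github release:

```bash
shasum -a 256 triton-kubernetes_v0.9.0_osx-amd64.tar.gz
```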
41 |
--------------------------------------------------------------------------------
/terraform/modules/aks-rancher-k8s/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "rancher_cluster" {
2 | program = ["bash", "${path.module}/files/rancher_cluster_import.sh"]
3 |
4 | query = {
5 | rancher_api_url = var.rancher_api_url
6 | rancher_access_key = var.rancher_access_key
7 | rancher_secret_key = var.rancher_secret_key
8 | name = var.name
9 | }
10 | }
11 |
12 | provider "azurerm" {
13 | version = "=2.0.0"
14 | subscription_id = var.azure_subscription_id
15 | client_id = var.azure_client_id
16 | client_secret = var.azure_client_secret
17 | tenant_id = var.azure_tenant_id
18 | environment = var.azure_environment
19 | }
20 |
21 | resource "azurerm_resource_group" "resource_group" {
22 | name = "${var.name}-resource_group"
23 | location = var.azure_location
24 | }
25 |
26 | resource "azurerm_kubernetes_cluster" "primary" {
27 | name = var.name
28 | location = azurerm_resource_group.resource_group.location
29 | resource_group_name = azurerm_resource_group.resource_group.name
30 | dns_prefix = var.name
31 |
32 | kubernetes_version = var.k8s_version
33 |
34 | linux_profile {
35 | admin_username = var.azure_ssh_user
36 |
37 | ssh_key {
38 | key_data = file(var.azure_public_key_path)
39 | }
40 | }
41 |
42 | default_node_pool {
43 | name = "default"
44 | node_count = var.node_count
45 | vm_size = var.azure_size
46 | }
47 |
48 | service_principal {
49 | client_id = var.azure_client_id
50 | client_secret = var.azure_client_secret
51 | }
52 | }
53 |
54 | locals {
55 | kube_config_path = "./${var.name}_config"
56 | }
57 |
58 | # Bootstrap rancher in aks environment
59 | resource "null_resource" "import_rancher" {
60 | triggers = {
61 | cluster = azurerm_kubernetes_cluster.primary.id
62 | }
63 |
64 | provisioner "local-exec" {
65 | command = format(
66 | "cat << EOF > %s \n%s\nEOF",
67 | local.kube_config_path,
68 | azurerm_kubernetes_cluster.primary.kube_config_raw,
69 | )
70 | }
71 |
72 | provisioner "local-exec" {
73 | command = "curl --insecure -sfL ${var.rancher_api_url}/v3/import/${data.external.rancher_cluster.result.registration_token}.yaml | kubectl apply -f -"
74 |
75 | environment = {
76 | KUBECONFIG = local.kube_config_path
77 | }
78 | }
79 |
80 | provisioner "local-exec" {
81 | command = "rm ${local.kube_config_path}"
82 | }
83 | }
84 |
85 |
--------------------------------------------------------------------------------
/docs/guide/gcp/cluster.md:
--------------------------------------------------------------------------------
1 | ## Cluster
2 |
3 | A cluster is a group of physical (or virtual) computers that share resources to accomplish tasks as if they were a single system.
4 | The `create cluster` command allows you to create dedicated etcd, worker, and control nodes. Creating clusters requires a cluster manager to be running already.
5 |
6 | To check if cluster manager exists, run the following:
7 |
8 | ```
9 | $ triton-kubernetes get manager
10 | ```
11 |
12 | To create cluster, run the following:
13 |
14 | ```
15 | $ triton-kubernetes create cluster
16 | ✔ Backend Provider: Local
17 | create cluster called
18 | ✔ Cluster Manager: dev-manager
19 | ✔ Cloud Provider: GCP
20 | ✔ Cluster Name: gcp-cluster
21 | ✔ Kubernetes Version: v1.17.6
22 | ✔ Kubernetes Network Provider: calico
23 | ✔ Private Registry: None
24 | ✔ k8s Registry: None
25 | ✔ Path to Google Cloud Platform Credentials File: ~/gcp.json
26 | ✔ GCP Compute Region: us-east1
27 | Create new node? Yes
28 | ✔ Host Type: worker
29 | ✔ Number of nodes to create: 3
30 | ✔ Hostname prefix: gcp-nodew
31 | ✔ GCP Instance Zone: us-east1-c
32 | ✔ GCP Machine Type: n1-standard-1
33 | ✔ GCP Image: ubuntu-1604-xenial-v20180424
34 | 3 nodes added: gcp-nodew-1, gcp-nodew-2, gcp-nodew-3
35 | Create new node? Yes
36 | ✔ Host Type: etcd
37 | Number of nodes to create? 3
38 | ✔ Hostname prefix: gcp-nodee
39 | ✔ GCP Instance Zone: us-east1-b
40 | ✔ GCP Machine Type: n1-standard-1
41 | ✔ GCP Image: ubuntu-1604-xenial-v20180424
42 | 3 nodes added: gcp-nodee-1, gcp-nodee-2, gcp-nodee-3
43 | Create new node? Yes
44 | ✔ Host Type: control
45 | Number of nodes to create? 3
46 | ✔ Hostname prefix: gcp-nodec
47 | ✔ GCP Instance Zone: us-east1-d
48 | ✔ GCP Machine Type: n1-standard-1
49 | ✔ GCP Image: ubuntu-1604-xenial-v20180424
50 | 3 nodes added: gcp-nodec-1, gcp-nodec-2, gcp-nodec-3
51 | Create new node? No
52 | Proceed? Yes
53 | ```
54 | To destroy cluster, run the following:
55 |
56 | ```
57 | $ triton-kubernetes destroy cluster
58 | ✔ Backend Provider: Local
59 | ✔ Cluster Manager: dev-manager
60 | ✔ Cluster: dev-cluster
61 | Destroy "dev-cluster"? Yes
62 | ```
63 |
64 | To get cluster, run the following:
65 |
66 | ```
67 | $ triton-kubernetes get cluster
68 | ```
69 |
70 |
71 | The `triton-kubernetes` CLI can take a configuration file (YAML) with the `--config` option to run in silent mode. To read about the YAML arguments, look at the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
72 |
--------------------------------------------------------------------------------
/docs/guide/gcp/account-setup.md:
--------------------------------------------------------------------------------
1 | ## Setting up a GCP account
2 |
3 | This document contains instructions for setting up an account on Google Cloud Platform, and preparing it for the creation of a Kubernetes environment using `triton-kubernetes`.
4 |
5 | 1. **Sign up for a GCP account** on https://cloud.google.com/free/.
6 |
7 | You need to enter a valid credit card to complete the sign-up process. After signing up, you will get a “Free Trial” subscription. With this subscription, you are restricted to a limited number of resources. Most importantly, you have a quota of 8 vCPUs per zone.
8 |
9 | 2. **Log in to GCP Console**: https://console.cloud.google.com.
10 |
11 | 3. **Find the project ID**: After logging into the GCP Console for the first time, you should already have a project called “My First Project”. On the Dashboard page, find the “Project info” mini-window. You should see the project name, project ID and project number for “My First Project”. Copy down the value for project ID.
12 |
13 | 4. **Enable the Compute Engine service**: After you log in to the GCP Console as a new user, you must open the “Compute Engine” module for the first time in order for it to be configured for use. On GCP Console home page, select the “Compute > Compute Engine” menu. You will notice that the system is being prepared, and that the Create button for VM Instances is still disabled. Once the Compute Engine service is ready for use, the Create button is automatically enabled.
14 |
15 | 5. **Create a GCP Account file**: In order for Terraform to access the GCP Compute Engine service via API, it needs to read a JSON file that contains your service account key (i.e. your credential). To create such a JSON file:
16 |
17 | 1. On the GCP Console home page, select the “APIs & services” menu.
18 | 2. Select the “Credentials” menu.
19 | 3. Select the “Create credentials” dropdown list, and pick “service account key”.
20 | 4. In the “Service account” field, select the value “Compute Engine default service account”. Select “JSON” for Key type. Then, click the “Create” button.
21 | 5. A file called `My First Project-xxxxxxxx.json`, which contains your service account key, is automatically downloaded to your laptop. After the file is downloaded, copy it to your `~/.ssh` folder.
22 | 6. Rename the file by removing the space characters, so that the filename will look like `MyFirstProject-xxxxxxxx.json`. (Note: This is because Terraform will have problems reading this file if its filename contains spaces.)
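
For example, assuming the key file from step 5 was copied into `~/.ssh` (the `xxxxxxxx` part is a placeholder):

```bash
mv ~/.ssh/"My First Project-xxxxxxxx.json" ~/.ssh/MyFirstProject-xxxxxxxx.json
```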
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher-k8s-host/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | version = "~> 2.0"
3 | access_key = var.aws_access_key
4 | secret_key = var.aws_secret_key
5 | region = var.aws_region
6 | }
7 |
8 | locals {
9 | rancher_node_role = element(keys(var.rancher_host_labels), 0)
10 | }
11 |
12 | data "template_file" "install_rancher_agent" {
13 | template = file("${path.module}/files/install_rancher_agent.sh.tpl")
14 |
15 | vars = {
16 | hostname = var.hostname
17 | docker_engine_install_url = var.docker_engine_install_url
18 | rancher_api_url = var.rancher_api_url
19 | rancher_cluster_registration_token = var.rancher_cluster_registration_token
20 | rancher_cluster_ca_checksum = var.rancher_cluster_ca_checksum
21 | rancher_node_role = local.rancher_node_role == "control" ? "controlplane" : local.rancher_node_role
22 | rancher_agent_image = var.rancher_agent_image
23 | rancher_registry = var.rancher_registry
24 | rancher_registry_username = var.rancher_registry_username
25 | rancher_registry_password = var.rancher_registry_password
26 | volume_device_name = var.ebs_volume_device_name
27 | volume_mount_path = var.ebs_volume_mount_path
28 | }
29 | }
30 |
31 | resource "aws_instance" "host" {
32 | ami = var.aws_ami_id
33 | instance_type = var.aws_instance_type
34 | subnet_id = var.aws_subnet_id
35 | vpc_security_group_ids = [var.aws_security_group_id]
36 | key_name = var.aws_key_name
37 |
38 | tags = {
39 | Name = var.hostname
40 | }
41 |
42 | user_data = data.template_file.install_rancher_agent.rendered
43 | }
44 |
45 | resource "aws_ebs_volume" "host_volume" {
46 | count = var.ebs_volume_device_name != "" ? 1 : 0
47 |
48 | availability_zone = aws_instance.host.availability_zone
49 | type = var.ebs_volume_type
50 | size = var.ebs_volume_size
51 |
52 | tags = {
53 | Name = "${var.hostname}-volume"
54 | }
55 | }
56 |
57 | resource "aws_volume_attachment" "host_volume_attachment" {
58 | count = var.ebs_volume_device_name != "" ? 1 : 0
59 |
60 | # Forcing detach to prevent VolumeInUse error
61 | force_detach = true
62 |
63 | device_name = var.ebs_volume_device_name
64 | volume_id = aws_ebs_volume.host_volume[0].id
65 | instance_id = aws_instance.host.id
66 | }
67 |
68 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_admin_password" {
6 | description = "The Rancher admin password"
7 | }
8 |
9 | variable "docker_engine_install_url" {
10 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
11 | description = "The URL to the shell script to install the docker engine."
12 | }
13 |
14 | variable "rancher_server_image" {
15 | default = "rancher/rancher:v2.4.11"
16 | description = "The Rancher Server image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
17 | }
18 |
19 | variable "rancher_agent_image" {
20 | default = "rancher/rancher-agent:v2.4.11"
21 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
22 | }
23 |
24 | variable "rancher_registry" {
25 | default = ""
26 | description = "The docker registry to use for rancher server and agent images"
27 | }
28 |
29 | variable "rancher_registry_username" {
30 | default = ""
31 | description = "The username to login as."
32 | }
33 |
34 | variable "rancher_registry_password" {
35 | default = ""
36 | description = "The password to use."
37 | }
38 |
39 | variable "gcp_path_to_credentials" {
40 | description = "Location of GCP JSON credentials file."
41 | }
42 |
43 | variable "gcp_compute_region" {
44 | description = "GCP region to host your network"
45 | }
46 |
47 | variable "gcp_project_id" {
48 | description = "GCP project ID that will be running the instances and managing the network"
49 | }
50 |
51 | variable "gcp_machine_type" {
52 | default = "n1-standard-1"
53 | description = "GCP machine type to launch the instance with"
54 | }
55 |
56 | variable "gcp_instance_zone" {
57 | description = "Zone to deploy GCP machine in"
58 | }
59 |
60 | variable "gcp_image" {
61 | description = "GCP image to be used for instance"
62 | default = "ubuntu-1604-xenial-v20171121a"
63 | }
64 |
65 | variable "gcp_ssh_user" {
66 | default = "ubuntu"
67 | description = "The ssh user to use."
68 | }
69 |
70 | variable "gcp_public_key_path" {
71 | description = "Path to a public key."
72 | default = "~/.ssh/id_rsa.pub"
73 | }
74 |
75 | variable "gcp_private_key_path" {
76 | description = "Path to a private key."
77 | default = "~/.ssh/id_rsa"
78 | }
79 |
80 |
--------------------------------------------------------------------------------
/docs/guide/vSphere/cluster.md:
--------------------------------------------------------------------------------
1 | ## Cluster
2 |
3 | A cluster is a group of physical (or virtual) computers that share resources to accomplish tasks as if they were a single system.
4 | The `create cluster` command allows you to create dedicated etcd, worker, and control nodes. Creating clusters requires a cluster manager to be running already.
5 |
6 | To check if cluster manager exists, run the following:
7 |
8 | ```
9 | $ triton-kubernetes get manager
10 | ```
11 |
12 | To create cluster, run the following:
13 |
14 | ```
15 | $ triton-kubernetes create cluster
16 | ✔ Backend Provider: Local
17 | create cluster called
18 | ✔ Cluster Manager: dev-manager
19 | ✔ Cloud Provider: vSphere
20 | ✔ Cluster Name: dev-cluster
21 | ✔ Kubernetes Version: v1.17.6
22 | ✔ Kubernetes Network Provider: calico
23 | ✔ Private Registry: None
24 | ✔ k8s Registry: None
25 | ✔ vSphere User: [changeme]
26 | ✔ vSphere Password: ****
27 | ✔ vSphere Server: [changeme]
28 | ✔ vSphere Datacenter Name: [changeme]
29 | ✔ vSphere Datastore Name: [changeme]
30 | ✔ vSphere Resource Pool Name: [changeme]
31 | ✔ vSphere Network Name: [changeme]
32 | Create new node? Yes
33 | ✔ Host Type: worker
34 | ✔ Number of nodes to create: 1
35 | ✔ Hostname prefix: worker-host
36 | ✔ VM Template Name: vm-worker-temp
37 | ✔ SSH User: [changeme]
38 | ✔ Private Key Path: ~/.ssh/id_rsa
39 | 1 node added: worker-host-1
40 | Create new node? Yes
41 | ✔ Host Type: etcd
42 | Number of nodes to create? 1
43 | ✔ Hostname prefix: etcd-host
44 | ✔ VM Template Name: vm-etcd-temp
45 | ✔ SSH User: [changeme]
46 | ✔ Private Key Path: ~/.ssh/id_rsa
47 | 1 node added: etcd-host-1
48 | Create new node? Yes
49 | ✔ Host Type: control
50 | Number of nodes to create? 1
51 | ✔ Hostname prefix: control-host
52 | ✔ VM Template Name: vm-control-temp
53 | ✔ SSH User: [changeme]
54 | ✔ Private Key Path: ~/.ssh/id_rsa
55 | 1 node added: control-host-1
56 | Create new node? No
57 | Proceed? Yes
58 | ```
59 | To destroy a cluster, run the following:
60 |
61 | ```
62 | $ triton-kubernetes destroy cluster
63 | ✔ Backend Provider: Local
64 | ✔ Cluster Manager: dev-manager
65 | ✔ Cluster: dev-cluster
66 | Destroy "dev-cluster"? Yes
67 | ```
68 |
69 | To get cluster, run the following:
70 |
71 | ```
72 | $ triton-kubernetes get cluster
73 | ```
74 |
75 |
76 | The `triton-kubernetes` CLI can take a configuration file (YAML) with the `--config` option to run in silent mode. To read more about the YAML arguments, look at the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
77 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher-k8s/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_access_key" {
10 | description = ""
11 | }
12 |
13 | variable "rancher_secret_key" {
14 | description = ""
15 | }
16 |
17 | variable "k8s_version" {
18 | default = "v1.18.12-rancher1-1"
19 | }
20 |
21 | variable "k8s_network_provider" {
22 | default = "flannel"
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for Rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "k8s_registry" {
41 | default = ""
42 | description = "The docker registry to use for Kubernetes images"
43 | }
44 |
45 | variable "k8s_registry_username" {
46 | default = ""
47 | description = "The username to login as."
48 | }
49 |
50 | variable "k8s_registry_password" {
51 | default = ""
52 | description = "The password to use."
53 | }
54 |
55 | variable "azure_subscription_id" {
56 | default = ""
57 | }
58 |
59 | variable "azure_client_id" {
60 | default = ""
61 | }
62 |
63 | variable "azure_client_secret" {
64 | default = ""
65 | }
66 |
67 | variable "azure_tenant_id" {
68 | default = ""
69 | }
70 |
71 | variable "azure_environment" {
72 | default = "public"
73 | }
74 |
75 | variable "azure_location" {
76 | default = "West US 2"
77 | }
78 |
79 | variable "azure_image_publisher" {
80 | default = "Canonical"
81 | }
82 |
83 | variable "azure_image_offer" {
84 | default = "UbuntuServer"
85 | }
86 |
87 | variable "azure_image_sku" {
88 | default = "16.04-LTS"
89 | }
90 |
91 | variable "azure_image_version" {
92 | default = "latest"
93 | }
94 |
95 | variable "azure_virtual_network_name" {
96 | default = "k8s-network"
97 | }
98 |
99 | variable "azure_virtual_network_address_space" {
100 | default = "10.0.0.0/16"
101 | }
102 |
103 | variable "azure_subnet_name" {
104 | default = "k8s-subnet"
105 | }
106 |
107 | variable "azure_subnet_address_prefix" {
108 | default = "10.0.2.0/24"
109 | }
110 |
111 | variable "azurerm_network_security_group_name" {
112 | default = "k8s-firewall"
113 | }
114 |
115 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher-k8s-host/variables.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | description = ""
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_cluster_registration_token" {
10 | }
11 |
12 | variable "rancher_cluster_ca_checksum" {
13 | }
14 |
15 | variable "rancher_host_labels" {
16 | type = map(string)
17 | description = "A map of key/value pairs that get passed to the rancher agent on the host."
18 | }
19 |
20 | variable "rancher_agent_image" {
21 | default = "rancher/rancher-agent:v2.4.11"
22 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "docker_engine_install_url" {
41 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
42 | description = "The URL to the shell script to install the docker engine."
43 | }
44 |
45 | variable "azure_subscription_id" {
46 | }
47 |
48 | variable "azure_client_id" {
49 | }
50 |
51 | variable "azure_client_secret" {
52 | }
53 |
54 | variable "azure_tenant_id" {
55 | }
56 |
57 | variable "azure_environment" {
58 | default = "public"
59 | }
60 |
61 | variable "azure_location" {
62 | }
63 |
64 | variable "azure_resource_group_name" {
65 | }
66 |
67 | variable "azure_network_security_group_id" {
68 | }
69 |
70 | variable "azure_subnet_id" {
71 | }
72 |
73 | variable "azure_size" {
74 | default = "Standard_A0"
75 | }
76 |
77 | variable "azure_image_publisher" {
78 | default = "Canonical"
79 | }
80 |
81 | variable "azure_image_offer" {
82 | default = "UbuntuServer"
83 | }
84 |
85 | variable "azure_image_sku" {
86 | default = "16.04-LTS"
87 | }
88 |
89 | variable "azure_image_version" {
90 | default = "latest"
91 | }
92 |
93 | variable "azure_ssh_user" {
94 | default = "root"
95 | }
96 |
97 | variable "azure_public_key_path" {
98 | default = "~/.ssh/id_rsa.pub"
99 | }
100 |
101 | variable "azure_disk_mount_path" {
102 | default = ""
103 | }
104 |
105 | variable "azure_disk_size" {
106 | default = ""
107 | }
108 |
109 |
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_admin_password" {
6 | description = "The Rancher admin password"
7 | }
8 |
9 | variable "docker_engine_install_url" {
10 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
11 | description = "The URL to the shell script to install the docker engine."
12 | }
13 |
14 | variable "rancher_server_image" {
15 | default = "rancher/rancher:v2.4.11"
16 | description = "The Rancher Server image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
17 | }
18 |
19 | variable "rancher_agent_image" {
20 | default = "rancher/rancher-agent:v2.4.11"
21 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
22 | }
23 |
24 | variable "rancher_registry" {
25 | default = ""
26 | description = "The docker registry to use for rancher server and agent images"
27 | }
28 |
29 | variable "rancher_registry_username" {
30 | default = ""
31 | description = "The username to login as."
32 | }
33 |
34 | variable "rancher_registry_password" {
35 | default = ""
36 | description = "The password to use."
37 | }
38 |
39 | variable "triton_account" {
40 | description = "The Triton account name, usually the username of your root user."
41 | }
42 |
43 | variable "triton_key_path" {
44 | description = "The path to a private key that is authorized to communicate with the Triton API."
45 | }
46 |
47 | variable "triton_key_id" {
48 | description = "The md5 fingerprint of the key at triton_key_path. Obtained by running `ssh-keygen -E md5 -lf ~/path/to.key`"
49 | }
50 |
51 | variable "triton_url" {
52 | description = "The CloudAPI endpoint URL. e.g. https://us-west-1.api.joyent.com"
53 | }
54 |
55 | variable "triton_network_names" {
56 | type = list
57 | description = "List of Triton network names that the node(s) should be attached to."
58 | }
59 |
60 | variable "triton_image_name" {
61 | description = "The name of the Triton image to use."
62 | }
63 |
64 | variable "triton_image_version" {
65 | description = "The version/tag of the Triton image to use."
66 | }
67 |
68 | variable "triton_ssh_user" {
69 | default = "root"
70 | description = "The ssh user to use."
71 | }
72 |
73 | variable "master_triton_machine_package" {
74 | description = "The Triton machine package to use for Rancher master node(s). e.g. k4-highcpu-kvm-1.75G"
75 | }
76 |
--------------------------------------------------------------------------------
/backend/mocks/Backend.go:
--------------------------------------------------------------------------------
1 | package mocks
2 |
3 | import mock "github.com/stretchr/testify/mock"
4 | import state "github.com/joyent/triton-kubernetes/state"
5 |
6 | // Backend is an autogenerated mock type for the Backend type
7 | type Backend struct {
8 | mock.Mock
9 | }
10 |
11 | // DeleteState provides a mock function with given fields: name
12 | func (_m *Backend) DeleteState(name string) error {
13 | ret := _m.Called(name)
14 |
15 | var r0 error
16 | if rf, ok := ret.Get(0).(func(string) error); ok {
17 | r0 = rf(name)
18 | } else {
19 | r0 = ret.Error(0)
20 | }
21 |
22 | return r0
23 | }
24 |
25 | // PersistState provides a mock function with given fields: _a0
26 | func (_m *Backend) PersistState(_a0 state.State) error {
27 | ret := _m.Called(_a0)
28 |
29 | var r0 error
30 | if rf, ok := ret.Get(0).(func(state.State) error); ok {
31 | r0 = rf(_a0)
32 | } else {
33 | r0 = ret.Error(0)
34 | }
35 |
36 | return r0
37 | }
38 |
39 | // State provides a mock function with given fields: name
40 | func (_m *Backend) State(name string) (state.State, error) {
41 | ret := _m.Called(name)
42 |
43 | var r0 state.State
44 | if rf, ok := ret.Get(0).(func(string) state.State); ok {
45 | r0 = rf(name)
46 | } else {
47 | r0 = ret.Get(0).(state.State)
48 | }
49 |
50 | var r1 error
51 | if rf, ok := ret.Get(1).(func(string) error); ok {
52 | r1 = rf(name)
53 | } else {
54 | r1 = ret.Error(1)
55 | }
56 |
57 | return r0, r1
58 | }
59 |
60 | // StateTerraformConfig provides a mock function with given fields: name
61 | func (_m *Backend) StateTerraformConfig(name string) (string, interface{}) {
62 | ret := _m.Called(name)
63 |
64 | var r0 string
65 | if rf, ok := ret.Get(0).(func(string) string); ok {
66 | r0 = rf(name)
67 | } else {
68 | r0 = ret.Get(0).(string)
69 | }
70 |
71 | var r1 interface{}
72 | if rf, ok := ret.Get(1).(func(string) interface{}); ok {
73 | r1 = rf(name)
74 | } else {
75 | if ret.Get(1) != nil {
76 | r1 = ret.Get(1).(interface{})
77 | }
78 | }
79 |
80 | return r0, r1
81 | }
82 |
83 | // States provides a mock function with given fields:
84 | func (_m *Backend) States() ([]string, error) {
85 | ret := _m.Called()
86 |
87 | var r0 []string
88 | if rf, ok := ret.Get(0).(func() []string); ok {
89 | r0 = rf()
90 | } else {
91 | if ret.Get(0) != nil {
92 | r0 = ret.Get(0).([]string)
93 | }
94 | }
95 |
96 | var r1 error
97 | if rf, ok := ret.Get(1).(func() error); ok {
98 | r1 = rf()
99 | } else {
100 | r1 = ret.Error(1)
101 | }
102 |
103 | return r0, r1
104 | }
105 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s-host/main.tf:
--------------------------------------------------------------------------------
1 | provider "google" {
2 | credentials = file(var.gcp_path_to_credentials)
3 | project = var.gcp_project_id
4 | region = var.gcp_compute_region
5 | }
6 |
7 | locals {
8 | rancher_node_role = element(keys(var.rancher_host_labels), 0)
9 | }
10 |
11 | data "template_file" "install_rancher_agent" {
12 | template = file("${path.module}/files/install_rancher_agent.sh.tpl")
13 |
14 | vars = {
15 | hostname = var.hostname
16 | docker_engine_install_url = var.docker_engine_install_url
17 | rancher_api_url = var.rancher_api_url
18 | rancher_cluster_registration_token = var.rancher_cluster_registration_token
19 | rancher_cluster_ca_checksum = var.rancher_cluster_ca_checksum
20 | rancher_node_role = local.rancher_node_role == "control" ? "controlplane" : local.rancher_node_role
21 | rancher_agent_image = var.rancher_agent_image
22 | rancher_registry = var.rancher_registry
23 | rancher_registry_username = var.rancher_registry_username
24 | rancher_registry_password = var.rancher_registry_password
25 | disk_mount_path = var.gcp_disk_mount_path
26 | }
27 | }
28 |
29 | resource "google_compute_instance" "host" {
30 | name = var.hostname
31 | machine_type = var.gcp_machine_type
32 | zone = var.gcp_instance_zone
33 | project = var.gcp_project_id
34 |
35 | tags = [var.gcp_compute_firewall_host_tag]
36 |
37 | boot_disk {
38 | initialize_params {
39 | image = var.gcp_image
40 | }
41 | }
42 |
43 | # There's no way to specify 0 attached_disk blocks
44 | # We need to wait for Terraform for_each support https://github.com/hashicorp/terraform/issues/7034
45 | # This way we'll only add an attached_disk block when there are disks to attach
46 | # attached_disk {
47 | # source = "${element(concat(google_compute_disk.host_volume.*.self_link, list("")), 0)}"
48 | # }
49 |
50 | network_interface {
51 | network = var.gcp_compute_network_name
52 |
53 | access_config {
54 | // Ephemeral IP
55 | }
56 | }
57 | service_account {
58 | scopes = ["https://www.googleapis.com/auth/cloud-platform"]
59 | }
60 | metadata_startup_script = data.template_file.install_rancher_agent.rendered
61 | }
62 |
63 | resource "google_compute_disk" "host_volume" {
64 | count = var.gcp_disk_type == "" ? 0 : 1
65 |
66 | type = var.gcp_disk_type
67 | name = "${var.hostname}-volume"
68 | zone = var.gcp_instance_zone
69 | size = var.gcp_disk_size
70 | }
71 |
72 |
--------------------------------------------------------------------------------
/terraform/modules/k8s-backup-s3/main.tf:
--------------------------------------------------------------------------------
1 | resource "null_resource" "setup_ark_backup" {
2 | provisioner "local-exec" {
3 | command = "curl -LO https://github.com/heptio/ark/releases/download/v0.7.1/ark-v0.7.1-linux-arm64.tar.gz"
4 | }
5 |
6 | provisioner "local-exec" {
7 | command = "tar xvf ark-v0.7.1-linux-arm64.tar.gz"
8 | }
9 |
10 | provisioner "local-exec" {
11 | command = "curl -LO https://github.com/heptio/ark/archive/v0.7.1.tar.gz"
12 | }
13 |
14 | provisioner "local-exec" {
15 | command = "tar xvf v0.7.1.tar.gz"
16 | }
17 |
18 | provisioner "local-exec" {
19 | # Get kubernetes config yaml from Rancher, write it to disk
20 | command = < kubeconfig.yaml
28 |
29 | EOT
30 |
31 | }
32 |
33 | provisioner "local-exec" {
34 | command = < credentials-ark </${var.aws_s3_bucket}/g' ark-0.7.1/examples/aws/00-ark-config.yaml
60 | sed -i '.original' 's//${var.aws_region}/g' ark-0.7.1/examples/aws/00-ark-config.yaml
61 |
62 | EOT
63 |
64 | }
65 |
66 | provisioner "local-exec" {
67 | command = "kubectl apply -f ark-0.7.1/examples/aws/00-ark-config.yaml --kubeconfig=kubeconfig.yaml"
68 | }
69 |
70 | provisioner "local-exec" {
71 | command = "kubectl apply -f ark-0.7.1/examples/common/10-deployment.yaml --kubeconfig=kubeconfig.yaml"
72 | }
73 |
74 | provisioner "local-exec" {
75 | command = "rm -rf ark ark-* credentials-ark kubeconfig.yaml v0.7.1.tar.gz"
76 | }
77 | }
78 |
79 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_admin_password" {
6 | description = "The Rancher admin password"
7 | }
8 |
9 | variable "docker_engine_install_url" {
10 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
11 | description = "The URL to the shell script to install the docker engine."
12 | }
13 |
14 | variable "rancher_server_image" {
15 | default = "rancher/rancher:v2.4.11"
16 | description = "The Rancher Server image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
17 | }
18 |
19 | variable "rancher_agent_image" {
20 | default = "rancher/rancher-agent:v2.4.11"
21 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
22 | }
23 |
24 | variable "rancher_registry" {
25 | default = ""
26 | description = "The docker registry to use for rancher server and agent images"
27 | }
28 |
29 | variable "rancher_registry_username" {
30 | default = ""
31 | description = "The username to login as."
32 | }
33 |
34 | variable "rancher_registry_password" {
35 | default = ""
36 | description = "The password to use."
37 | }
38 |
39 | variable "aws_access_key" {
40 | description = "AWS access key"
41 | }
42 |
43 | variable "aws_secret_key" {
44 | description = "AWS secret access key"
45 | }
46 |
47 | variable "aws_region" {
48 | description = "AWS region to host your network"
49 | }
50 |
51 | variable "aws_vpc_cidr" {
52 | description = "CIDR for VPC"
53 | default = "10.0.0.0/16"
54 | }
55 |
56 | variable "aws_subnet_cidr" {
57 | description = "CIDR for subnet"
58 | default = "10.0.2.0/24"
59 | }
60 |
61 | variable "aws_ami_id" {
62 | description = "Base AMI to launch the instances with"
63 | }
64 |
65 | variable "aws_instance_type" {
66 | default = "t2.micro"
67 | description = "The AWS instance type to use for Kubernetes compute node(s). Defaults to t2.micro."
68 | }
69 |
70 | variable "aws_key_name" {
71 | description = "The AWS key name to use to deploy the instance."
72 | }
73 |
74 | variable "aws_public_key_path" {
75 | description = "Path to a public key. If set, a key_pair will be made in AWS named aws_key_name"
76 | default = "~/.ssh/id_rsa.pub"
77 | }
78 |
79 | variable "aws_private_key_path" {
80 | description = "Path to a private key."
81 | default = "~/.ssh/id_rsa"
82 | }
83 |
84 | variable "aws_ssh_user" {
85 | default = "ubuntu"
86 | description = "The ssh user to use."
87 | }
88 |
89 |
--------------------------------------------------------------------------------
/terraform/modules/gcp-rancher-k8s-host/variables.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | description = ""
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_cluster_registration_token" {
10 | }
11 |
12 | variable "rancher_cluster_ca_checksum" {
13 | }
14 |
15 | variable "rancher_host_labels" {
16 | type = map(string)
17 | description = "A map of key/value pairs that get passed to the rancher agent on the host."
18 | }
19 |
20 | variable "rancher_agent_image" {
21 | default = "rancher/rancher-agent:v2.4.11"
22 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "docker_engine_install_url" {
41 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
42 | description = "The URL to the shell script to install the docker engine."
43 | }
44 |
45 | variable "gcp_path_to_credentials" {
46 | description = "Location of GCP JSON credentials file."
47 | }
48 |
49 | variable "gcp_compute_region" {
50 | description = "GCP region to host your network"
51 | }
52 |
53 | variable "gcp_project_id" {
54 | description = "GCP project ID that will be running the instances and managing the network"
55 | }
56 |
57 | variable "gcp_machine_type" {
58 | default = "n1-standard-1"
59 | description = "GCP machine type to launch the instance with"
60 | }
61 |
62 | variable "gcp_instance_zone" {
63 | description = "Zone to deploy GCP machine in"
64 | }
65 |
66 | variable "gcp_image" {
67 | description = "GCP image to be used for instance"
68 | default = "ubuntu-1604-xenial-v20171121a"
69 | }
70 |
71 | variable "gcp_compute_network_name" {
72 | description = "Network to deploy GCP machine in"
73 | }
74 |
75 | variable "gcp_compute_firewall_host_tag" {
76 | description = "Tag that should be applied to nodes so the firewall source rules can be applied"
77 | }
78 |
79 | variable "gcp_disk_type" {
80 | default = ""
81 | description = "The disk type which can be either 'pd-ssd' for SSD or 'pd-standard' for Standard"
82 | }
83 |
84 | variable "gcp_disk_size" {
85 | default = ""
86 | description = "The disk size"
87 | }
88 |
89 | variable "gcp_disk_mount_path" {
90 | default = ""
91 | description = "The mount path"
92 | }
93 |
94 |
--------------------------------------------------------------------------------
/destroy/manager.go:
--------------------------------------------------------------------------------
1 | package destroy
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "sort"
7 |
8 | "github.com/joyent/triton-kubernetes/backend"
9 | "github.com/joyent/triton-kubernetes/shell"
10 | "github.com/joyent/triton-kubernetes/util"
11 |
12 | "github.com/manifoldco/promptui"
13 | "github.com/spf13/viper"
14 | )
15 |
16 | func DeleteManager(remoteBackend backend.Backend) error {
17 | nonInteractiveMode := viper.GetBool("non-interactive")
18 | clusterManagers, err := remoteBackend.States()
19 | if err != nil {
20 | return err
21 | }
22 |
23 | if len(clusterManagers) == 0 {
24 | return fmt.Errorf("No cluster managers, please create a cluster manager before creating a kubernetes cluster.")
25 | }
26 |
27 | selectedClusterManager := ""
28 | if viper.IsSet("cluster_manager") {
29 | selectedClusterManager = viper.GetString("cluster_manager")
30 | } else if nonInteractiveMode {
31 | return errors.New("cluster_manager must be specified")
32 | } else {
33 | sort.Strings(clusterManagers)
34 | prompt := promptui.Select{
35 | Label: "Cluster Manager",
36 | Items: clusterManagers,
37 | Templates: &promptui.SelectTemplates{
38 | Label: "{{ . }}?",
39 | Active: fmt.Sprintf(`%s {{ . | underline }}`, promptui.IconSelect),
40 | Inactive: ` {{ . }}`,
41 | Selected: fmt.Sprintf(`{{ "%s" | green }} {{ "Cluster Manager:" | bold}} {{ . }}`, promptui.IconGood),
42 | },
43 | }
44 |
45 | _, value, err := prompt.Run()
46 | if err != nil {
47 | return err
48 | }
49 |
50 | selectedClusterManager = value
51 | }
52 |
53 | // Verify selected cluster manager exists
54 | found := false
55 | for _, clusterManager := range clusterManagers {
56 | if selectedClusterManager == clusterManager {
57 | found = true
58 | break
59 | }
60 | }
61 | if !found {
62 | return fmt.Errorf("Selected cluster manager '%s' does not exist.", selectedClusterManager)
63 | }
64 |
65 | state, err := remoteBackend.State(selectedClusterManager)
66 | if err != nil {
67 | return err
68 | }
69 |
70 | if !nonInteractiveMode {
71 | // Confirmation
72 | label := fmt.Sprintf("Are you sure you want to destroy %q", selectedClusterManager)
73 | selected := fmt.Sprintf("Destroy %q", selectedClusterManager)
74 | confirmed, err := util.PromptForConfirmation(label, selected)
75 | if err != nil {
76 | return err
77 | }
78 | if !confirmed {
79 | fmt.Println("Destroy manager canceled.")
80 | return nil
81 | }
82 | }
83 |
84 | // Run Terraform destroy
85 | err = shell.RunTerraformDestroyWithState(state, []string{})
86 | if err != nil {
87 | return err
88 | }
89 |
90 | // After terraform succeeds, delete remote state
91 | err = remoteBackend.DeleteState(selectedClusterManager)
92 | if err != nil {
93 | return err
94 | }
95 |
96 | return nil
97 | }
98 |
--------------------------------------------------------------------------------
/docs/guide/triton/cluster.md:
--------------------------------------------------------------------------------
1 | ## Cluster
2 |
3 | A cluster is a group of physical (or virtual) computers that share resources to accomplish tasks as if they were a single system.
4 | The `create cluster` command allows you to create dedicated nodes for the etcd, worker, and control plane roles. Creating a cluster requires a cluster manager to be running already.
5 |
6 | To check if a cluster manager exists, run the following:
7 |
8 | ```
9 | $ triton-kubernetes get manager
10 | ```
11 |
12 | To create a cluster, run the following:
13 |
14 | ```
15 | $ triton-kubernetes create cluster
16 | ✔ Backend Provider: Local
17 | create cluster called
18 | ✔ Cluster Manager: dev-manager
19 | ✔ Cloud Provider: Triton
20 | ✔ Cluster Name: dev-cluster
21 | ✔ Kubernetes Version: v1.17.6
22 | ✔ Kubernetes Network Provider: calico
23 | ✔ Private Registry: None
24 | ✔ k8s Registry: None
25 | ✔ Triton Account Name: [changeme]
26 | ✔ Triton Key Path: ~/.ssh/id_rsa
27 | ✔ Triton URL: https://us-east-1.api.joyent.com
28 | Create new node? Yes
29 | ✔ Host Type: etcd
30 | Number of nodes to create? 3
31 | ✔ Hostname prefix: dev-etcd
32 | ✔ Triton Network Attached: Joyent-SDC-Public
33 | Attach another? No
34 | ✔ Triton Image: ubuntu-certified-16.04@20180222
35 | ✔ Triton SSH User: ubuntu
36 | ✔ Triton Machine Package: k4-highcpu-kvm-1.75G
37 | 3 nodes added: dev-etcd-1, dev-etcd-2, dev-etcd-3
38 | Create new node? Yes
39 | ✔ Host Type: worker
40 | ✔ Number of nodes to create: 3
41 | ✔ Hostname prefix: dev-worker
42 | ✔ Triton Network Attached: Joyent-SDC-Public
43 | Attach another? No
44 | ✔ Triton Image: ubuntu-certified-16.04@20180222
45 | ✔ Triton SSH User: ubuntu
46 | ✔ Triton Machine Package: k4-highcpu-kvm-1.75G
47 | 3 nodes added: dev-worker-1, dev-worker-2, dev-worker-3
48 | Create new node? Yes
49 | ✔ Host Type: control
50 | Number of nodes to create? 3
51 | ✔ Hostname prefix: dev-control
52 | ✔ Triton Network Attached: Joyent-SDC-Public
53 | Attach another? No
54 | ✔ Triton Image: ubuntu-certified-16.04@20180222
55 | ✔ Triton SSH User: ubuntu
56 | ✔ Triton Machine Package: k4-highcpu-kvm-1.75G
57 | 3 nodes added: dev-control-1, dev-control-2, dev-control-3
58 | Create new node? No
59 | Proceed? Yes
60 | ```
61 |
62 | To destroy a cluster, run the following:
63 |
64 | ```
65 | $ triton-kubernetes destroy cluster
66 | ✔ Backend Provider: Local
67 | ✔ Cluster Manager: dev-manager
68 | ✔ Cluster: dev-cluster
69 | Destroy "dev-cluster"? Yes
70 | ```
71 |
72 | To get the details of a cluster, run the following:
73 |
74 | ```
75 | $ triton-kubernetes get cluster
76 | ```
77 |
78 |
79 | The `triton-kubernetes` CLI can take a YAML configuration file with the `--config` option to run in silent mode; a minimal sketch is shown below. To read more about the YAML arguments, look at the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
80 |
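For example, a silent-mode config for the session above could look like the sketch below. The `backend_provider`, `cluster_manager`, `cluster_name`, and `triton_*` keys mirror the interactive prompts; node definitions are omitted here and the exact accepted values are described in the silent-install documentation.

```
# cluster.yaml -- minimal sketch, values taken from the interactive session above
backend_provider: local
cluster_manager: dev-manager
cluster_name: dev-cluster
triton_account: changeme
triton_key_path: ~/.ssh/id_rsa
triton_url: https://us-east-1.api.joyent.com
```

It would then be run as `triton-kubernetes create cluster --non-interactive --config cluster.yaml`.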
--------------------------------------------------------------------------------
/docs/guide/azure/cluster.md:
--------------------------------------------------------------------------------
1 | ## Cluster
2 |
3 | A cluster is a group of physical (or virtual) computers that share resources to accomplish tasks as if they were a single system.
4 | The `create cluster` command allows you to create dedicated nodes for the etcd, worker, and control plane roles. Creating a cluster requires a cluster manager to be running already.
5 |
6 | To check if a cluster manager exists, run the following:
7 |
8 | ```
9 | $ triton-kubernetes get manager
10 | ```
11 |
12 | To create a cluster, run the following:
13 |
14 | ```
15 | $ triton-kubernetes create cluster
16 | ✔ Backend Provider: Local
17 | create cluster called
18 | ✔ Cluster Manager: dev-manager
19 | ✔ Cloud Provider: Azure
20 | ✔ Cluster Name: azure-cluster
21 | ✔ Kubernetes Version: v1.17.6
22 | ✔ Kubernetes Network Provider: calico
23 | ✔ Private Registry: None
24 | ✔ k8s Registry: None
25 | ✔ Azure Subscription ID: 0535d7cf-a52e-491b-b7bc-37f674787ab8
26 | ✔ Azure Client ID: 22520959-c5bb-499a-b3d0-f97e8849385e
27 | ✔ Azure Client Secret: a19ed50f-f7c1-4ef4-9862-97bc880d2536
28 | ✔ Azure Tenant ID: 324e4a5e-53a9-4be4-a3a5-fcd3e79f2c5b
29 | ✔ Azure Environment: public
30 | ✔ Azure Location: West US
31 | Create new node? Yes
32 | ✔ Host Type: worker
33 | ✔ Number of nodes to create: 3
34 | ✔ Hostname prefix: azure-node-worker
35 | ✔ Azure Size: Standard_B1ms
36 | ✔ Azure SSH User: ubuntu
37 | ✔ Azure Public Key Path: ~/.ssh/id_rsa.pub
38 | Disk created? No
39 | 3 nodes added: azure-node-worker-1, azure-node-worker-2, azure-node-worker-3
40 | Create new node? Yes
41 | ✔ Host Type: etcd
42 | Number of nodes to create? 3
43 | ✔ Hostname prefix: azure-node-etcd
44 | ✔ Azure Size: Standard_B1ms
45 | ✔ Azure SSH User: ubuntu
46 | ✔ Azure Public Key Path: ~/.ssh/id_rsa.pub
47 | Disk created? No
48 | 3 nodes added: azure-node-etcd-1, azure-node-etcd-2, azure-node-etcd-3
49 | Create new node? Yes
50 | ✔ Host Type: control
51 | Number of nodes to create? 3
52 | ✔ Hostname prefix: azure-node-control
53 | ✔ Azure Size: Standard_B1ms
54 | ✔ Azure SSH User: ubuntu
55 | ✔ Azure Public Key Path: ~/.ssh/id_rsa.pub
56 | Disk created? No
57 | 3 nodes added: azure-node-control-1, azure-node-control-2, azure-node-control-3
58 | Create new node? No
59 | Proceed? Yes
60 | ```
61 |
62 |
63 | To destroy a cluster, run the following:
64 |
65 | ```
66 | $ triton-kubernetes destroy cluster
67 | ✔ Backend Provider: Local
68 | ✔ Cluster Manager: dev-manager
69 | ✔ Cluster: azure-cluster
70 | Destroy "azure-cluster"? Yes
71 | ```
72 |
73 | To get the details of a cluster, run the following:
74 |
75 | ```
76 | $ triton-kubernetes get cluster
77 | ```
78 |
79 |
80 | The `triton-kubernetes` CLI can take a YAML configuration file with the `--config` option to run in silent mode; a minimal sketch is shown below. To read about the YAML arguments, look at the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
81 |
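For example, a silent-mode config for the session above could look like the sketch below. The `azure_*` key names are assumed to mirror the interactive prompts and the Terraform variable names; confirm the exact keys against the silent-install documentation.

```
# azure-cluster.yaml -- minimal sketch; azure_* key names are assumptions
backend_provider: local
cluster_manager: dev-manager
cluster_name: azure-cluster
azure_subscription_id: 0535d7cf-a52e-491b-b7bc-37f674787ab8
azure_client_id: 22520959-c5bb-499a-b3d0-f97e8849385e
azure_client_secret: a19ed50f-f7c1-4ef4-9862-97bc880d2536
azure_tenant_id: 324e4a5e-53a9-4be4-a3a5-fcd3e79f2c5b
azure_environment: public
azure_location: West US
```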
--------------------------------------------------------------------------------
/terraform/modules/triton-rancher-k8s-host/variables.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | description = ""
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_cluster_registration_token" {}
10 |
11 | variable "rancher_cluster_ca_checksum" {}
12 |
13 | variable "rancher_host_labels" {
14 | type = map
15 | description = "A map of key/value pairs that get passed to the rancher agent on the host."
16 | }
17 |
18 | variable "rancher_agent_image" {
19 | default = "rancher/rancher-agent:v2.4.11"
20 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
21 | }
22 |
23 | variable "rancher_registry" {
24 | default = ""
25 | description = "The docker registry to use for rancher images"
26 | }
27 |
28 | variable "rancher_registry_username" {
29 | default = ""
30 | description = "The username to login as."
31 | }
32 |
33 | variable "rancher_registry_password" {
34 | default = ""
35 | description = "The password to use."
36 | }
37 |
38 | variable "docker_engine_install_url" {
39 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
40 | description = "The URL to the shell script to install the docker engine."
41 | }
42 |
43 | variable "triton_account" {
44 | default = ""
45 | description = "The Triton account name, usually the username of your root user."
46 | }
47 |
48 | variable "triton_key_path" {
49 | default = ""
50 | description = "The path to a private key that is authorized to communicate with the Triton API."
51 | }
52 |
53 | variable "triton_key_id" {
54 | default = ""
55 | description = "The md5 fingerprint of the key at triton_key_path. Obtained by running `ssh-keygen -E md5 -lf ~/path/to.key`"
56 | }
57 |
58 | variable "triton_url" {
59 | default = ""
60 | description = "The CloudAPI endpoint URL. e.g. https://us-west-1.api.joyent.com"
61 | }
62 |
63 | variable "triton_network_names" {
64 | type = list
65 | description = "List of Triton network names that the node(s) should be attached to."
66 |
67 | default = [
68 | "sdc_nat",
69 | ]
70 | }
71 |
72 | variable "triton_image_name" {
73 | default = "ubuntu-certified-18.04"
74 | description = "The name of the Triton image to use."
75 | }
76 |
77 | variable "triton_image_version" {
78 | default = "20190627.1.1"
79 | description = "The version/tag of the Triton image to use."
80 | }
81 |
82 | variable "triton_ssh_user" {
83 | default = "ubuntu"
84 | description = "The ssh user to use."
85 | }
86 |
87 | variable "triton_machine_package" {
88 | default = "sample-bhyve-flexible-1G"
89 | description = "The Triton machine package to use for this host. Defaults to sample-bhyve-flexible-1G."
90 | }
91 |
--------------------------------------------------------------------------------
/terraform/modules/k8s-backup-manta/files/minio-manta-deployment.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2017 the Heptio Ark contributors.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | ---
16 | apiVersion: apps/v1beta1
17 | kind: Deployment
18 | metadata:
19 | namespace: heptio-ark-server
20 | name: minio
21 | labels:
22 | component: minio
23 | spec:
24 | strategy:
25 | type: Recreate
26 | template:
27 | metadata:
28 | labels:
29 | component: minio
30 | spec:
31 | volumes:
32 | - name: storage
33 | emptyDir: {}
34 | containers:
35 | - name: minio
36 | image: minio/minio:latest
37 | imagePullPolicy: IfNotPresent
38 | args:
39 | - gateway
40 | - manta
41 | env:
42 | - name: MINIO_ACCESS_KEY
43 | value: ${triton_account}
44 | - name: MINIO_SECRET_KEY
45 | value: ${triton_key_id}
46 | - name: MANTA_KEY_MATERIAL
47 | value: |
48 | ${triton_key_material}
49 | - name: MANTA_SUBUSER
50 | value: ${manta_subuser}
51 | ports:
52 | - containerPort: 9000
53 | volumeMounts:
54 | - name: storage
55 | mountPath: "/storage"
56 |
57 | ---
58 | apiVersion: v1
59 | kind: Service
60 | metadata:
61 | namespace: heptio-ark-server
62 | name: minio
63 | labels:
64 | component: minio
65 | spec:
66 | type: ClusterIP
67 | ports:
68 | - port: 9000
69 | targetPort: 9000
70 | protocol: TCP
71 | selector:
72 | component: minio
73 |
74 | ---
75 | apiVersion: batch/v1
76 | kind: Job
77 | metadata:
78 | namespace: heptio-ark-server
79 | name: minio-setup
80 | labels:
81 | component: minio
82 | spec:
83 | template:
84 | metadata:
85 | name: minio-setup
86 | spec:
87 | restartPolicy: OnFailure
88 | containers:
89 | - name: mc
90 | image: minio/mc:latest
91 | imagePullPolicy: IfNotPresent
92 | env:
93 | - name: MINIO_KEY_MATERIAL
94 | value: |
95 | ${triton_key_material}
96 | command:
97 | - /bin/sh
98 | - -c
99 | - "mc config host add ark http://minio:9000 ${triton_account} ${triton_key_id} && mc mb -p ark/ark"
100 |
--------------------------------------------------------------------------------
/docs/guide/aws/cluster.md:
--------------------------------------------------------------------------------
1 | ## Cluster
2 |
3 | A cluster is a group of physical (or virtual) computers that share resources to accomplish tasks as if they were a single system.
4 | The `create cluster` command allows you to create dedicated nodes for the etcd, worker, and control plane roles. Creating a cluster requires a cluster manager to be running already.
5 |
6 | To check if a cluster manager exists, run the following:
7 |
8 | ```
9 | $ triton-kubernetes get manager
10 | ```
11 |
12 | To create a cluster, run the following:
13 |
14 | ```
15 | $ triton-kubernetes create cluster
16 | ✔ Backend Provider: Local
17 | create cluster called
18 | ✔ Cluster Manager: dev-manager
19 | ✔ Cloud Provider: AWS
20 | ✔ Cluster Name: dev-cluster
21 | ✔ Kubernetes Version: v1.17.6
22 | ✔ Kubernetes Network Provider: calico
23 | ✔ Private Registry: None
24 | ✔ k8s Registry: None
25 | ✔ AWS Access Key: [changeme]
26 | ✔ AWS Secret Key: [changeme]
27 | ✔ AWS Region: us-east-1
28 | ✔ Name for new aws public key: triton-kubernetes_public_key
29 | ✔ AWS Public Key Path: ~/.ssh/id_rsa.pub
30 | ✔ AWS VPC CIDR: 10.0.0.0/16
31 | ✔ AWS Subnet CIDR: 10.0.2.0/24
32 | Create new node? Yes
33 | ✔ Host Type: worker
34 | ✔ Number of nodes to create: 3
35 | ✔ Hostname prefix: dev-worker
36 | ✔ AWS AMI: ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20180405
37 | ✔ AWS Instance Type: t2.micro
38 | Volume Created? Yes
39 | ✔ EBS Volume Device Name: /dev/sdf
40 | ✔ EBS Volume Mount Path: /mnt/triton-kubernetes
41 | EBS Volume Type? General Purpose SSD
42 | ✔ EBS Volume Size in GiB: 100
43 | 3 nodes added: dev-worker-1, dev-worker-2, dev-worker-3
44 | Create new node? Yes
45 | ✔ Host Type: etcd
46 | Number of nodes to create? 3
47 | ✔ Hostname prefix: dev-etcd
48 | ✔ AWS AMI: ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20180405
49 | ✔ AWS Instance Type: t2.micro
50 | Volume Created? No
51 | 3 nodes added: dev-etcd-1, dev-etcd-2, dev-etcd-3
52 | Create new node? Yes
53 | ✔ Host Type: control
54 | Number of nodes to create? 3
55 | ✔ Hostname prefix: dev-control
56 | ✔ AWS AMI: ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20180405
57 | ✔ AWS Instance Type: t2.micro
58 | Volume Created? No
59 | 3 nodes added: dev-control-1, dev-control-2, dev-control-3
60 | Create new node? No
61 | Proceed? Yes
62 | ```
63 |
64 | To destroy a cluster, run the following:
65 |
66 | ```
67 | $ triton-kubernetes destroy cluster
68 | ✔ Backend Provider: Local
69 | ✔ Cluster Manager: dev-manager
70 | ✔ Cluster: dev-cluster
71 | Destroy "dev-cluster"? Yes
72 | ```
73 |
74 | To get the details of a cluster, run the following:
75 |
76 | ```
77 | $ triton-kubernetes get cluster
78 | ```
79 |
80 |
81 | The `triton-kubernetes` CLI can take a YAML configuration file with the `--config` option to run in silent mode; a minimal sketch is shown below. To read about the YAML arguments, look at the [silent-install documentation](https://github.com/joyent/triton-kubernetes/tree/master/docs/guide/silent-install-yaml.md).
82 |
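For example, a silent-mode config for the session above could look like the sketch below. The `aws_*` key names are assumed to mirror the interactive prompts and the Terraform variable names; confirm the exact keys against the silent-install documentation.

```
# aws-cluster.yaml -- minimal sketch; aws_* key names are assumptions
backend_provider: local
cluster_manager: dev-manager
cluster_name: dev-cluster
aws_access_key: changeme
aws_secret_key: changeme
aws_region: us-east-1
```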
--------------------------------------------------------------------------------
/cmd/root.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | "fmt"
5 | "os"
6 |
7 | "github.com/spf13/cobra"
8 | "github.com/spf13/viper"
9 | )
10 |
11 | var cfgFile string
12 |
13 | // This represents the base command when called without any subcommands
14 | var rootCmd = &cobra.Command{
15 | Use: "triton-kubernetes",
16 | Short: "A tool to deploy Kubernetes to multiple cloud providers",
17 | Long: `This is a multi-cloud Kubernetes solution. Triton Kubernetes has a global
18 | cluster manager which can manage multiple clusters across regions/data-centers and/or clouds.
19 | The cluster manager can run anywhere (Triton/AWS/Azure/GCP/Baremetal) and manage Kubernetes environments running on any region of any supported cloud.
20 | For an example set up, look at the How-To section.`,
21 | }
22 |
23 | // Execute adds all child commands to the root command and sets flags appropriately.
24 | // This is called by main.main(). It only needs to happen once to the rootCmd.
25 | func Execute() {
26 | if err := rootCmd.Execute(); err != nil {
27 | fmt.Println(err)
28 | os.Exit(-1)
29 | }
30 | }
31 |
32 | func init() {
33 | cobra.OnInitialize(initConfig)
34 |
35 | // Here you will define your flags and configuration settings.
36 | // Cobra supports Persistent Flags, which, if defined here,
37 | // will be global for your application.
38 |
39 | rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.triton-kubernetes.yaml)")
40 | rootCmd.PersistentFlags().Bool("non-interactive", false, "Prevent interactive prompts")
41 | rootCmd.PersistentFlags().Bool("terraform-configuration", false, "Create terraform configuration only")
42 |
43 | // Cobra also supports local flags, which will only run
44 | // when this action is called directly.
45 | rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
46 | }
47 |
48 | // initConfig reads in config file and ENV variables if set.
49 | func initConfig() {
50 | viper.BindPFlag("non-interactive", rootCmd.Flags().Lookup("non-interactive"))
51 | if viper.GetBool("non-interactive") {
52 | fmt.Println("Running in non interactive mode")
53 | }
54 | viper.BindPFlag("terraform-configuration", rootCmd.Flags().Lookup("terraform-configuration"))
55 | if viper.GetBool("terraform-configuration") {
56 | fmt.Println("Will not create infrastructure, only terraform configuration")
57 | }
58 | if cfgFile != "" { // enable ability to specify config file via flag
59 | viper.SetConfigFile(cfgFile)
60 | } else {
61 | viper.SetConfigName(".triton-kubernetes") // name of config file (without extension)
62 | viper.AddConfigPath("$HOME") // adding home directory as first search path
63 | }
64 |
65 | viper.AutomaticEnv() // read in environment variables that match
66 |
67 | // If a config file is found, read it in.
68 | if err := viper.ReadInConfig(); err == nil {
69 | fmt.Println("Using config file:", viper.ConfigFileUsed())
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/terraform/modules/aws-rancher-k8s-host/variables.tf:
--------------------------------------------------------------------------------
1 | variable "hostname" {
2 | description = ""
3 | }
4 |
5 | variable "rancher_api_url" {
6 | description = ""
7 | }
8 |
9 | variable "rancher_cluster_registration_token" {
10 | }
11 |
12 | variable "rancher_cluster_ca_checksum" {
13 | }
14 |
15 | variable "rancher_host_labels" {
16 | type = map(string)
17 | description = "A map of key/value pairs that get passed to the rancher agent on the host."
18 | }
19 |
20 | variable "rancher_agent_image" {
21 | default = "rancher/rancher-agent:v2.4.11"
22 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
23 | }
24 |
25 | variable "rancher_registry" {
26 | default = ""
27 | description = "The docker registry to use for rancher images"
28 | }
29 |
30 | variable "rancher_registry_username" {
31 | default = ""
32 | description = "The username to login as."
33 | }
34 |
35 | variable "rancher_registry_password" {
36 | default = ""
37 | description = "The password to use."
38 | }
39 |
40 | variable "docker_engine_install_url" {
41 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
42 | description = "The URL to the shell script to install the docker engine."
43 | }
44 |
45 | variable "aws_access_key" {
46 | description = "AWS access key"
47 | }
48 |
49 | variable "aws_secret_key" {
50 | description = "AWS secret access key"
51 | }
52 |
53 | variable "aws_region" {
54 | description = "AWS region to host your network"
55 | }
56 |
57 | variable "aws_ami_id" {
58 | description = "Base AMI to launch the instances with"
59 | # default="ami-08a099fcfc36dff3f"
60 | }
61 |
62 | variable "aws_instance_type" {
63 | default = "t2.micro"
64 | description = "The AWS instance type to use for Kubernetes compute node(s). Defaults to t2.micro."
65 | }
66 |
67 | variable "aws_subnet_id" {
68 | description = "The AWS subnet id to deploy the instance to."
69 | }
70 |
71 | variable "aws_security_group_id" {
72 | description = "The AWS subnet id to deploy the instance to."
73 | }
74 |
75 | variable "aws_key_name" {
76 | description = "The AWS key name to use to deploy the instance."
77 | }
78 |
79 | variable "ebs_volume_device_name" {
80 | default = ""
81 | description = "The EBS Device name"
82 | }
83 |
84 | variable "ebs_volume_mount_path" {
85 | default = ""
86 | description = "The EBS volume mount path"
87 | }
88 |
89 | variable "ebs_volume_type" {
90 | default = "standard"
91 | description = "The EBS volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes."
92 | }
93 |
94 | variable "ebs_volume_size" {
95 | default = ""
96 | description = "The size of the volume, in GiBs."
97 | }
98 |
99 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher-k8s/main.tf:
--------------------------------------------------------------------------------
1 | data "external" "rancher_cluster" {
2 | program = ["bash", "${path.module}/files/rancher_cluster.sh"]
3 |
4 | query = {
5 | rancher_api_url = var.rancher_api_url
6 | rancher_access_key = var.rancher_access_key
7 | rancher_secret_key = var.rancher_secret_key
8 | name = var.name
9 | k8s_version = var.k8s_version
10 | k8s_network_provider = var.k8s_network_provider
11 | k8s_registry = var.k8s_registry
12 | k8s_registry_username = var.k8s_registry_username
13 | k8s_registry_password = var.k8s_registry_password
14 | }
15 | }
16 |
17 | provider "azurerm" {
18 | version = "=2.0.0"
19 | subscription_id = var.azure_subscription_id
20 | client_id = var.azure_client_id
21 | client_secret = var.azure_client_secret
22 | tenant_id = var.azure_tenant_id
23 | environment = var.azure_environment
24 | }
25 |
26 | resource "azurerm_resource_group" "resource_group" {
27 | name = "${var.name}-resource_group"
28 | location = var.azure_location
29 | }
30 |
31 | resource "azurerm_virtual_network" "vnet" {
32 | name = var.azure_virtual_network_name
33 | address_space = [var.azure_virtual_network_address_space]
34 | location = var.azure_location
35 | resource_group_name = azurerm_resource_group.resource_group.name
36 | }
37 |
38 | resource "azurerm_subnet" "subnet" {
39 | name = var.azure_subnet_name
40 | resource_group_name = azurerm_resource_group.resource_group.name
41 | virtual_network_name = azurerm_virtual_network.vnet.name
42 | address_prefix = var.azure_subnet_address_prefix
43 | }
44 |
45 | resource "azurerm_network_security_group" "firewall" {
46 | name = var.azurerm_network_security_group_name
47 | location = var.azure_location
48 | resource_group_name = azurerm_resource_group.resource_group.name
49 | }
50 |
51 | # Firewall requirements taken from:
52 | # https://rancher.com/docs/rancher/v2.0/en/quick-start-guide/
53 | resource "azurerm_network_security_rule" "rke_ports" {
54 | name = "rke_ports"
55 | priority = 1000
56 | direction = "Inbound"
57 | access = "Allow"
58 | protocol = "Tcp"
59 |
60 | destination_port_ranges = [
61 | "22", # SSH
62 | "80", # Canal
63 | "443", # Canal
64 | "6443", # Kubernetes API server
65 | "2379-2380", # etcd server client API
66 | "10250", # kubelet API
67 | "10251", # scheduler
68 | "10252", # controller
69 | "10256", # kubeproxy
70 | "30000-32767", # NodePort Services
71 | ]
72 |
73 | source_port_range = "*"
74 | source_address_prefix = "VirtualNetwork"
75 | destination_address_prefix = "*"
76 | resource_group_name = azurerm_resource_group.resource_group.name
77 | network_security_group_name = azurerm_network_security_group.firewall.name
78 | }
79 |
80 |
--------------------------------------------------------------------------------
/terraform/modules/azure-rancher/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {
2 | description = "Human readable name used as prefix to generated names."
3 | }
4 |
5 | variable "rancher_admin_password" {
6 | description = "The Rancher admin password"
7 | }
8 |
9 | variable "docker_engine_install_url" {
10 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
11 | description = "The URL to the shell script to install the docker engine."
12 | }
13 |
14 | variable "rancher_server_image" {
15 | default = "rancher/rancher:v2.4.11"
16 | description = "The Rancher Server image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
17 | }
18 |
19 | variable "rancher_agent_image" {
20 | default = "rancher/rancher-agent:v2.4.11"
21 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
22 | }
23 |
24 | variable "rancher_registry" {
25 | default = ""
26 | description = "The docker registry to use for rancher server and agent images"
27 | }
28 |
29 | variable "rancher_registry_username" {
30 | default = ""
31 | description = "The username to login as."
32 | }
33 |
34 | variable "rancher_registry_password" {
35 | default = ""
36 | description = "The password to use."
37 | }
38 |
39 | variable "azure_subscription_id" {
40 | default = ""
41 | }
42 |
43 | variable "azure_client_id" {
44 | default = ""
45 | }
46 |
47 | variable "azure_client_secret" {
48 | default = ""
49 | }
50 |
51 | variable "azure_tenant_id" {
52 | default = ""
53 | }
54 |
55 | variable "azure_environment" {
56 | default = "public"
57 | }
58 |
59 | variable "azure_location" {
60 | default = "West US 2"
61 | }
62 |
63 | variable "azure_image_publisher" {
64 | default = "Canonical"
65 | }
66 |
67 | variable "azure_image_offer" {
68 | default = "UbuntuServer"
69 | }
70 |
71 | variable "azure_image_sku" {
72 | default = "16.04-LTS"
73 | }
74 |
75 | variable "azure_image_version" {
76 | default = "latest"
77 | }
78 |
79 | variable "azure_virtual_network_name" {
80 | default = "rancher-network"
81 | }
82 |
83 | variable "azure_virtual_network_address_space" {
84 | default = "10.0.0.0/16"
85 | }
86 |
87 | variable "azure_subnet_name" {
88 | default = "rancher-subnet"
89 | }
90 |
91 | variable "azure_subnet_address_prefix" {
92 | default = "10.0.2.0/24"
93 | }
94 |
95 | variable "azurerm_network_security_group_name" {
96 | default = "rancher-firewall"
97 | }
98 |
99 | variable "azure_resource_group_name" {
100 | }
101 |
102 | variable "azure_size" {
103 | default = "Standard_A0"
104 | }
105 |
106 | variable "azure_ssh_user" {
107 | default = "ubuntu"
108 | }
109 |
110 | variable "azure_public_key_path" {
111 | default = "~/.ssh/id_rsa.pub"
112 | }
113 |
114 | variable "azure_private_key_path" {
115 | default = "~/.ssh/id_rsa"
116 | }
117 |
118 |
--------------------------------------------------------------------------------
/get/cluster.go:
--------------------------------------------------------------------------------
1 | package get
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "sort"
7 |
8 | "github.com/joyent/triton-kubernetes/backend"
9 | "github.com/joyent/triton-kubernetes/shell"
10 |
11 | "github.com/manifoldco/promptui"
12 | "github.com/spf13/viper"
13 | )
14 |
15 | func GetCluster(remoteBackend backend.Backend) error {
16 | nonInteractiveMode := viper.GetBool("non-interactive")
17 | clusterManagers, err := remoteBackend.States()
18 | if err != nil {
19 | return err
20 | }
21 |
22 | if len(clusterManagers) == 0 {
23 | return fmt.Errorf("No cluster managers.")
24 | }
25 |
26 | selectedClusterManager := ""
27 | if viper.IsSet("cluster_manager") {
28 | selectedClusterManager = viper.GetString("cluster_manager")
29 | } else if nonInteractiveMode {
30 | return errors.New("cluster_manager must be specified")
31 | } else {
32 | prompt := promptui.Select{
33 | Label: "Cluster Manager",
34 | Items: clusterManagers,
35 | }
36 |
37 | _, value, err := prompt.Run()
38 | if err != nil {
39 | return err
40 | }
41 |
42 | selectedClusterManager = value
43 | }
44 |
45 | // Verify selected cluster manager exists
46 | found := false
47 | for _, clusterManager := range clusterManagers {
48 | if selectedClusterManager == clusterManager {
49 | found = true
50 | break
51 | }
52 | }
53 | if !found {
54 | return fmt.Errorf("Selected cluster manager '%s' does not exist.", selectedClusterManager)
55 | }
56 |
57 | state, err := remoteBackend.State(selectedClusterManager)
58 | if err != nil {
59 | return err
60 | }
61 |
62 | // Get existing clusters
63 | clusters, err := state.Clusters()
64 | if err != nil {
65 | return err
66 | }
67 |
68 | if len(clusters) == 0 {
69 | return fmt.Errorf("No clusters.")
70 | }
71 |
72 | selectedClusterKey := ""
73 | if viper.IsSet("cluster_name") {
74 | clusterName := viper.GetString("cluster_name")
75 | clusterKey, ok := clusters[clusterName]
76 | if !ok {
77 | return fmt.Errorf("A cluster named '%s', does not exist.", clusterName)
78 | }
79 |
80 | selectedClusterKey = clusterKey
81 | } else if nonInteractiveMode {
82 | return errors.New("cluster_name must be specified")
83 | } else {
84 | clusterNames := make([]string, 0, len(clusters))
85 | for name := range clusters {
86 | clusterNames = append(clusterNames, name)
87 | }
88 | sort.Strings(clusterNames)
89 | prompt := promptui.Select{
90 | Label: "Cluster to view",
91 | Items: clusterNames,
92 | Templates: &promptui.SelectTemplates{
93 | Label: "{{ . }}?",
94 | Active: fmt.Sprintf("%s {{ . | underline }}", promptui.IconSelect),
95 | Inactive: " {{ . }}",
96 | Selected: fmt.Sprintf(`{{ "%s" | green }} {{ "Cluster:" | bold}} {{ . }}`, promptui.IconGood),
97 | },
98 | }
99 |
100 | _, value, err := prompt.Run()
101 | if err != nil {
102 | return err
103 | }
104 | selectedClusterKey = clusters[value]
105 | }
106 |
107 | err = shell.RunTerraformOutputWithState(state, selectedClusterKey)
108 | if err != nil {
109 | return err
110 | }
111 |
112 | return nil
113 | }
114 |
--------------------------------------------------------------------------------
/util/backend_prompt_test.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "testing"
5 | "github.com/spf13/viper"
6 | )
7 |
8 |
9 | func TestBackendPromptWithUnsupportedBackendProviderNonInteractiveMode(t *testing.T) {
10 | viper.Set("non-interactive", true)
11 | viper.Set("backend_provider", "S3")
12 |
13 | defer viper.Reset()
14 |
15 | _, err := PromptForBackend()
16 |
17 | expected := "Unsupported backend provider 'S3'"
18 |
19 | if err.Error() != expected {
20 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
21 | }
22 | }
23 |
24 |
25 | func TestBackendPromptWithNoTritonAccountNonInteractiveMode(t *testing.T) {
26 | viper.Set("non-interactive", true)
27 | viper.Set("backend_provider", "manta")
28 |
29 | defer viper.Reset()
30 |
31 | _, err := PromptForBackend()
32 |
33 | expected := "triton_account must be specified"
34 |
35 | if err.Error() != expected {
36 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
37 | }
38 | }
39 |
40 | func TestBackendPromptWithNoTritonSSHKeyPathNonInteractiveMode(t *testing.T) {
41 | viper.Set("non-interactive", true)
42 | viper.Set("backend_provider", "manta")
43 | viper.Set("triton_account", "xyz")
44 |
45 | defer viper.Reset()
46 |
47 | _, err := PromptForBackend()
48 |
49 | expected := "triton_key_path must be specified"
50 |
51 | if err.Error() != expected {
52 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
53 | }
54 | }
55 |
56 | func TestBackendPromptWithInvalidTritonSSHKeyPathInteractive(t *testing.T) {
57 | viper.Set("non-interactive", false)
58 | viper.Set("backend_provider", "manta")
59 | viper.Set("triton_account", "xyz")
60 | viper.Set("triton_key_path", "")
61 |
62 | defer viper.Reset()
63 |
64 | _, err := PromptForBackend()
65 |
66 | expected := "Unable to read private key: open : no such file or directory"
67 |
68 | if err.Error() != expected {
69 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
70 | }
71 | }
72 |
73 | func TestNoTritonURLForNonInteractiveMode(t *testing.T) {
74 | viper.Set("non-interactive", true)
75 | viper.Set("backend_provider", "manta")
76 | viper.Set("triton_account", "xyz")
77 | viper.Set("triton_key_path", "")
78 | viper.Set("triton_key_id", "")
79 |
80 | defer viper.Reset()
81 |
82 | _, err := PromptForBackend()
83 |
84 | expected := "triton_url must be specified"
85 |
86 | if err.Error() != expected {
87 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
88 | }
89 | }
90 |
91 | func TestNoMantaURLForNonInteractiveMode(t *testing.T) {
92 | viper.Set("non-interactive", true)
93 | viper.Set("backend_provider", "manta")
94 | viper.Set("triton_account", "xyz")
95 | viper.Set("triton_key_path", "")
96 | viper.Set("triton_key_id", "")
97 | viper.Set("triton_url", "xyz.triton.com")
98 |
99 | defer viper.Reset()
100 |
101 | _, err := PromptForBackend()
102 |
103 | expected := "manta_url must be specified"
104 |
105 | if err.Error() != expected {
106 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
107 | }
108 | }
--------------------------------------------------------------------------------
/terraform/modules/azure-rke/variables.tf:
--------------------------------------------------------------------------------
1 | variable "node_count" {
2 | default = 3
3 | }
4 |
5 | variable "name" {
6 | description = "Human readable name used as prefix to generated names."
7 | }
8 |
9 | variable "docker_engine_install_url" {
10 | default = "https://raw.githubusercontent.com/joyent/triton-kubernetes/master/scripts/docker/19.03.sh"
11 | description = "The URL to the shell script to install the docker engine."
12 | }
13 |
14 | variable "rancher_admin_password" {
15 | description = "The Rancher admin password"
16 | }
17 |
18 | variable "rancher_server_image" {
19 | default = "rancher/rancher:latest"
20 | description = "The Rancher Server image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
21 | }
22 |
23 | variable "rancher_agent_image" {
24 | default = "rancher/rancher-agent:v2.4.11"
25 | description = "The Rancher Agent image to use, can be a url to a private registry leverage docker_login_* variables to authenticate to registry."
26 | }
27 |
28 | variable "rancher_registry" {
29 | default = ""
30 | description = "The docker registry to use for rancher server and agent images"
31 | }
32 |
33 | variable "rancher_registry_username" {
34 | default = ""
35 | description = "The username to login as."
36 | }
37 |
38 | variable "rancher_registry_password" {
39 | default = ""
40 | description = "The password to use."
41 | }
42 |
43 | variable "azure_subscription_id" {
44 | default = ""
45 | }
46 |
47 | variable "azure_client_id" {
48 | default = ""
49 | }
50 |
51 | variable "azure_client_secret" {
52 | default = ""
53 | }
54 |
55 | variable "azure_tenant_id" {
56 | default = ""
57 | }
58 |
59 | variable "azure_environment" {
60 | default = "public"
61 | }
62 |
63 | variable "azure_location" {
64 | default = "West US 2"
65 | }
66 |
67 | variable "azure_image_publisher" {
68 | default = "Canonical"
69 | }
70 |
71 | variable "azure_image_offer" {
72 | default = "UbuntuServer"
73 | }
74 |
75 | variable "azure_image_sku" {
76 | default = "16.04-LTS"
77 | }
78 |
79 | variable "azure_image_version" {
80 | default = "latest"
81 | }
82 |
83 | variable "azure_virtual_network_name" {
84 | default = "rancher-network"
85 | }
86 |
87 | variable "azure_virtual_network_address_space" {
88 | default = "10.0.0.0/16"
89 | }
90 |
91 | variable "azure_subnet_name" {
92 | default = "rancher-subnet"
93 | }
94 |
95 | variable "azure_subnet_address_prefix" {
96 | default = "10.0.2.0/24"
97 | }
98 |
99 | variable "azurerm_network_security_group_name" {
100 | default = "rancher-firewall"
101 | }
102 |
103 | variable "azure_resource_group_name" {
104 | }
105 |
106 | variable "azure_size" {
107 | default = "Standard_A0"
108 | }
109 |
110 | variable "azure_ssh_user" {
111 | default = "ubuntu"
112 | }
113 |
114 | variable "azure_public_key_path" {
115 | default = "~/.ssh/id_rsa.pub"
116 | }
117 |
118 | variable "azure_private_key_path" {
119 | default = "~/.ssh/id_rsa"
120 | }
121 |
122 | variable "fqdn" {
123 | }
124 |
125 | variable "tls_cert_path" {
126 | }
127 |
128 | variable "tls_private_key_path" {
129 | }
130 |
131 |
--------------------------------------------------------------------------------
/destroy/cluster_test.go:
--------------------------------------------------------------------------------
1 | package destroy
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/joyent/triton-kubernetes/backend/mocks"
7 | "github.com/joyent/triton-kubernetes/state"
8 | "github.com/spf13/viper"
9 | )
10 |
11 | var mockClusters = []byte(`{
12 | "module":{
13 | "cluster_1":{"name":"dev_cluster"},
14 | "cluster_2":{"name":"beta_cluster"},
15 | "cluster_3":{"name":"prod_cluster"}
16 | }
17 | }`)
18 |
19 | func TestDeleteClusterNoClusterManager(t *testing.T) {
20 |
21 | localBackend := &mocks.Backend{}
22 | localBackend.On("States").Return([]string{}, nil)
23 |
24 | expected := "No cluster managers."
25 |
26 | err := DeleteCluster(localBackend)
27 | if expected != err.Error() {
28 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
29 | }
30 | }
31 |
32 | func TestDeleteClusterMissingClusterManager(t *testing.T) {
33 | viper.Reset()
34 | viper.Set("non-interactive", true)
35 |
36 | localBackend := &mocks.Backend{}
37 | localBackend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
38 |
39 | expected := "cluster_manager must be specified"
40 |
41 | err := DeleteCluster(localBackend)
42 | if expected != err.Error() {
43 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
44 | }
45 | }
46 |
47 | func TestDeleteClusterManagerNotExist(t *testing.T) {
48 | viper.Reset()
49 | viper.Set("non-interactive", true)
50 | viper.Set("cluster_manager", "prod-manager")
51 |
52 | localBackend := &mocks.Backend{}
53 | localBackend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
54 |
55 | expected := "Selected cluster manager 'prod-manager' does not exist."
56 |
57 | err := DeleteCluster(localBackend)
58 | if expected != err.Error() {
59 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
60 | }
61 | }
62 |
63 | func TestDeleteClusterMustSpecifyClusterName(t *testing.T) {
64 | viper.Reset()
65 | viper.Set("non-interactive", true)
66 | viper.Set("cluster_manager", "dev-manager")
67 |
68 | stateObj, _ := state.New("ClusterState", mockClusters)
69 |
70 | backend := &mocks.Backend{}
71 | backend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
72 | backend.On("State", "dev-manager").Return(stateObj, nil)
73 |
74 | expected := "cluster_name must be specified"
75 |
76 | err := DeleteCluster(backend)
77 | if expected != err.Error() {
78 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
79 | }
80 | }
81 |
82 | func TestDeleteClusterNotExist(t *testing.T) {
83 | viper.Reset()
84 | viper.Set("non-interactive", true)
85 | viper.Set("cluster_manager", "dev-manager")
86 | viper.Set("cluster_name", "cluster_alpha")
87 |
88 | stateObj, _ := state.New("ClusterState", mockClusters)
89 |
90 | backend := &mocks.Backend{}
91 | backend.On("States").Return([]string{"dev-manager", "beta-manager"}, nil)
92 | backend.On("State", "dev-manager").Return(stateObj, nil)
93 |
94 | expected := "A cluster named 'cluster_alpha', does not exist."
95 |
96 | err := DeleteCluster(backend)
97 | if expected != err.Error() {
98 | t.Errorf("Wrong output, expected %s, received %s", expected, err.Error())
99 | }
100 | }
101 |
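These tests exercise DeleteCluster purely against the mocked backend and viper flags, so they need no cloud credentials or real Terraform state. They can be run in isolation from the repository root:

    # Run only the DeleteCluster tests in the destroy package.
    go test ./destroy/ -run TestDeleteCluster -v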
--------------------------------------------------------------------------------
/terraform/modules/files/setup_rancher.sh.tpl:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Wait for Rancher UI to boot
4 | printf 'Waiting for Rancher to start'
5 | until $(curl --output /dev/null --silent --head --insecure --fail ${rancher_host}); do
6 | printf '.'
7 | sleep 5
8 | done
9 |
10 | # Wait for apt-get to be unlocked
11 | printf 'Waiting for apt-get to unlock'
12 | sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1;
13 | while [ $? -ne 1 ]; do
14 | printf '.';
15 | sleep 5;
16 | sudo fuser /var/lib/dpkg/lock >/dev/null 2>&1;
17 | done
18 |
19 | # Wait for apt-get frontend to be unlocked
20 | printf 'Waiting for apt-get frontend to unlock'
21 | sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1;
22 | while [ $? -ne 1 ]; do
23 | printf '.';
24 | sleep 5;
25 | sudo fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1;
26 | done
27 |
28 | sudo apt-get install jq -y
29 |
30 | # Login as default admin user
31 | login_response=$(curl -X POST \
32 | --insecure \
33 | -d '{"description":"Initial Token", "password":"admin", "ttl": 60000, "username":"admin"}' \
34 | '${rancher_host}/v3-public/localProviders/local?action=login')
35 | initial_token=$(echo $login_response | jq -r '.token')
36 |
37 | # Create token
38 | token_response=$(curl -X POST \
39 | --insecure \
40 | -u $initial_token \
41 | -H 'Accept: application/json' \
42 | -H 'Content-Type: application/json' \
43 | -d '{"expired":false,"isDerived":false,"ttl":0,"type":"token","description":"Managed by Terraform","name":"triton-kubernetes"}' \
44 | '${rancher_host}/v3/token')
45 | echo $token_response > ~/rancher_api_key
46 | access_key=$(echo $token_response | jq -r '.name')
47 | secret_key=$(echo $token_response | jq -r '.token' | cut -d: -f2)
48 |
49 | # Change default admin password
50 | curl -X POST \
51 | --insecure \
52 | -u $access_key:$secret_key \
53 | -H 'Accept: application/json' \
54 | -H 'Content-Type: application/json' \
55 | -d '{"currentPassword":"admin","newPassword":"${rancher_admin_password}"}' \
56 | '${rancher_host}/v3/users?action=changepassword'
57 |
58 | # Setup server url
59 | curl -X PUT \
60 | --insecure \
61 | -u $access_key:$secret_key \
62 | -H 'Accept: application/json' \
63 | -H 'Content-Type: application/json' \
64 | -d '{"baseType": "setting", "id": "server-url", "name": "server-url", "type": "setting", "value": "${host_registration_url}" }' \
65 | '${rancher_host}/v3/settings/server-url'
66 |
67 | # Setup helm
68 | curl -X POST \
69 | --insecure \
70 | -u $access_key:$secret_key \
71 | -H 'Accept: application/json' \
72 | -H 'Content-Type: application/json' \
73 | -d '{"branch":"master", "kind":"helm", "name":"helm-charts", "url":"https://github.com/kubernetes/helm.git"}' \
74 | '${rancher_host}/v3/catalogs'
75 |
76 | # Update graphics
77 | printf 'Updating UI ...\n'
78 | # logos
79 | curl -sL https://github.com/joyent/triton-kubernetes/raw/master/static/logos.tar.gz -o ~/logos.tar.gz
80 | tar -zxf ~/logos.tar.gz
81 | sudo docker cp ~/logos/ $(sudo docker ps -q):/usr/share/rancher/ui/assets/images/
82 | sudo docker exec -i $(sudo docker ps -q) bash <<EOF
85 | echo 'footer.ember-view{visibility:hidden;display:none;}' >> /usr/share/rancher/ui/assets/vendor.css
86 | echo 'footer.ember-view{visibility:hidden;display:none;}' >> /usr/share/rancher/ui/assets/vendor.rtl.css
87 | EOF
88 | rm -rf logos*
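The token JSON saved to ~/rancher_api_key is what later provisioning steps authenticate with against the Rancher API. A small sketch of reusing it by hand, assuming jq is installed and RANCHER_HOST is set to the same value as ${rancher_host} (both assumptions, not part of the template):

    # Rebuild the basic-auth pair exactly the way the script derives it.
    access_key=$(jq -r '.name'  ~/rancher_api_key)
    secret_key=$(jq -r '.token' ~/rancher_api_key | cut -d: -f2)
    # List the clusters this Rancher server knows about.
    curl --insecure -u "$access_key:$secret_key" "$RANCHER_HOST/v3/clusters"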
--------------------------------------------------------------------------------
/terraform/modules/vsphere-rancher-k8s-host/main.tf:
--------------------------------------------------------------------------------
1 | provider "vsphere" {
2 | user = var.vsphere_user
3 | password = var.vsphere_password
4 | vsphere_server = var.vsphere_server
5 |
6 | allow_unverified_ssl = true
7 | }
8 |
9 | locals {
10 | rancher_node_role = element(keys(var.rancher_host_labels), 0)
11 | }
12 |
13 | data "template_file" "install_rancher_agent" {
14 | template = file("${path.module}/files/install_rancher_agent.sh.tpl")
15 |
16 | vars = {
17 | hostname = var.hostname
18 | docker_engine_install_url = var.docker_engine_install_url
19 | rancher_api_url = var.rancher_api_url
20 | rancher_cluster_registration_token = var.rancher_cluster_registration_token
21 | rancher_cluster_ca_checksum = var.rancher_cluster_ca_checksum
22 | rancher_node_role = local.rancher_node_role == "control" ? "controlplane" : local.rancher_node_role
23 | rancher_agent_image = var.rancher_agent_image
24 | rancher_registry = var.rancher_registry
25 | rancher_registry_username = var.rancher_registry_username
26 | rancher_registry_password = var.rancher_registry_password
27 | }
28 | }
29 |
30 | data "vsphere_datacenter" "dc" {
31 | name = var.vsphere_datacenter_name
32 | }
33 |
34 | data "vsphere_datastore" "datastore" {
35 | name = var.vsphere_datastore_name
36 | datacenter_id = data.vsphere_datacenter.dc.id
37 | }
38 |
39 | data "vsphere_resource_pool" "pool" {
40 | name = var.vsphere_resource_pool_name
41 | datacenter_id = data.vsphere_datacenter.dc.id
42 | }
43 |
44 | data "vsphere_network" "network" {
45 | name = var.vsphere_network_name
46 | datacenter_id = data.vsphere_datacenter.dc.id
47 | }
48 |
49 | data "vsphere_virtual_machine" "template" {
50 | name = var.vsphere_template_name
51 | datacenter_id = data.vsphere_datacenter.dc.id
52 | }
53 |
54 | resource "vsphere_virtual_machine" "vm" {
55 | name = var.hostname
56 | resource_pool_id = data.vsphere_resource_pool.pool.id
57 | datastore_id = data.vsphere_datastore.datastore.id
58 |
59 | num_cpus = 2
60 | memory = 2048
61 | guest_id = data.vsphere_virtual_machine.template.guest_id
62 |
63 | scsi_type = data.vsphere_virtual_machine.template.scsi_type
64 |
65 | network_interface {
66 | network_id = data.vsphere_network.network.id
67 | adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0]
68 | }
69 |
70 | disk {
71 | label = "disk0"
72 | size = data.vsphere_virtual_machine.template.disks[0].size
73 | eagerly_scrub = data.vsphere_virtual_machine.template.disks[0].eagerly_scrub
74 | thin_provisioned = data.vsphere_virtual_machine.template.disks[0].thin_provisioned
75 | }
76 |
77 | clone {
78 | template_uuid = data.vsphere_virtual_machine.template.id
79 | }
80 | }
81 |
82 | resource "null_resource" "install_rancher_agent" {
83 | triggers = {
84 | vsphere_virtual_machine_id = vsphere_virtual_machine.vm.id
85 | }
86 |
87 | connection {
88 | type = "ssh"
89 | user = var.ssh_user
90 |
91 | host = vsphere_virtual_machine.vm.default_ip_address
92 | private_key = file(var.key_path)
93 | }
94 |
95 | provisioner "remote-exec" {
96 | inline = <<EOF
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
28 | ... > sha256-checksums.txt
29 |
30 | build-local: clean build-osx
31 |
32 | build-osx: clean
33 | @echo "Building OSX..."
34 | # Copying and renaming the OSX binary to just 'triton-kubernetes'. Making a temp directory to avoid potential naming conflicts.
35 | @mkdir -p $(OSX_TMP_DIR)
36 | @mkdir -p $(BUILD_PATH)
37 | @GOOS=darwin GOARCH=amd64 go build -v -ldflags="$(LDFLAGS)" -o $(OSX_BINARY_PATH)
38 | @cp $(OSX_BINARY_PATH) $(OSX_TMP_DIR)/triton-kubernetes
39 | @zip --junk-paths $(OSX_ARCHIVE_PATH) $(OSX_TMP_DIR)/triton-kubernetes
40 | @rm -rf $(OSX_TMP_DIR)
41 |
42 | build-linux: clean
43 | @echo "Building Linux..."
44 | @mkdir -p $(LINUX_TMP_DIR)
45 | @mkdir -p $(BUILD_PATH)
46 | @GOOS=linux GOARCH=amd64 go build -v -ldflags="$(LDFLAGS)" -o $(LINUX_BINARY_PATH)
47 | @cp $(LINUX_BINARY_PATH) $(LINUX_TMP_DIR)/triton-kubernetes
48 | @zip --junk-paths $(LINUX_ARCHIVE_PATH) $(LINUX_TMP_DIR)/triton-kubernetes
49 | @rm -rf $(LINUX_TMP_DIR)
50 |
51 | build-rpm: build-linux
52 | @echo "Building RPM..."
53 | # Copying and renaming the linux binary to just 'triton-kubernetes'. Making a temp directory to avoid potential naming conflicts.
54 | @mkdir -p $(RPM_TMP_DIR)
55 | @cp $(LINUX_BINARY_PATH) $(RPM_TMP_DIR)/triton-kubernetes
56 | @fpm \
57 | --chdir $(RPM_TMP_DIR) \
58 | --input-type dir \
59 | --output-type rpm \
60 | --rpm-os linux \
61 | --name triton-kubernetes \
62 | --version $(VERSION) \
63 | --prefix $(RPM_INSTALL_DIR) \
64 | --package $(RPM_PATH) triton-kubernetes
65 | # Cleaning up the tmp directory
66 | @rm -rf $(RPM_TMP_DIR)
67 |
68 | build-deb: build-linux
69 | @echo "Building DEB..."
70 | # Copying and renaming the linux binary to just 'triton-kubernetes'. Making a temp directory to avoid potential naming conflicts.
71 | @mkdir -p $(DEB_TMP_DIR)
72 | @cp $(LINUX_BINARY_PATH) $(DEB_TMP_DIR)/triton-kubernetes
73 | # fpm fails with a tar error when building the DEB package on OSX 10.10.
74 | # Current workaround is to modify PATH so that fpm uses gnu-tar instead of the regular tar command.
75 | # Issue URL: https://github.com/jordansissel/fpm/issues/882
76 | @PATH="/usr/local/opt/gnu-tar/libexec/gnubin:$$PATH" fpm \
77 | --chdir $(DEB_TMP_DIR) \
78 | --input-type dir \
79 | --output-type deb \
80 | --name triton-kubernetes \
81 | --version $(VERSION) \
82 | --prefix $(DEB_INSTALL_DIR) \
83 | --package $(DEB_PATH) triton-kubernetes
84 | # Cleaning up the tmp directory
85 | @rm -rf $(DEB_TMP_DIR)
86 |
87 | test:
88 | @echo "Running unit-tests..."
89 | go test ./...
90 |
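The targets above are meant to be driven straight from a checkout; the path and version variables they reference (VERSION, BUILD_PATH, and friends) are defined elsewhere in the Makefile. Typical invocations:

    # Build the local OSX binary and zip archive.
    make build-local
    # Cross-compile the Linux binary and package it as an RPM (requires fpm).
    make build-rpm
    # Run the Go unit tests.
    make test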
--------------------------------------------------------------------------------