├── Web ├── results │ ├── favicon.ico │ ├── 404.html │ ├── search │ ├── img │ │ ├── aws.png │ │ └── logo_cncf.png │ ├── alpha.sh │ ├── release.sh │ ├── summary.json.bak │ ├── main2.css │ └── chart.js └── api │ ├── setup.cfg │ ├── _src │ ├── requirements.txt │ ├── summary.py │ └── Schemas │ │ └── new.json │ ├── _tests │ ├── new │ │ └── event.json │ └── gen_mock_summary.py │ ├── trace.yml │ └── summary.yml ├── cncfdemo-cli ├── cncfdemo │ ├── __init__.py │ ├── bootstrap │ │ ├── __init__.py │ │ ├── aws │ │ │ ├── __init__.py │ │ │ ├── execution_plans │ │ │ │ └── __init__.py │ │ │ ├── Policies │ │ │ │ └── lambda-policy.json │ │ │ └── utils.py │ │ ├── bootstrap.py │ │ ├── main.py │ │ └── DO │ │ │ ├── simple.py │ │ │ └── simple2.py │ ├── kubectl │ │ ├── __init__.py │ │ ├── utils.py │ │ └── cmd_create.py │ ├── Deployment │ │ ├── distcc │ │ │ ├── distcc-svc.yaml │ │ │ ├── distcc-ds.yaml │ │ │ └── README.md │ │ ├── echo │ │ │ ├── echo-svc.yaml │ │ │ └── echo-rc.yaml │ │ ├── Countly │ │ │ ├── configMaps │ │ │ │ └── countly │ │ │ │ │ ├── frontend.js │ │ │ │ │ └── api.js │ │ │ └── countly.yaml.j2 │ │ └── runner.sh │ ├── cncf.py │ └── utils │ │ └── utils.py ├── README.md └── setup.py ├── provisioning ├── gce │ ├── modules │ │ ├── dns │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ └── dns.tf │ │ ├── vpc │ │ │ ├── vpc.tf │ │ │ ├── output.tf │ │ │ ├── azure-security.tf │ │ │ ├── io.tf │ │ │ ├── public.tf │ │ │ ├── private.tf │ │ │ └── gce-subnet.tf │ │ ├── bastion │ │ │ ├── output.tf │ │ │ ├── user-data.yml │ │ │ └── input.tf │ │ ├── etcd │ │ │ ├── output.tf │ │ │ ├── external_lb.tf │ │ │ ├── internal_lb.tf │ │ │ ├── discovery.tf │ │ │ ├── input.tf │ │ │ ├── cloud-config.tf │ │ │ ├── load-balancer.tf │ │ │ └── nodes.tf │ │ ├── security │ │ │ ├── io.tf │ │ │ └── security.tf │ │ └── worker │ │ │ ├── cloud-config.tf │ │ │ └── input.tf │ ├── gce.tf │ ├── runme │ ├── keypair.tf │ ├── cert.tf │ ├── wait-for-cluster │ ├── cloud-config.tf │ ├── output.tf │ └── input.tf ├── gke │ ├── modules │ │ ├── vpc │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ ├── vpc.tf │ │ │ └── gce-subnet.tf │ │ └── cluster │ │ │ ├── output.tf │ │ │ ├── node-pool.tf │ │ │ ├── input.tf │ │ │ └── cluster.tf │ ├── gke.tf │ ├── output.tf │ ├── input.tf │ └── modules.tf ├── packet │ ├── modules │ │ ├── dns │ │ │ ├── output.tf │ │ │ └── input.tf │ │ ├── worker │ │ │ ├── output.tf │ │ │ ├── worker-nodes.tf │ │ │ ├── input.tf │ │ │ ├── kube-proxy.yml │ │ │ └── worker-cloud-config.tf │ │ └── etcd │ │ │ ├── discovery.tf │ │ │ ├── output.tf │ │ │ ├── etcd-nodes.tf │ │ │ ├── kube-scheduler.yml │ │ │ ├── input.tf │ │ │ ├── kube-proxy.yml │ │ │ ├── kube-controller-manager.yml │ │ │ └── kube-apiserver.yml │ ├── packet.tf │ ├── ssl-ssh-cloud.tf │ ├── output.tf │ ├── input.tf │ └── init-cfssl ├── azure │ ├── servicePrincipalProfile.json │ ├── modules │ │ ├── network │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ └── virtual_network.tf │ │ ├── bastion │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ ├── bastion-user-data.yml │ │ │ └── bastion-node.tf │ │ ├── dns │ │ │ ├── input.tf │ │ │ └── output.tf │ │ ├── etcd │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ ├── etcd-load-balancer.tf │ │ │ ├── kube-apiserver.yml │ │ │ ├── etcd-nodes.tf │ │ │ └── etcd-cloud-config.tf │ │ └── worker │ │ │ ├── input.tf │ │ │ ├── worker-cloud-config.tf │ │ │ └── worker-nodes.tf │ ├── docs │ │ ├── azure_app_endpoints.png │ │ ├── azure_app_registration.png │ │ ├── key_generation_copy_me.png │ │ ├── guid_from_oauth_endpoint.png │ │ └── web_api_application_type.png │ ├── runme │ ├── output.tf 
│ ├── wait-for-cluster │ ├── azure.tf │ ├── input.tf │ ├── ssl-ssh-cloud.tf │ └── init-cfssl ├── aws │ ├── modules │ │ ├── vpc │ │ │ ├── input.tf │ │ │ ├── output.tf │ │ │ ├── vpc.tf │ │ │ ├── public.tf │ │ │ └── private.tf │ │ ├── bastion │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ ├── user-data.yml │ │ │ └── ec2.tf │ │ ├── dns │ │ │ ├── input.tf │ │ │ ├── output.tf │ │ │ └── dns.tf │ │ ├── worker │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ ├── cloud-config.tf │ │ │ └── ec2.tf │ │ ├── etcd │ │ │ ├── output.tf │ │ │ ├── input.tf │ │ │ ├── elb.tf │ │ │ ├── ec2.tf │ │ │ ├── kube-apiserver.yml │ │ │ └── cloud-config.tf │ │ ├── security │ │ │ ├── io.tf │ │ │ └── security.tf │ │ └── iam │ │ │ ├── io.tf │ │ │ ├── etcd.tf │ │ │ └── worker.tf │ ├── aws.tf │ ├── cleanup.tf │ ├── keypair.tf │ ├── cert.tf │ ├── wait-for-cluster │ ├── output.tf │ ├── input.tf │ └── Readme.mkd ├── kubeconfig │ ├── output.tf │ ├── input.tf │ └── kubeconfig.tf ├── cross-cloud │ ├── output.tf │ ├── input.tf │ └── cloud.tf └── Dockerfile ├── Docker ├── Demo │ └── Dockerfile ├── KubeAddOns │ └── Dockerfile ├── distcc │ ├── runner.sh │ ├── config │ └── Dockerfile ├── distcc-daemon │ ├── runner.sh │ ├── config │ └── Dockerfile ├── distcc-master │ ├── config │ ├── Dockerfile │ └── runner.sh ├── cloudbuild.yaml ├── echo │ ├── Dockerfile │ └── echo.py ├── azure-cli │ ├── Dockerfile │ ├── Readme.org │ └── entrypoint.sh ├── falcon │ ├── Dockerfile │ └── app.py ├── grafana │ ├── Dockerfile │ ├── README.md │ └── run.sh ├── Countly │ ├── runit │ │ ├── countly-api.sh │ │ └── countly-dashboard.sh │ └── Dockerfile ├── Wrk │ ├── Dockerfile │ ├── runner.sh │ ├── README.md │ └── send_summary.lua ├── gunicorn │ ├── Dockerfile │ └── gunicorn_conf.py ├── boinc │ ├── runner.sh │ ├── attach.sh │ ├── Dockerfile │ ├── README.md │ └── boinc_rpc.sh ├── Kernel │ └── Dockerfile ├── kubectl │ └── Dockerfile ├── fluentd-reportstats │ ├── Dockerfile │ └── fluent.conf └── fluentd-kubectl │ ├── fluent.conf │ └── Dockerfile ├── Images ├── base │ ├── disable_tty.sh │ ├── ansible.cfg │ ├── playbook.yml │ ├── README.md │ ├── base.sh │ └── packer.json └── golden │ ├── disable_tty.sh │ ├── ansible.cfg │ ├── packer.json │ ├── README.md │ └── playbook.yml ├── .gitignore ├── Demo.yaml ├── ClusterStart.yaml ├── Demo ├── Manifests │ ├── distcc-daemon.yaml │ └── countly.yaml └── ConfigMaps │ └── countly.yaml ├── AddOns ├── Prometheus │ ├── pushgateway.yaml │ ├── node-exporter.yaml │ └── deployment.yaml ├── grafana.yaml └── dashboard.yaml ├── discovery └── README.md └── .gitlab-ci.yml /Web/results/favicon.ico: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Web/results/404.html: -------------------------------------------------------------------------------- 1 | 404 2 | -------------------------------------------------------------------------------- /Web/results/search: -------------------------------------------------------------------------------- 1 | search.html -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /provisioning/gce/modules/dns/output.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
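The per-cloud directories under provisioning/ in the tree above all share the same shape: a modules/ tree, top-level *.tf files, a runme script, and a wait-for-cluster poller. As a rough sketch of how they are driven — the staged `-target` names are taken from the azure/gce runme scripts further down in this dump, and other providers will differ:

```
cd provisioning/azure                               # or aws, gce, gke, packet
terraform get                                       # vendor the local modules
terraform apply -target null_resource.sshkey_gen    # staged applies, as in the runme scripts
terraform apply -target null_resource.ssl_gen
time terraform apply                                # then converge the rest of the graph
./wait-for-cluster                                  # poll until kubectl reaches the apiserver
```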
/provisioning/gke/modules/vpc/output.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Web/api/setup.cfg: -------------------------------------------------------------------------------- 1 | [install] 2 | prefix= 3 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/kubectl/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /provisioning/packet/modules/dns/output.tf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/aws/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /provisioning/azure/servicePrincipalProfile.json: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /provisioning/gke/gke.tf: -------------------------------------------------------------------------------- 1 | provider "google" {} 2 | 3 | -------------------------------------------------------------------------------- /Web/api/_src/requirements.txt: -------------------------------------------------------------------------------- 1 | hashids 2 | jsonschema 3 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/aws/execution_plans/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Docker/Demo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zilman/kubectl 2 | 3 | ADD Demo Demo 4 | -------------------------------------------------------------------------------- /Docker/KubeAddOns/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM zilman/kubectl 2 | 3 | ADD AddOns AddOns 4 | -------------------------------------------------------------------------------- /Docker/distcc/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /etc/init.d/distcc start 4 | sleep infinity 5 | -------------------------------------------------------------------------------- /Web/results/img/aws.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/HEAD/Web/results/img/aws.png -------------------------------------------------------------------------------- /provisioning/gke/output.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig" { value = "${ module.kubeconfig.kubeconfig }"} 2 | -------------------------------------------------------------------------------- /Web/results/img/logo_cncf.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/cncf/demo/HEAD/Web/results/img/logo_cncf.png -------------------------------------------------------------------------------- /Docker/distcc-daemon/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | distccd --daemon --allow 10.0.0.0/8 4 | sleep infinity 5 | -------------------------------------------------------------------------------- /Images/base/disable_tty.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sed -i.bak -e '/Defaults.*requiretty/s/^/#/' /etc/sudoers 3 | -------------------------------------------------------------------------------- /Images/golden/disable_tty.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sed -i.bak -e '/Defaults.*requiretty/s/^/#/' /etc/sudoers 3 | -------------------------------------------------------------------------------- /provisioning/aws/modules/vpc/input.tf: -------------------------------------------------------------------------------- 1 | variable "azs" {} 2 | variable "cidr" {} 3 | variable "name" {} 4 | -------------------------------------------------------------------------------- /provisioning/gke/modules/vpc/input.tf: -------------------------------------------------------------------------------- 1 | variable "cidr" {} 2 | variable "name" {} 3 | variable "region" {} 4 | -------------------------------------------------------------------------------- /provisioning/azure/modules/network/output.tf: -------------------------------------------------------------------------------- 1 | output "subnet_id" { value = "${ azurerm_subnet.cncf.id }" } 2 | -------------------------------------------------------------------------------- /Docker/distcc/config: -------------------------------------------------------------------------------- 1 | STARTDISTCC="true" 2 | ALLOWEDNETS="10.0.0.0/8" 3 | LISTENER="127.0.0.1" 4 | ZEROCONF="false" 5 | -------------------------------------------------------------------------------- /provisioning/kubeconfig/output.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig" { value = "${ data.template_file.kubeconfig.rendered }" } 2 | -------------------------------------------------------------------------------- /provisioning/gke/modules/cluster/output.tf: -------------------------------------------------------------------------------- 1 | output "fqdn_k8s" { value = "${ google_container_cluster.cncf.endpoint }" } 2 | -------------------------------------------------------------------------------- /Docker/distcc-daemon/config: -------------------------------------------------------------------------------- 1 | STARTDISTCC="true" 2 | ALLOWEDNETS="10.0.0.0/8" 3 | LISTENER="127.0.0.1" 4 | ZEROCONF="false" 5 | -------------------------------------------------------------------------------- /Docker/distcc-master/config: -------------------------------------------------------------------------------- 1 | STARTDISTCC="true" 2 | ALLOWEDNETS="10.0.0.0/8" 3 | LISTENER="127.0.0.1" 4 | ZEROCONF="false" 5 | -------------------------------------------------------------------------------- /provisioning/azure/docs/azure_app_endpoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cncf/demo/HEAD/provisioning/azure/docs/azure_app_endpoints.png 
--------------------------------------------------------------------------------
/provisioning/azure/docs/azure_app_registration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cncf/demo/HEAD/provisioning/azure/docs/azure_app_registration.png
--------------------------------------------------------------------------------
/provisioning/azure/docs/key_generation_copy_me.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cncf/demo/HEAD/provisioning/azure/docs/key_generation_copy_me.png
--------------------------------------------------------------------------------
/provisioning/azure/docs/guid_from_oauth_endpoint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cncf/demo/HEAD/provisioning/azure/docs/guid_from_oauth_endpoint.png
--------------------------------------------------------------------------------
/provisioning/azure/docs/web_api_application_type.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cncf/demo/HEAD/provisioning/azure/docs/web_api_application_type.png
--------------------------------------------------------------------------------
/provisioning/azure/modules/network/input.tf:
--------------------------------------------------------------------------------
1 | variable "vpc_cidr" {}
2 | variable "name" {}
3 | variable "name_servers_file" {}
4 | variable "location" {}
5 |
--------------------------------------------------------------------------------
/Docker/cloudbuild.yaml:
--------------------------------------------------------------------------------
1 | steps:
2 | - name: 'gcr.io/cloud-builders/docker'
3 |   # docker build takes a directory as its build context, not the Dockerfile path itself
4 |   args: ['build', '-t', 'gcr.io/$PROJECT_ID/builder-test', 'kubectl/']
5 |
--------------------------------------------------------------------------------
/Images/base/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | roles_path = ./roles
3 | host_key_checking = False
4 |
5 | remote_user = root
6 | private_key_file = ~/.ssh/your.key
7 |
--------------------------------------------------------------------------------
/Docker/echo/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM zilman/falcon
2 | MAINTAINER Eugene Zilman
3 |
4 | COPY echo.py /app.py
5 |
6 | EXPOSE 8000
7 | CMD ["app:app"]
8 |
--------------------------------------------------------------------------------
/Images/golden/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | roles_path = ./roles
3 | host_key_checking = False
4 |
5 | remote_user = root
6 | private_key_file = ~/.ssh/cncf-aws.pem
7 |
--------------------------------------------------------------------------------
/provisioning/gke/modules/vpc/vpc.tf:
--------------------------------------------------------------------------------
1 | resource "google_compute_network" "cncf" {
2 |   name = "${ var.name }"
3 |   auto_create_subnetworks = "false"
4 | }
5 |
--------------------------------------------------------------------------------
/Docker/azure-cli/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM azuresdk/azure-cli-python
2 | MAINTAINER "Hippie Hacker "
3 | COPY entrypoint.sh /
4 | ENTRYPOINT ["/entrypoint.sh"]
5 | CMD ["azure"]
--------------------------------------------------------------------------------
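A quick way to exercise the azure-cli image above — the tag name is illustrative, and the entrypoint script itself is not shown in this dump:

```
docker build -t cncf/azure-cli Docker/azure-cli/   # build from the directory above
docker run --rm -it cncf/azure-cli                 # the CMD drops you into the `azure` CLI
```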
/provisioning/aws/modules/bastion/output.tf:
--------------------------------------------------------------------------------
1 | output "depends_id" { value = "${null_resource.dummy_dependency.id}" }
2 | output "ip" { value = "${ aws_instance.bastion.public_ip }" }
3 |
--------------------------------------------------------------------------------
/provisioning/gce/modules/vpc/vpc.tf:
--------------------------------------------------------------------------------
1 | resource "google_compute_network" "cncf" {
2 |   name = "${ var.name }"
3 |   auto_create_subnetworks = "false"
4 | }
5 |
6 |
7 |
--------------------------------------------------------------------------------
/cncfdemo-cli/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## Local install for now
3 |
4 |     mkvirtualenv cncf
5 |     pip install --editable .
6 |
7 | And that's it: the `cncfdemo` command should now be installed and on your path.
--------------------------------------------------------------------------------
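With the entry points defined in cncf.py and bootstrap/main.py (both appear later in this dump), typical invocations look like the following — a sketch only, since the flags for the aws subcommand live in bootstrap/aws/cli.py, which is not included here:

```
cncfdemo --help                                            # top-level click group
cncfdemo bootstrap aws                                     # wired up via bootstrap.add_command(aws)
cncfdemo create configmap demo --from-file . --recursive   # as used in Deployment/runner.sh
cncfdemo create -f . --recursive
```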
/provisioning/azure/modules/bastion/output.tf:
--------------------------------------------------------------------------------
1 | output "bastion_ip" { value = "${azurerm_public_ip.cncf.ip_address}" }
2 | output "bastion_fqdn" { value = "${azurerm_public_ip.cncf.fqdn}" }
3 |
--------------------------------------------------------------------------------
/provisioning/gce/modules/bastion/output.tf:
--------------------------------------------------------------------------------
1 | # output "bastion-ip" { value = "${azurerm_public_ip.cncf.ip_address}" }
2 | # output "bastion-fqdn" { value = "${azurerm_public_ip.cncf.fqdn}" }
3 |
--------------------------------------------------------------------------------
/provisioning/kubeconfig/input.tf:
--------------------------------------------------------------------------------
1 | variable "admin_key_pem" {}
2 | variable "admin_pem" {}
3 | variable "ca_pem" {}
4 | variable "fqdn_k8s" {}
5 | variable "name" {}
6 | variable "data_dir" {}
7 |
--------------------------------------------------------------------------------
/provisioning/aws/modules/dns/input.tf:
--------------------------------------------------------------------------------
1 | variable "internal_tld" {}
2 | variable "name" {}
3 | variable "vpc_id" {}
4 | variable "master_ips" { type = "list"}
5 | variable "master_node_count" {}
6 |
7 |
--------------------------------------------------------------------------------
/provisioning/gce/modules/vpc/output.tf:
--------------------------------------------------------------------------------
1 | output "network" { value = "${ google_compute_network.cncf.self_link }" }
2 | output "subnetwork" { value = "${ google_compute_subnetwork.cncf.self_link }" }
3 |
--------------------------------------------------------------------------------
/provisioning/aws/modules/worker/output.tf:
--------------------------------------------------------------------------------
1 | output "autoscaling_group_name" { value = "${ aws_autoscaling_group.worker.name }" }
2 | output "depends_id" { value = "${ null_resource.dummy_dependency.id }" }
3 |
--------------------------------------------------------------------------------
/provisioning/packet/modules/worker/output.tf:
--------------------------------------------------------------------------------
1 | #output "fqdn_lb" { value = "${azurerm_public_ip.cncf.fqdn}" }
2 | output "public_worker_ips" { value = ["${ packet_device.workers.*.network.0.address }"] }
3 |
--------------------------------------------------------------------------------
/Docker/falcon/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM zilman/gunicorn
2 | MAINTAINER Eugene Zilman
3 |
4 | RUN pip install meinheld falcon
5 |
6 | COPY app.py /
7 |
8 | EXPOSE 8000
9 | CMD ["app:app"]
10 |
--------------------------------------------------------------------------------
/provisioning/azure/modules/dns/input.tf:
--------------------------------------------------------------------------------
1 | variable "internal_tld" {}
2 | variable "name" {}
3 | variable "name_servers_file" {}
4 | variable "master_ips" { type = "list" }
5 | variable "master_node_count" {}
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | *~
3 | *.swp
4 | *.swo
5 | *.egg-info/
6 | *.out
7 | *.zip
8 | *.pyc
9 | *.dist-info/
10 | data/
11 | terraform.tfstate
12 | terraform.tfstate.backup
13 | tmp/
14 | *.env
15 | .terraform/
16 |
--------------------------------------------------------------------------------
/provisioning/gce/modules/dns/input.tf:
--------------------------------------------------------------------------------
1 | variable "name" {}
2 | variable "master_node_count" {}
3 | variable "external_lb" {}
4 | variable "internal_lb" {}
5 | variable "domain" {}
6 | variable "record_ttl" { default = "60" }
7 |
--------------------------------------------------------------------------------
/Docker/grafana/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM grafana/grafana:3.1.1
2 |
3 | RUN apt-get update && \
4 |     apt-get install -y curl
5 |
6 | COPY dashboards /dashboards
7 | COPY run.sh /run.sh
8 |
9 | EXPOSE 3000
10 | ENTRYPOINT /run.sh
11 |
--------------------------------------------------------------------------------
/provisioning/gce/modules/etcd/output.tf:
--------------------------------------------------------------------------------
1 | output "external_lb" { value = "${ google_compute_forwarding_rule.external.ip_address }" }
2 |
3 | output "internal_lb" { value = "${ google_compute_forwarding_rule.cncf.ip_address }" }
4 |
5 |
--------------------------------------------------------------------------------
/Docker/Countly/runit/countly-api.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cp /etc/config/api.js /opt/countly/api/config.js
4 | chown countly:countly /opt/countly/api/config.js
5 |
6 | exec /sbin/setuser countly /usr/bin/nodejs /opt/countly/api/api.js
7 |
--------------------------------------------------------------------------------
/Docker/grafana/README.md:
--------------------------------------------------------------------------------
1 | # Roll your own Grafana Dashboards / Influx sink
2 |
3 | Use the web UI to build your dashboards, save the resulting JSON files in this directory, and bake the image.
4 |
5 | Note: this is actively worked on, ymmv.
6 |
--------------------------------------------------------------------------------
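A run.sh along these lines would load the baked-in dashboards at startup — a sketch only, since the actual run.sh is not shown: admin:admin is the Grafana 3.x default credential pair, and /api/dashboards/db expects each file to be wrapped in a {"dashboard": ..., "overwrite": true} payload:

```
#!/bin/sh
# Start Grafana, wait for the HTTP port, then POST each saved dashboard.
/usr/sbin/grafana-server --homepath=/usr/share/grafana &
until curl -s -o /dev/null http://localhost:3000/login; do sleep 1; done
for dash in /dashboards/*.json; do
  curl -s -u admin:admin -H "Content-Type: application/json" \
       -X POST http://localhost:3000/api/dashboards/db -d @"$dash"
done
wait
```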
/Docker/Wrk/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM williamyeh/wrk:4.0.2
2 |
3 | MAINTAINER Eugene Zilman
4 |
5 | RUN apk add --update curl --no-cache
6 |
7 | ADD runner.sh /wrk/
8 | ADD send_summary.lua /wrk/
9 |
10 | ENTRYPOINT ["/wrk/runner.sh"]
11 |
--------------------------------------------------------------------------------
/provisioning/gce/modules/vpc/azure-security.tf:
--------------------------------------------------------------------------------
1 | # resource "azurerm_network_security_group" "cncf" {
2 | #   name = "${ var.name }"
3 | #   location = "${ var.location }"
4 | #   resource_group_name = "${ var.name }"
5 |
6 | # }
7 |
--------------------------------------------------------------------------------
/Docker/gunicorn/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:2.7
2 | MAINTAINER Eugene Zilman
3 |
4 | RUN pip install gunicorn
5 |
6 | COPY gunicorn_conf.py /
7 |
8 | ENTRYPOINT ["/usr/local/bin/gunicorn", "--config", "/gunicorn_conf.py"]
9 |
10 |
11 |
--------------------------------------------------------------------------------
/provisioning/aws/modules/bastion/input.tf:
--------------------------------------------------------------------------------
1 | variable "ami_id" {}
2 | variable "instance_type" {}
3 | variable "internal_tld" {}
4 | variable "key_name" {}
5 | variable "name" {}
6 | variable "security_group_id" {}
7 | variable "subnet_ids" {}
8 | variable "vpc_id" {}
9 |
--------------------------------------------------------------------------------
/Docker/distcc-daemon/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM zilman/kernel
2 | MAINTAINER Eugene Zilman
3 |
4 | RUN apt-get install -y distcc distcc-pump
5 |
6 | COPY config /etc/default/distcc
7 | COPY runner.sh /runner.sh
8 |
9 | ENTRYPOINT ["/runner.sh"]
10 |
11 |
--------------------------------------------------------------------------------
/Docker/distcc-master/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM zilman/kernel
2 | MAINTAINER Eugene Zilman
3 |
4 | RUN apt-get install -y distcc distcc-pump
5 |
6 | COPY config /etc/default/distcc
7 | COPY runner.sh /runner.sh
8 |
9 | ENTRYPOINT ["/runner.sh"]
10 |
11 |
--------------------------------------------------------------------------------
/cncfdemo-cli/cncfdemo/Deployment/distcc/distcc-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     name: distcc
6 |   name: distcc
7 | spec:
8 |   selector:
9 |     app: distcc
10 |   clusterIP: None
11 |   ports:
12 |   - port: 3632
13 |
--------------------------------------------------------------------------------
/cncfdemo-cli/cncfdemo/bootstrap/bootstrap.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import click
4 | from aws.cli import aws
5 |
6 | @click.group()
7 | def cli():
8 |     pass
9 |
10 |
11 | cli.add_command(aws)
12 |
13 |
14 | if __name__ == '__main__':
15 |     cli()
16 |
--------------------------------------------------------------------------------
/provisioning/gce/gce.tf:
--------------------------------------------------------------------------------
1 | # Configure the Google Cloud provider
2 | provider "google" {
3 |
#credentials = "${file("gce.json")}" 4 | project = "${ var.project }" 5 | region = "${ var.region }" 6 | } 7 | 8 | provider "dnsimple" { 9 | } 10 | 11 | 12 | -------------------------------------------------------------------------------- /Docker/boinc/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | /sbin/setuser boinc /var/lib/boinc-client/attach.sh & 5 | 6 | exec /sbin/setuser boinc /usr/bin/boinc --exit_after_finish --fetch_minimal_work --exit_when_idle --abort_jobs_on_exit --no_gpus >>/var/lib/boinc-client/log 2>&1 7 | -------------------------------------------------------------------------------- /provisioning/gke/modules/vpc/gce-subnet.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_subnetwork" "cncf" { 2 | name = "${ var.name }" 3 | ip_cidr_range = "${ var.cidr }" 4 | network = "${ google_compute_network.cncf.self_link }" 5 | region = "${ var.region }" 6 | } 7 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/output.tf: -------------------------------------------------------------------------------- 1 | output "external_lb" { value = "${azurerm_lb_backend_address_pool.cncf.id }" } 2 | output "fqdn_lb" { value = "${azurerm_public_ip.cncf.fqdn}" } 3 | output "master_ips" { value = ["${ azurerm_network_interface.cncf.*.private_ip_address }"] } 4 | -------------------------------------------------------------------------------- /provisioning/gce/modules/etcd/external_lb.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_forwarding_rule" "external" { 2 | name = "external${ var.name }" 3 | target = "${google_compute_target_pool.cncf.self_link}" 4 | port_range = "443" 5 | load_balancing_scheme = "EXTERNAL" 6 | } 7 | -------------------------------------------------------------------------------- /provisioning/aws/modules/dns/output.tf: -------------------------------------------------------------------------------- 1 | output "depends_id" { value = "${null_resource.dummy_dependency.id}" } 2 | output "internal_name_servers" { value = "${ aws_route53_zone.internal.name_servers }" } 3 | output "internal_zone_id" { value = "${ aws_route53_zone.internal.zone_id }" } 4 | -------------------------------------------------------------------------------- /Docker/Kernel/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:wheezy 2 | 3 | ENV DEBIAN_FRONTEND noninteractive 4 | 5 | RUN apt-get -y update && apt-get -y install openssh-client coreutils fakeroot build-essential kernel-package wget xz-utils gnupg bc devscripts apt-utils initramfs-tools aria2 curl && apt-get clean 6 | -------------------------------------------------------------------------------- /Docker/Countly/runit/countly-dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cp /etc/config/frontend.js /opt/countly/frontend/express/config.js 4 | chown -R countly:countly /opt/countly/frontend/express/config.js 5 | 6 | exec /sbin/setuser countly /usr/bin/nodejs /opt/countly/frontend/express/app.js 7 | -------------------------------------------------------------------------------- /provisioning/aws/modules/bastion/user-data.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | --- 4 | 
coreos: 5 | update: 6 | reboot-strategy: etcd-lock 7 | 8 | etcd2: 9 | discovery-srv: ${ internal_tld } 10 | proxy: on 11 | 12 | units: 13 | - name: etcd2.service 14 | command: start 15 | -------------------------------------------------------------------------------- /provisioning/gce/modules/bastion/user-data.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | --- 4 | coreos: 5 | update: 6 | reboot-strategy: etcd-lock 7 | 8 | etcd2: 9 | discovery-srv: ${ internal_tld } 10 | proxy: on 11 | 12 | units: 13 | - name: etcd2.service 14 | command: start 15 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/echo/echo-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: echo 5 | namespace: default 6 | labels: 7 | k8s-app: echo 8 | spec: 9 | selector: 10 | k8s-app: echo 11 | ports: 12 | - name: dns 13 | port: 8000 14 | protocol: TCP 15 | -------------------------------------------------------------------------------- /Web/results/alpha.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | aws s3 sync . s3://alpha.cncfdemo.io --region us-west-2 --delete --exclude "search*" --exclude ".*" --exclude "*.sh" && \ 4 | aws s3 sync . s3://alpha.cncfdemo.io --region us-west-2 --delete --exclude "*" --include "search" --no-guess-mime-type --content-type text/html 5 | -------------------------------------------------------------------------------- /Web/results/release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | aws s3 sync . s3://beta.cncfdemo.io --region us-west-2 --delete --exclude "search*" --exclude ".*" --exclude "*.sh" && \ 4 | aws s3 sync . 
s3://beta.cncfdemo.io --region us-west-2 --delete --exclude "*" --include "search" --no-guess-mime-type --content-type text/html 5 | -------------------------------------------------------------------------------- /Docker/Countly/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM countly/countly-server:16.06 2 | 3 | MAINTAINER Eugene Zilman 4 | 5 | # Add custom Countly configs - these in turn come from k8s volume 6 | ADD ./runit/countly-api.sh /etc/service/countly-api/run 7 | ADD ./runit/countly-dashboard.sh /etc/service/countly-dashboard/run 8 | 9 | -------------------------------------------------------------------------------- /Docker/boinc/attach.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | while true; do 4 | 5 | if grep -q "Initialization complete" /var/lib/boinc-client/log; then 6 | exec boinccmd --project_attach http://www.worldcommunitygrid.org 1013367_21303863232c651457665d59cf936248 & 7 | break 8 | else 9 | sleep 2 10 | fi 11 | 12 | done 13 | -------------------------------------------------------------------------------- /provisioning/aws/aws.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { } 2 | provider "gzip" {compressionlevel = "BestCompression"} 3 | 4 | # configured via: 5 | # $ export AWS_ACCESS_KEY_ID="anaccesskey" 6 | # $ export AWS_SECRET_ACCESS_KEY="asecretkey" 7 | # $ export AWS_DEFAULT_REGION="us-west-2" 8 | # https://www.terraform.io/docs/providers/aws/#environment-variables 9 | -------------------------------------------------------------------------------- /provisioning/packet/modules/dns/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "master_ips" { type = "list" } 3 | variable "public_master_ips" { type = "list" } 4 | variable "public_worker_ips" { type = "list" } 5 | variable "master_node_count" {} 6 | variable "worker_node_count" {} 7 | variable "domain" {} 8 | variable "record_ttl" { default = "60" } 9 | -------------------------------------------------------------------------------- /provisioning/azure/modules/dns/output.tf: -------------------------------------------------------------------------------- 1 | # output "depends_id" { value = "${null_resource.dummy_dependency.id}" } 2 | output "internal_name_servers" { value = "${ azurerm_dns_zone.cncf.name_servers }" } 3 | output "internal_zone_id" { value = "${ azurerm_dns_zone.cncf.zone_id }" } 4 | output "name_servers_file" { value = "${ var.name_servers_file }" } 5 | -------------------------------------------------------------------------------- /provisioning/gke/modules/cluster/node-pool.tf: -------------------------------------------------------------------------------- 1 | resource "google_container_node_pool" "cncf" { 2 | name = "${ var.name }" 3 | project = "${ var.project }" 4 | zone = "${ var.zone }" 5 | cluster = "${google_container_cluster.cncf.name}" 6 | initial_node_count = "${ var.node_pool_count }" 7 | } 8 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/output.tf: -------------------------------------------------------------------------------- 1 | 2 | #output "depends_id" { value = "${ null_resource.dummy_dependency.id }" } 3 | output "external_elb" { value = "${ aws_elb.external.dns_name }" } 4 | output "internal_ips" { value = "${ join(",", aws_instance.etcd.*.public_ip) }" } 5 | 
6 | output "master_ips" { value = ["${ aws_instance.etcd.*.private_ip }"] }
7 |
--------------------------------------------------------------------------------
/cncfdemo-cli/cncfdemo/bootstrap/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import click
4 | from aws.cli import aws
5 |
6 |
7 | @click.group()
8 | def cli():
9 |     pass
10 |
11 |
12 | @click.group()
13 | def bootstrap():
14 |     pass
15 |
16 |
17 | cli.add_command(bootstrap)
18 | bootstrap.add_command(aws)
19 |
20 |
21 | if __name__ == '__main__':
22 |     cli()
23 |
--------------------------------------------------------------------------------
/Docker/distcc/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM debian:stable
2 | MAINTAINER Eugene Zilman
3 |
4 | RUN apt-get clean && apt update
5 |
6 | RUN apt install -y kernel-package
7 | RUN apt install -y git build-essential
8 | RUN apt install -y distcc distcc-pump
9 |
10 | COPY config /etc/default/distcc
11 | COPY runner.sh /runner.sh
12 |
13 | ENTRYPOINT ["/runner.sh"]
14 |
15 |
--------------------------------------------------------------------------------
/Images/base/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 |   become: yes
4 |
5 |   tasks:
6 |   - selinux: state=disabled
7 |   - copy: content="overlay" dest=/etc/modules-load.d/overlay.conf
8 |   - copy:
9 |       content: |
10 |         net.bridge.bridge-nf-call-ip6tables = 1
11 |         net.bridge.bridge-nf-call-iptables = 1
12 |       dest: /usr/lib/sysctl.d/90-system.conf
13 |
14 |   - yum: name=* state=latest
15 |
--------------------------------------------------------------------------------
/provisioning/gke/modules/cluster/input.tf:
--------------------------------------------------------------------------------
1 | variable "name" {}
2 | variable "region" {}
3 | variable "zone" {}
4 | variable "project" {}
5 | variable "node_count" {}
6 | variable "network" {}
7 | variable "subnetwork" {}
8 | variable "node_version" {}
9 | variable "master_user" {}
10 | variable "master_password" {}
11 | variable "node_pool_count" {}
12 | variable "vm_size" {}
13 | variable "data_dir" {}
14 |
15 |
--------------------------------------------------------------------------------
/provisioning/gce/modules/etcd/internal_lb.tf:
--------------------------------------------------------------------------------
1 | resource "google_compute_forwarding_rule" "cncf" {
2 |   name = "${ var.name }"
3 |   load_balancing_scheme = "INTERNAL"
4 |   region = "${ var.region }"
5 |   ports = ["8080", "443"]
6 |   network = "${ var.network }"
7 |   subnetwork = "${ var.subnetwork }"
8 |   backend_service = "${ google_compute_region_backend_service.cncf.self_link }"
9 | }
10 |
--------------------------------------------------------------------------------
/Demo.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 |   name: clusterstart
5 | spec:
6 |   activeDeadlineSeconds: 180
7 |   template:
8 |     metadata:
9 |       name: clusterstart
10 |     spec:
11 |       containers:
12 |       - name: clusterstart
13 |         image: zilman/cncf-demo
14 |         command: ["kubectl"]
15 |         args: ["create", "-f", "Demo", "--recursive"]
16 |       restartPolicy: Never
17 |
--------------------------------------------------------------------------------
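Demo.yaml above is a one-shot Job whose pod replays everything under Demo/ into the cluster with kubectl. A minimal way to run and inspect it, assuming a working kubeconfig (flags are era-appropriate for kubectl 1.4):

```
kubectl create -f Demo.yaml
kubectl describe job clusterstart     # the Job name from the manifest above
kubectl get pods --show-all           # the completed pod sticks around for log inspection
```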
/cncfdemo-cli/cncfdemo/cncf.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import click
4 |
5 | from cncfdemo.bootstrap.main import bootstrap
6 | from cncfdemo.kubectl.cmd_create import create
7 |
8 | @click.group()
9 | def cli():
10 |     """Welcome to the Cloud Native Computing Foundation Demo"""
11 |     pass
12 |
13 |
14 | cli.add_command(bootstrap)
15 | cli.add_command(create)
16 |
17 |
18 | if __name__ == '__main__':
19 |     cli()
20 |
--------------------------------------------------------------------------------
/ClusterStart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 |   name: clusterstart
5 | spec:
6 |   activeDeadlineSeconds: 180
7 |   template:
8 |     metadata:
9 |       name: clusterstart
10 |     spec:
11 |       containers:
12 |       - name: clusterstart
13 |         image: zilman/kube-addons
14 |         command: ["kubectl"]
15 |         args: ["create", "-f", "AddOns", "--recursive"]
16 |       restartPolicy: Never
17 |
--------------------------------------------------------------------------------
/cncfdemo-cli/cncfdemo/Deployment/distcc/distcc-ds.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 |   name: distcc
5 | spec:
6 |   template:
7 |     metadata:
8 |       labels:
9 |         app: distcc
10 |       name: distcc
11 |     spec:
12 |       containers:
13 |       - image: zilman/distcc-daemon
14 |         name: distcc
15 |         ports:
16 |         - containerPort: 3632
17 |           hostPort: 3632
18 |
--------------------------------------------------------------------------------
/provisioning/aws/modules/security/io.tf:
--------------------------------------------------------------------------------
1 | variable "allow_ssh_cidr" {}
2 | variable "vpc_cidr" {}
3 | variable "name" {}
4 | variable "vpc_id" {}
5 |
6 | output "bastion_id" { value = "${ aws_security_group.bastion.id }" }
7 | output "etcd_id" { value = "${ aws_security_group.etcd.id }" }
8 | output "external_elb_id" { value = "${ aws_security_group.external_elb.id }" }
9 | output "worker_id" { value = "${ aws_security_group.worker.id }" }
10 |
--------------------------------------------------------------------------------
/Docker/kubectl/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine
2 |
3 | ENV KUBE_LATEST_VERSION="v1.4.4"
4 |
5 | RUN apk add --update ca-certificates \
6 |     && apk add --update -t deps curl \
7 |     && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
8 |     && chmod +x /usr/local/bin/kubectl \
9 |     && apk del --purge deps \
10 |     && rm /var/cache/apk/*
11 |
12 | ENTRYPOINT /usr/bin/tail -f /dev/null
13 |
--------------------------------------------------------------------------------
/cncfdemo-cli/cncfdemo/Deployment/Countly/configMaps/countly/frontend.js:
--------------------------------------------------------------------------------
1 | var countlyConfig = {
2 |
3 |     mongodb: {
4 |         host: "mongos.default",
5 |         db: "countly",
6 |         port: 27017,
7 |         max_pool_size: 10,
8 |     },
9 |
10 |     web: {
11 |         port: 6001,
12 |         host: "localhost",
13 |         use_intercom: true
14 |     },
15 |
16 |     path: "",
17 |     cdn: ""
18 |
19 | };
20 |
21 | module.exports = countlyConfig;
22 |
--------------------------------------------------------------------------------
/provisioning/aws/modules/vpc/output.tf:
-------------------------------------------------------------------------------- 1 | output "depends_id" { value = "${null_resource.dummy_dependency.id}" } 2 | output "id" { value = "${ aws_vpc.main.id }" } 3 | output "subnet_ids_private" { value = "${ join(",", aws_subnet.private.*.id) }" } 4 | output "subnet_ids_public" { value = "${ join(",", aws_subnet.public.*.id) }" } 5 | 6 | #output "gateway_id" { value = "${ aws_internet_gateway.main.id }" } 7 | #output "route_table_id" { value = "${ aws_route_table.private.id }" } 8 | -------------------------------------------------------------------------------- /Web/api/_tests/new/event.json: -------------------------------------------------------------------------------- 1 | { 2 | "body": { 3 | "Metadata": { 4 | "Masters": { 5 | "size": 1, 6 | "type": "m3.medium" 7 | }, 8 | "Minions": { 9 | "size": 3, 10 | "type": "m4.large" 11 | }, 12 | "Provider": "AWS", 13 | "RAM": "24GiB", 14 | "Storage": "250GB", 15 | "vcpu": 6 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /provisioning/aws/cleanup.tf: -------------------------------------------------------------------------------- 1 | # Clean-up Destroy 2 | resource "null_resource" "cleanup" { 3 | 4 | provisioner "local-exec" { 5 | when = "destroy" 6 | on_failure = "continue" 7 | command = < ${ var.etcd_discovery } 8 | EOF 9 | } 10 | 11 | provisioner "local-exec" { 12 | when = "destroy" 13 | on_failure = "continue" 14 | command = < ${ var.etcd_discovery } 7 | EOF 8 | } 9 | 10 | provisioner "local-exec" { 11 | when = "destroy" 12 | on_failure = "continue" 13 | command = < packer build packer.json 10 | 11 | ## Dependencies 12 | 13 | - Packer 0.11+ 14 | - Ansible 2.1+ installed ([installation instructions] (http://docs.ansible.com/ansible/intro_installation.html)) 15 | -------------------------------------------------------------------------------- /Web/api/summary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: cncfdemo-summary 3 | environments: 4 | dev: 5 | profile: default 6 | region: us-west-2 7 | policy: 8 | resources: 9 | - arn: arn:aws:s3:::* 10 | actions: 11 | - "*" 12 | - arn: arn:aws:logs:*:*:* 13 | actions: 14 | - "*" 15 | 16 | lambda: 17 | description: cncfdemo summary view 18 | handler: summary.handler 19 | runtime: python2.7 20 | memory_size: 256 21 | timeout: 3 22 | 23 | -------------------------------------------------------------------------------- /Web/results/summary.json.bak: -------------------------------------------------------------------------------- 1 | { 2 | "Results": [{ 3 | "id": "joOKmJg", 4 | "timestart": 323232, 5 | "timeend": 323232, 6 | "Provider": "AWS", 7 | "CPU": 7, 8 | "Memory": 24, 9 | "DistCC": 1061, 10 | "HTTP_Requests": "2,432,234", 11 | "Boinc_Jobs": 0 12 | }, 13 | { 14 | "Provider": "AWS", 15 | "CPU": 6, 16 | "Memory": 24, 17 | "DistCC": 1299, 18 | "HTTP_Requests": "2,129,533", 19 | "Boinc_Jobs": 0 20 | 21 | }] 22 | 23 | 24 | } 25 | -------------------------------------------------------------------------------- /provisioning/azure/runme: -------------------------------------------------------------------------------- 1 | rm -rf /cncf/data/.ssh/ 2 | rm -rf /cncf/data/.cfssl/ 3 | rm -rf /cncf/data/azure-config.json 4 | rm -rf /build/azure/terraform.tfstate* 5 | rm -rf /build/azure/azure_dns* 6 | terraform get 7 | terraform apply -target null_resource.sshkey_gen 8 | terraform apply -target null_resource.ssl_gen 9 | terraform apply -target 
null_resource.cloud_gen 10 | terraform apply -target module.dns.null_resource.dns_gen 11 | terraform apply -target module.etcd.azurerm_network_interface.cncf 12 | time terraform apply -------------------------------------------------------------------------------- /provisioning/gce/runme: -------------------------------------------------------------------------------- 1 | rm -rf /cncf/data/.ssh/ 2 | rm -rf /cncf/data/.cfssl/ 3 | rm -rf /cncf/data/azure-config.json 4 | rm -rf /build/azure/terraform.tfstate* 5 | rm -rf /build/azure/azure_dns* 6 | terraform get 7 | terraform apply -target null_resource.sshkey_gen 8 | terraform apply -target null_resource.ssl_gen 9 | terraform apply -target null_resource.cloud_gen 10 | terraform apply -target module.dns.null_resource.dns_gen 11 | terraform apply -target module.etcd.azurerm_network_interface.cncf 12 | time terraform apply -------------------------------------------------------------------------------- /Docker/falcon/app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json 4 | import falcon 5 | 6 | 7 | class Hello(object): 8 | def on_get(self, request, response): 9 | response.set_header('Content-Type', 'text/plain') 10 | response.body = b'Hello, world!' 11 | 12 | 13 | app = falcon.API() 14 | app.add_route("/", Hello()) 15 | 16 | 17 | if __name__ == "__main__": 18 | from wsgiref import simple_server 19 | 20 | httpd = simple_server.make_server('localhost', 8080, app) 21 | httpd.serve_forever() 22 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cncfdemo create configmap demo --from-file . --recursive 4 | cncfdemo create -f . 
--recursive 5 | 6 | # kubectl get pods 7 | # kubectl logs -f mongors1-setup-k8cxn 8 | # kubectl logs -f mongors2-setup-wje6o 9 | # kubectl logs -f mongocfg-setup-tewjt 10 | # these can really only be scripted by listening for events from the api on a background thread 11 | 12 | # Optional step: python Utils/AWS/route53.py -elb $(./Utils/get_ingress.sh countly) -domain countly.cncfdemo.io 13 | -------------------------------------------------------------------------------- /provisioning/aws/modules/vpc/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "aws_vpc" "main" { 2 | cidr_block = "${ var.cidr }" 3 | 4 | enable_dns_hostnames = true 5 | enable_dns_support = true 6 | 7 | tags { 8 | builtWith = "terraform" 9 | KubernetesCluster = "${ var.name }" 10 | kz8s = "${ var.name }" 11 | Name = "kz8s-${ var.name }" 12 | visibility = "private,public" 13 | } 14 | } 15 | 16 | resource "null_resource" "dummy_dependency" { 17 | depends_on = [ 18 | "aws_vpc.main", 19 | "aws_nat_gateway.nat" 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/Countly/configMaps/countly/api.js: -------------------------------------------------------------------------------- 1 | var countlyConfig = { 2 | 3 | mongodb: { 4 | host: "mongos.default", 5 | db: "countly", 6 | port: 27017, 7 | max_pool_size: 500, 8 | }, 9 | 10 | api: { 11 | port: 3001, 12 | host: "localhost", 13 | max_sockets: 1024 14 | }, 15 | 16 | path: "", 17 | logging: { 18 | info: ["jobs", "push"], 19 | default: "warn" 20 | } 21 | 22 | }; 23 | 24 | module.exports = countlyConfig; 25 | -------------------------------------------------------------------------------- /provisioning/aws/keypair.tf: -------------------------------------------------------------------------------- 1 | # Add AWS Keypair 2 | resource "null_resource" "aws_keypair" { 3 | provisioner "local-exec" { 4 | command = < ${ var.data_dir }/${ var.aws_key_name }.pem 10 | chmod 400 ${ var.data_dir }/${ var.aws_key_name }.pem 11 | EOF 12 | } 13 | } 14 | 15 | resource "null_resource" "dummy_dependency2" { 16 | depends_on = [ "null_resource.aws_keypair" ] 17 | } 18 | -------------------------------------------------------------------------------- /provisioning/aws/modules/iam/io.tf: -------------------------------------------------------------------------------- 1 | # variable "s3_bucket" {} 2 | # variable "depends_id" {} 3 | variable "name" {} 4 | 5 | output "depends_id" { value = "${ null_resource.dummy_dependency.id }" } 6 | output "aws-iam-role-etcd-id" { value = "${ aws_iam_role.master.id }" } 7 | output "aws-iam-role-worker-id" { value = "${ aws_iam_role.worker.id }" } 8 | output "instance_profile_name_master" { value = "${ aws_iam_instance_profile.master.name }" } 9 | output "instance_profile_name_worker" { value = "${ aws_iam_instance_profile.worker.name }" } 10 | -------------------------------------------------------------------------------- /provisioning/packet/modules/etcd/output.tf: -------------------------------------------------------------------------------- 1 | #output "fqdn_lb" { value = "${azurerm_public_ip.cncf.fqdn}" } 2 | output "first_master_ip" { value = "${ packet_device.masters.0.network.0.address }" } 3 | output "second_master_ip" { value = "${ packet_device.masters.1.network.0.address }" } 4 | output "third_master_ip" { value = "${ packet_device.masters.2.network.0.address }" } 5 | output "master_ips" { value = ["${ 
packet_device.masters.*.network.2.address }"] }
6 | output "public_master_ips" { value = ["${ packet_device.masters.*.network.0.address }"] }
7 |
--------------------------------------------------------------------------------
/provisioning/gce/keypair.tf:
--------------------------------------------------------------------------------
1 | #Create SSH Keypair
2 | resource "null_resource" "sshkey_gen" {
3 |
4 |   provisioner "local-exec" {
5 |     command = <
--------------------------------------------------------------------------------
/Docker/fluentd-reportstats/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fluent/fluentd:latest
2 | MAINTAINER
3 | WORKDIR /home/fluent
4 | ENV PATH /home/fluent/.gem/ruby/2.3.0/bin:$PATH
5 |
6 | USER root
7 | RUN apk --no-cache add sudo build-base ruby-dev && \
8 |     sudo -u fluent gem install fluent-plugin-secure-forward fluent-plugin-s3 && \
9 |     rm -rf /home/fluent/.gem/ruby/2.3.0/cache/*.gem && sudo -u fluent gem sources -c && \
10 |     apk del sudo build-base ruby-dev
11 |
12 | EXPOSE 24284
13 |
14 | USER fluent
15 | CMD exec fluentd -c /fluentd/etc/$FLUENTD_CONF -p /fluentd/plugins $FLUENTD_OPT
--------------------------------------------------------------------------------
/Images/base/base.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | region='us-west-2'
4 | productCode='aw0evgkw8e5c1q413zgy5pjce'
5 |
6 | AMI=$(aws --region $region ec2 describe-images --owners aws-marketplace --filters Name=product-code,Values=$productCode --query 'Images | [-1] | ImageId' --out text)
7 |
8 | echo $AMI
9 |
10 | # This is a convenience script to grab the latest CentOS7 AMI id.
11 | # A soon-to-be-released version of Packer has a 'dynamic source AMI' feature,
12 | # so one can specify the latest image right in the packer template.
13 |
14 | # Otherwise the output of this script would have to be injected into the packer template.
15 |
--------------------------------------------------------------------------------
/provisioning/azure/output.tf:
--------------------------------------------------------------------------------
1 | output "fqdn_k8s" { value = "${ module.etcd.fqdn_lb}" }
2 | output "bastion_ip" { value = "${ module.bastion.bastion_ip}" }
3 | output "bastion_fqdn" { value = "${ module.bastion.bastion_fqdn}" }
4 | output "k8s_admin" { value = "${ var.k8s_admin }"}
5 | # fixme for use outside container
6 | output "ssh_key_setup" { value = "eval $(ssh-agent) ; ssh-add ${ var.data_dir }/.ssh/id_rsa"}
7 | output "ssh_via_bastion" { value = "ssh -At ${ var.admin_username }@${ module.bastion.bastion_fqdn } ssh ${ var.admin_username }@etcd1.${ var.internal_tld }"}
8 | output "kubeconfig" { value = "${ module.kubeconfig.kubeconfig }"}
9 |
--------------------------------------------------------------------------------
/Docker/gunicorn/gunicorn_conf.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os
3 | import sys
4 |
5 | # Sane Defaults
6 |
7 | workers = multiprocessing.cpu_count()
8 | bind = '0.0.0.0:8080'
9 | keepalive = 120
10 | errorlog = '-'
11 | pidfile = 'gunicorn.pid'
12 | worker_class = "meinheld.gmeinheld.MeinheldWorker"
13 |
14 | def post_fork(server, worker):
15 |     # Disable access log
16 |     import meinheld.server
17 |     meinheld.server.set_access_logger(None)
18 |
19 | # Override from ENV
20 | for k,v in os.environ.items():
21 |     if k.startswith("GUNICORN_"):
22 |         key = k.split('_', 1)[1].lower()
23 |         locals()[key] = v
24 |
25 |
--------------------------------------------------------------------------------
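The GUNICORN_* loop above makes every setting overridable from the environment, which is how echo-rc.yaml elsewhere in this repo points the echo pods at port 8000. For example (image name from Docker/echo; the override mechanism is the loop shown above):

```
# Rebind gunicorn via the environment instead of editing gunicorn_conf.py
docker run --rm -e GUNICORN_BIND=0.0.0.0:8000 -p 8000:8000 zilman/echo
curl http://localhost:8000/
```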
/cncfdemo-cli/cncfdemo/Deployment/distcc/README.md:
--------------------------------------------------------------------------------
1 | ### Usage
2 |
3 | runner.sh should self-configure as master or slave and do approximately:
4 |
5 | ```
6 | git clone --depth 1 git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
7 | cd linux-stable && make defconfig
8 |
9 | export DISTCC_HOSTS=$(getent hosts distcc | awk '{ printf "%s,cpp,lzo ", $1 }')
10 | #distcc --show-hosts
11 | eval $(distcc-pump --startup)
12 | export PATH=/usr/lib/distcc:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
13 |
14 | fakeroot make-kpkg --initrd --append-to-version=testbuild --revision=0.1 kernel_image
15 | ```
16 |
--------------------------------------------------------------------------------
/Docker/boinc/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM phusion/baseimage:0.9.19
2 |
3 | MAINTAINER Eugene Zilman
4 |
5 | RUN apt update -y && \
6 |     apt install -y boinc-client && \
7 |     apt-get clean && \
8 |     rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
9 |
10 | RUN mkdir -p /var/lib/boinc-client/projects/www.worldcommunitygrid.org && \
11 |     mkdir -p /var/lib/boinc-client/slots && \
12 |     chown -R boinc:boinc /var/lib/boinc-client
13 |
14 | ADD runner.sh /var/lib/boinc-client
15 | ADD attach.sh /var/lib/boinc-client
16 |
17 | WORKDIR /var/lib/boinc-client
18 |
19 | ENTRYPOINT ["/var/lib/boinc-client/runner.sh"]
20 |
--------------------------------------------------------------------------------
/Docker/distcc-master/runner.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -ex
4 |
5 | /etc/init.d/distcc start
6 |
7 | git clone --depth 1 git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
8 | cd linux-stable && make defconfig
9 |
10 | export DISTCC_HOSTS="$(getent hosts distcc | awk '{ printf "%s,cpp,lzo ", $1 }')"
11 | export N_JOBS="$(echo $(getent hosts distcc | wc -l)+2 | bc)"
12 |
13 | distcc --show-hosts
14 |
15 | eval $(distcc-pump --startup)
16 | export PATH=/usr/lib/distcc:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
17 |
18 | DISTCC_VERBOSE=1 make -j$N_JOBS 2>&1 | tee build.log
19 |
20 | sleep infinity
21 |
--------------------------------------------------------------------------------
/Demo/Manifests/distcc-daemon.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     name: distcc
6 |   name: distcc
7 | spec:
8 |   selector:
9 |     app: distcc
10 |   clusterIP: None
11 |   ports:
12 |   - port: 3632
13 |
14 | ---
15 |
16 | apiVersion: extensions/v1beta1
17 | kind: DaemonSet
18 | metadata:
19 |   name: distcc
20 | spec:
21 |   template:
22 |     metadata:
23 |       labels:
24 |         app: distcc
25 |       name: distcc
26 |     spec:
27 |       containers:
28 |       - image: zilman/distcc-daemon
29 |         name: distcc
30 |         ports:
31 |         - containerPort: 3632
32 |           hostPort: 3632
33 |
--------------------------------------------------------------------------------
/provisioning/cross-cloud/output.tf:
--------------------------------------------------------------------------------
1 | # outputs
2 | output "aws_external_elb" { value = "${ module.aws.external_elb }" }
3 | output "aws_internal_tld" { value = "${ module.aws.internal_tld }" }
4 | # standardizing key locations would be nice
5 | output "ssh_key_setup" { value = "eval $(ssh-agent) ; ssh-add ${ var.data_dir }/*/*.pem ; ssh-add ${ var.data_dir}/*/.ssh/id_rsa" }
6 | output "aws_bastion" { value =
"${ module.aws.ssh_via_bastion }" } 7 | output "azure_k8s_fqdn" { value = "${ module.azure.fqdn_k8s }" } 8 | output "azure_bastion" { value = "${ module.azure.ssh_via_bastion }" } 9 | output "kubeconfig" { value = "${ data.template_file.kubeconfig.rendered }" } 10 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/Deployment/echo/echo-rc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: echo-v1 5 | labels: 6 | k8s-app: echo 7 | spec: 8 | replicas: 2 9 | selector: 10 | k8s-app: echo 11 | template: 12 | metadata: 13 | labels: 14 | k8s-app: echo 15 | spec: 16 | nodeSelector: 17 | echo: "yes" 18 | containers: 19 | - name: echo 20 | image: zilman/echo:latest 21 | env: 22 | - name: GUNICORN_BIND 23 | value: 0.0.0.0:8000 24 | ports: 25 | - containerPort: 8000 26 | name: gunicorn-local 27 | protocol: TCP 28 | -------------------------------------------------------------------------------- /provisioning/aws/wait-for-cluster: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | ELB=$(terraform output external_elb) 4 | 5 | _retry() { 6 | [ -z "${2}" ] && return 1 7 | echo -n ${1} 8 | until printf "." && "${@:2}" &>/dev/null; do sleep 5.2; done; echo "✓" 9 | } 10 | 11 | echo "❤ Polling for cluster life - this could take a minute or more" 12 | 13 | _retry "❤ Waiting for DNS to resolve for ${ELB}" ping -c1 "${ELB}" 14 | _retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}" 15 | _retry "❤ Trying to connect to cluster with kubectl" kubectl cluster-info 16 | 17 | kubectl cluster-info 18 | sleep 2 # FIXME: Maybe API was up, but scheduling wasn't quite up? 19 | -------------------------------------------------------------------------------- /provisioning/gce/cert.tf: -------------------------------------------------------------------------------- 1 | #Gen Certs 2 | resource "null_resource" "ssl_gen" { 3 | 4 | provisioner "local-exec" { 5 | command = </dev/null; do sleep 5.2; done; echo "✓" 9 | } 10 | 11 | echo "❤ Polling for cluster life - this could take a minute or more" 12 | 13 | _retry "❤ Waiting for DNS to resolve for ${ELB}" getent hosts "${ELB}" 14 | _retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}" 15 | _retry "❤ Trying to connect to cluster with kubectl" kubectl cluster-info 16 | 17 | kubectl cluster-info 18 | sleep 2 # FIXME: Maybe API was up, but scheduling wasn't quite up? 19 | -------------------------------------------------------------------------------- /provisioning/azure/wait-for-cluster: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | ELB=$(terraform output fqdn_k8s) 4 | 5 | _retry() { 6 | [ -z "${2}" ] && return 1 7 | echo -n ${1} 8 | until printf "." && "${@:2}" &>/dev/null; do sleep 5.2; done; echo "✓" 9 | } 10 | 11 | echo "❤ Polling for cluster life - this could take a minute or more" 12 | 13 | _retry "❤ Waiting for DNS to resolve for ${ELB}" getent hosts "${ELB}" 14 | _retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}" 15 | _retry "❤ Trying to connect to cluster with kubectl" kubectl cluster-info 16 | 17 | kubectl cluster-info 18 | sleep 2 # FIXME: Maybe API was up, but scheduling wasn't quite up? 
19 | -------------------------------------------------------------------------------- /provisioning/packet/modules/etcd/kube-scheduler.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-scheduler 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-scheduler 10 | image: ${ kubelet_image_url }:${ kubelet_image_tag } 11 | command: 12 | - /hyperkube 13 | - scheduler 14 | - --leader-elect=true 15 | - --master=http://127.0.0.1:8080 16 | resources: 17 | requests: 18 | cpu: 100m 19 | livenessProbe: 20 | httpGet: 21 | host: 127.0.0.1 22 | path: /healthz 23 | port: 10251 24 | initialDelaySeconds: 15 25 | timeoutSeconds: 1 26 | -------------------------------------------------------------------------------- /Docker/boinc/README.md: -------------------------------------------------------------------------------- 1 | 2 | boinccmd --lookup_account http://www.worldcommunitygrid.org zilman zombocom 3 | status: Success 4 | poll status: operation in progress 5 | account key: d2804d9d05efdad427b69bc020d5492f 6 | 7 | pkill boinc 8 | 9 | /var/lib/boinc-client 10 | boinc & 11 | boinccmd --project_attach http://www.worldcommunitygrid.org d2804d9d05efdad427b69bc020d5492f 12 | 13 | weak account key is "better": 14 | 15 | boinccmd --project_attach http://www.worldcommunitygrid.org 1013367_21303863232c651457665d59cf936248 16 | 17 | /usr/bin/boinc --skip_cpu_benchmarks --exit_after_finish --fetch_minimal_work --exit_when_idle --abort_jobs_on_exit --no_gpus 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /Docker/Wrk/runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -ex 4 | 5 | if [ -z "$URL" ]; then echo "URL Required" && exit 1; fi 6 | 7 | # Wrk Defaults 8 | SCRIPT=${SCRIPT-/wrk/send_summary.lua} 9 | DURATION=${DURATION-5} 10 | CONNECTIONS=${CONNECTIONS-5} 11 | THREADS=${THREADS-2} 12 | TIMEOUT=${TIMEOUT-3} 13 | 14 | # Global Defaults 15 | export hostIP=$(curl -m1 -s http://169.254.169.254/latest/meta-data/local-ipv4) 16 | export podID=$HOSTNAME 17 | 18 | hostIP=${hostIP:=127.0.0.1} 19 | 20 | export PUSHGATEWAY_SERVICE_PORT=${PUSHGATEWAY_SERVICE_PORT:=9091} 21 | export PUSHGATEWAY=${PUSHGATEWAY-pushgateway} 22 | 23 | wrk -s $SCRIPT -d$DURATION -c$CONNECTIONS -t$THREADS --timeout $TIMEOUT $URL 24 | -------------------------------------------------------------------------------- /provisioning/gce/modules/bastion/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | # variable "location" {} 3 | variable "region" {} 4 | variable "project" {} 5 | variable "zone" {} 6 | # variable "bastion-vm-size" {} 7 | # variable "image-publisher" {} 8 | # variable "image-offer" {} 9 | # variable "image-sku" {} 10 | # variable "image-version" {} 11 | # variable "admin_username" {} 12 | variable "internal_tld" {} 13 | # variable "subnet-id" {} 14 | # variable "availability-id" {} 15 | # variable "storage-container" {} 16 | # variable "storage-primary-endpoint" {} 17 | 18 | 19 | 20 | 21 | # variable "cidr-allow-ssh" {} 22 | # variable "security-group-id" {} 23 | # variable "subnet-ids" {} 24 | -------------------------------------------------------------------------------- /Docker/Wrk/README.md: -------------------------------------------------------------------------------- 1 | ## Summary 2 | 3 | This is an Alpine based 
image with curl and [wrk](https://github.com/wg/wrk) (HTTP benchmarking tool). 4 | 5 | ## Usage 6 | 7 | To try it out, simply pass a URL: 8 | 9 | docker run -e URL="http://google.com" zilman/wrk 10 | 11 | runner.sh is a simple wrapper around the wrk command. 12 | 13 | Pass environment variables to override any of the defaults: 14 | 15 | - DURATION 16 | - CONNECTIONS 17 | - THREADS 18 | - TIMEOUT 19 | 20 | Summary results are curl'd to an endpoint as defined in the included lua script. 21 | Set your own location by exporting PUSHGATEWAY. 22 | 23 | #### Tip 24 | 25 | You can poke around by doing: 26 | docker run --entrypoint /bin/sh --rm -ti zilman/wrk 27 | -------------------------------------------------------------------------------- /provisioning/packet/modules/worker/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "worker_node_count" {} 3 | variable "packet_facility" {} 4 | variable "packet_project_id" {} 5 | variable "packet_billing_cycle" {} 6 | variable "packet_operating_system" {} 7 | variable "packet_worker_device_plan" {} 8 | variable "kubelet_image_url" {} 9 | variable "kubelet_image_tag" {} 10 | variable "dns_service_ip" {} 11 | variable "cluster_domain" {} 12 | variable "internal_tld" {} 13 | variable "pod_cidr" {} 14 | variable "service_cidr" {} 15 | variable "ca" {} 16 | variable "k8s_etcd" {} 17 | variable "k8s_etcd_key" {} 18 | variable "k8s_worker" {} 19 | variable "k8s_worker_key" {} 20 | variable "data_dir" {} 21 | variable "etcd_discovery" {} 22 | -------------------------------------------------------------------------------- /provisioning/packet/modules/etcd/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" {} 2 | variable "master_node_count" {} 3 | variable "packet_facility" {} 4 | variable "packet_project_id" {} 5 | variable "packet_billing_cycle" {} 6 | variable "packet_operating_system" {} 7 | variable "packet_master_device_plan" {} 8 | variable "kubelet_image_url" {} 9 | variable "kubelet_image_tag" {} 10 | variable "dns_service_ip" {} 11 | variable "cluster_domain" {} 12 | variable "internal_tld" {} 13 | variable "pod_cidr" {} 14 | variable "service_cidr" {} 15 | variable "ca" {} 16 | variable "k8s_etcd" {} 17 | variable "k8s_etcd_key" {} 18 | variable "k8s_apiserver" {} 19 | variable "k8s_apiserver_key" {} 20 | variable "data_dir" {} 21 | variable "etcd_discovery" {} 22 | -------------------------------------------------------------------------------- /Docker/fluentd-kubectl/fluent.conf: -------------------------------------------------------------------------------- 1 | 2 | @type exec 3 | tag joblog 4 | command sh -c "kubectl logs $(kubectl get pod -l job-name=${"job_name"} -a --output=jsonpath={.items..metadata.name})" 5 | keys message 6 | run_interval 5s 7 | 8 | 9 | 10 | @type s3 11 | 12 | s3_bucket stats.cncfdemo.io 13 | s3_region us-west-2 14 | 15 | path "fluentd/#{ENV["uuid"]}/" 16 | buffer_path /fluentd/log/s3_buffer 17 | 18 | time_slice_format %Y%m%d%H%M%S 19 | time_slice_wait 1s 20 | utc 21 | 22 | s3_object_key_format "%{path}#{ENV["step"]}.%{file_extension}" 23 | format json 24 | store_as json 25 | include_time_key true 26 | time_format %s 27 | overwrite true 28 | 29 | 30 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/aws/Policies/lambda-policy.json: -------------------------------------------------------------------------------- 1 | { 2 |
"Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "ec2:Describe*", 7 | "Resource": "*" 8 | }, 9 | { 10 | "Effect": "Allow", 11 | "Action": [ 12 | "logs:CreateLogGroup", 13 | "logs:CreateLogStream", 14 | "logs:PutLogEvents" 15 | ], 16 | "Resource": "arn:aws:logs:*:*:*" 17 | }, 18 | { 19 | "Effect": "Allow", 20 | "Action": [ 21 | "route53:*" 22 | ], 23 | "Resource": [ 24 | "*" 25 | ] 26 | }, 27 | { 28 | "Effect": "Allow", 29 | "Action": [ 30 | "autoscaling:Describe*" 31 | ], 32 | "Resource": [ 33 | "*" 34 | ] 35 | } 36 | ] 37 | } 38 | -------------------------------------------------------------------------------- /provisioning/gce/modules/worker/cloud-config.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "cloud-config" { 2 | template = "${ file( "${ path.module }/cloud-config.yml" )}" 3 | 4 | vars { 5 | cluster_domain = "${ var.cluster_domain }" 6 | dns_service_ip = "${ var.dns_service_ip }" 7 | kubelet_image_url = "${ var.kubelet_image_url }" 8 | kubelet_image_tag = "${ var.kubelet_image_tag }" 9 | internal_tld = "${ var.internal_tld }" 10 | ca = "${ base64encode(var.ca) }" 11 | k8s-worker = "${ base64encode(var.k8s-worker) }" 12 | k8s-worker-key = "${ base64encode(var.k8s-worker-key) }" 13 | name = "${ var.name }" 14 | domain = "${ var.domain }" 15 | # cloud-config = "${ base64encode(var.cloud-config) }" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /Docker/fluentd-reportstats/fluent.conf: -------------------------------------------------------------------------------- 1 | 2 | @type tail 3 | path /fluentd/log/test.log 4 | pos_file /fluentd/log/test.log.pos 5 | tag batman 6 | read_from_head true 7 | format none 8 | 9 | 10 | 11 | @type file 12 | path /fluentd/log/test2.out 13 | 14 | 15 | 16 | @type s3 17 | 18 | s3_bucket stats.cncfdemo.io 19 | s3_region us-west-2 20 | path "fluentd/#{ENV["uuid"]}/" 21 | 22 | buffer_path /fluentd/log/s3_buffer 23 | 24 | time_slice_format %Y%m%d%H%M%S 25 | time_slice_wait 10s 26 | utc 27 | 28 | s3_object_key_format "%{path}#{ENV["step"]}.%{file_extension}.%{index}" 29 | format json 30 | store_as json 31 | include_time_key true 32 | time_format %s 33 | 34 | 35 | -------------------------------------------------------------------------------- /provisioning/gce/modules/vpc/io.tf: -------------------------------------------------------------------------------- 1 | #variable "azs" {} 2 | variable "cidr" {} 3 | #variable "hyperkube-tag" {} 4 | #variable "depends-id" {} 5 | variable "name" {} 6 | # variable "name-servers-file" {} 7 | # variable "location" {} 8 | variable "region" {} 9 | 10 | #output "depends-id" { value = "${null_resource.dummy_dependency.id}" } 11 | #output "gateway-id" { value = "${ aws_internet_gateway.cncf.id }" } 12 | #output "id" { value = "${ aws_vpc.cncf.id }" } 13 | #output "route-table-id" { value = "${ aws_route_table.private.id }" } 14 | #output "subnet-ids-private" { value = "${ join(",", aws_subnet.private.*.id) }" } 15 | #output "subnet-ids-public" { value = "${ join(",", aws_subnet.public.*.id) }" } 16 | # output "subnet-id" { value = "${ azurerm_subnet.cncf.id }" } 17 | -------------------------------------------------------------------------------- /Images/base/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | "builders": [{ 4 | "type": "amazon-ebs", 5 | "region": "us-west-2", 6 | "source_ami_filter": { 7 | "filters": { 8 | 
"virtualization-type": "hvm", 9 | "name": "*CentOS Linux 7 x86_64 HVM EBS*", 10 | "root-device-type": "ebs" }, 11 | "most_recent": true 12 | }, 13 | "instance_type": "c4.2xlarge", 14 | "ssh_username": "centos", 15 | "ssh_pty" : false, 16 | "ami_name": "cncfbase{{timestamp}}", 17 | "user_data_file": "disable_tty.sh" 18 | }], 19 | 20 | "provisioners": [{ 21 | "type": "ansible", 22 | "playbook_file": "playbook.yml", 23 | "user": "centos", 24 | "sftp_command": "/usr/libexec/openssh/sftp-server", 25 | "extra_arguments": [ "-vvv", "--extra-vars", "packer=yes" ] 26 | }] 27 | 28 | } 29 | -------------------------------------------------------------------------------- /provisioning/azure/modules/bastion/bastion-user-data.yml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | --- 4 | coreos: 5 | update: 6 | reboot-strategy: etcd-lock 7 | 8 | etcd2: 9 | discovery-srv: ${ internal_tld } 10 | proxy: on 11 | 12 | units: 13 | - name: etcd2.service 14 | command: start 15 | # - name: s3-iam-get.service 16 | # command: start 17 | # content: | 18 | # [Unit] 19 | # Description=s3-iam-get 20 | # [Service] 21 | # Type=oneshot 22 | # RemainAfterExit=yes 23 | # ExecStartPre=-/usr/bin/mkdir -p /opt/bin 24 | # ExecStartPre=/usr/bin/curl -L -o /opt/bin/s3-iam-get \ 25 | # https://raw.githubusercontent.com/kz8s/s3-iam-get/master/s3-iam-get 26 | # ExecStart=/usr/bin/chmod +x /opt/bin/s3-iam-get 27 | -------------------------------------------------------------------------------- /provisioning/gce/modules/dns/dns.tf: -------------------------------------------------------------------------------- 1 | # domain - (Required) The domain to add the record to 2 | # name - (Required) The name of the record 3 | # value - (Required) The value of the record 4 | # type - (Required) The type of the record 5 | # ttl - (Optional) The TTL of the record 6 | # priority - (Optional) The priority of the record - only useful for some record typesi 7 | 8 | resource "dnsimple_record" "A-public-endpoint" { 9 | name = "endpoint.${ var.name }" 10 | value = "${ var.external_lb}" 11 | type = "A" 12 | ttl = "${ var.record_ttl }" 13 | domain = "${ var.domain }" 14 | } 15 | 16 | resource "dnsimple_record" "A-internal-lb" { 17 | name = "master.${ var.name }" 18 | value = "${ var.internal_lb}" 19 | type = "A" 20 | ttl = "${ var.record_ttl }" 21 | domain = "${ var.domain }" 22 | } 23 | -------------------------------------------------------------------------------- /Images/golden/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | 3 | "builders": [{ 4 | "type": "amazon-ebs", 5 | "region": "us-west-2", 6 | "source_ami_filter": { 7 | "filters": { 8 | "virtualization-type": "hvm", 9 | "name": "cncfbase*", 10 | "root-device-type": "ebs" 11 | }, 12 | "owners": ["750548967590"], 13 | "most_recent": true 14 | }, 15 | "instance_type": "c4.2xlarge", 16 | "ssh_username": "centos", 17 | "ssh_pty" : false, 18 | "ami_name": "cncfgolden{{timestamp}}", 19 | "user_data_file": "disable_tty.sh" 20 | }], 21 | 22 | "provisioners": [{ 23 | "type": "ansible", 24 | "playbook_file": "playbook.yml", 25 | "user": "centos", 26 | "sftp_command": "/usr/libexec/openssh/sftp-server", 27 | "extra_arguments": [ "--extra-vars", "packer=yes" ] 28 | }] 29 | 30 | } 31 | -------------------------------------------------------------------------------- /provisioning/packet/modules/etcd/kube-proxy.yml: -------------------------------------------------------------------------------- 1 
| apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-proxy 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-proxy 10 | image: ${ kubelet_image_url }:${ kubelet_image_tag } 11 | command: 12 | - /hyperkube 13 | - proxy 14 | - --master=http://127.0.0.1:8080 15 | securityContext: 16 | privileged: true 17 | volumeMounts: 18 | - mountPath: /etc/ssl/certs 19 | name: ssl-certs-host 20 | readOnly: true 21 | - mountPath: /var/run/dbus 22 | name: dbus 23 | readOnly: false 24 | volumes: 25 | - hostPath: 26 | path: /usr/share/ca-certificates 27 | name: ssl-certs-host 28 | - hostPath: 29 | path: /var/run/dbus 30 | name: dbus 31 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/input.tf: -------------------------------------------------------------------------------- 1 | variable "ami_id" {} 2 | variable "cluster_domain" {} 3 | variable "kubelet_image_url" {} 4 | variable "kubelet_image_tag" {} 5 | variable "depends_id" {} 6 | variable "dns_service_ip" {} 7 | variable "etcd_security_group_id" {} 8 | variable "external_elb_security_group_id" {} 9 | variable "instance_type" {} 10 | variable "internal_tld" {} 11 | variable "key_name" {} 12 | variable "name" {} 13 | variable "pod_cidr" {} 14 | variable "region" {} 15 | variable "service_cidr" {} 16 | variable "subnet_ids_private" {} 17 | variable "subnet_ids_public" {} 18 | variable "vpc_id" {} 19 | variable "ca" {} 20 | variable "k8s_etcd" {} 21 | variable "k8s_etcd_key" {} 22 | variable "k8s_apiserver" {} 23 | variable "k8s_apiserver_key" {} 24 | variable "instance_profile_name" {} 25 | variable "master_node_count" {} 26 | -------------------------------------------------------------------------------- /Docker/boinc/boinc_rpc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # inspired by: https://github.com/BOINC/boinc/blob/master/lib/gui_rpc_client.cpp 4 | 5 | NONCE=$(printf "\n\n\n\003" | nc localhost 31416) 6 | AUTH=$(cat gui_rpc_auth.cfg) 7 | HASH=$(echo "$NONCE$AUTH" | md5sum | awk '{print $1}') 8 | 9 | printf "\n\n$HASH\n\n\n\003" | nc localhost 31416 10 | 11 | : ' 12 | Oh my word, so sometimes it responds a bit and if one inserts a long enough delay here then it is possible to send to the rpc port: 13 | 14 | 15 | 16 | 0 17 | 18 | 19 | 20 | IF and only IF all of this was part of the same tcp session. Ouch.
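A hedged aside on the handshake above (a sketch, not verified against a live
client): BOINC expects md5 of nonce+password with no trailing newline, and
echo appends one, so building HASH with printf may be more reliable, e.g.
  HASH=$(printf "%s%s" "$NONCE" "$AUTH" | md5sum | awk "{ print \$1 }")
Note also that $NONCE as captured above is the whole XML reply, so the nonce
value itself would still need to be extracted from it before hashing.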
21 | ' 22 | -------------------------------------------------------------------------------- /provisioning/azure/modules/worker/input.tf: -------------------------------------------------------------------------------- 1 | variable "location" {} 2 | variable "subnet_id" {} 3 | variable "name" {} 4 | variable "worker_vm_size" {} 5 | variable "worker_node_count" {} 6 | variable "image_publisher" {} 7 | variable "image_offer" {} 8 | variable "image_sku" {} 9 | variable "image_version" {} 10 | variable "storage_account" {} 11 | variable "storage_primary_endpoint" {} 12 | variable "storage_container" {} 13 | variable "availability_id" {} 14 | variable "external_lb" {} 15 | variable "cluster_domain" {} 16 | variable "dns_service_ip" {} 17 | variable "internal_tld" {} 18 | variable "admin_username" {} 19 | variable "kubelet_image_url" {} 20 | variable "kubelet_image_tag" {} 21 | variable "k8s_cloud_config" {} 22 | variable "ca" {} 23 | variable "k8s_worker" {} 24 | variable "k8s_worker_key" {} 25 | variable "data_dir" {} 26 | -------------------------------------------------------------------------------- /provisioning/aws/modules/worker/input.tf: -------------------------------------------------------------------------------- 1 | variable "ami_id" {} 2 | variable "capacity" { 3 | default = { 4 | desired = 5 5 | max = 5 6 | min = 3 7 | } 8 | } 9 | variable "cluster_domain" {} 10 | variable "kubelet_image_url" {} 11 | variable "kubelet_image_tag" {} 12 | variable "depends_id" {} 13 | variable "dns_service_ip" {} 14 | variable "instance_type" {} 15 | variable "internal_tld" {} 16 | variable "key_name" {} 17 | variable "name" {} 18 | variable "region" {} 19 | variable "security_group_id" {} 20 | variable "subnet_ids" {} 21 | variable "volume_size" { 22 | default = { 23 | ebs = 250 24 | root = 52 25 | } 26 | } 27 | variable "vpc_id" {} 28 | variable "worker_name" {} 29 | variable "ca" {} 30 | variable "k8s_worker" {} 31 | variable "k8s_worker_key" {} 32 | variable "instance_profile_name" {} 33 | # variable "s3_bucket" {} 34 | -------------------------------------------------------------------------------- /provisioning/aws/modules/worker/cloud-config.tf: -------------------------------------------------------------------------------- 1 | resource "gzip_me" "ca" { 2 | input = "${ var.ca }" 3 | } 4 | 5 | resource "gzip_me" "k8s_worker" { 6 | input = "${ var.k8s_worker }" 7 | } 8 | 9 | resource "gzip_me" "k8s_worker_key" { 10 | input = "${ var.k8s_worker_key }" 11 | } 12 | 13 | data "template_file" "cloud-config" { 14 | template = "${ file( "${ path.module }/cloud-config.yml" )}" 15 | 16 | vars { 17 | cluster_domain = "${ var.cluster_domain }" 18 | dns_service_ip = "${ var.dns_service_ip }" 19 | kubelet_image_url = "${ var.kubelet_image_url }" 20 | kubelet_image_tag = "${ var.kubelet_image_tag }" 21 | internal_tld = "${ var.internal_tld }" 22 | region = "${ var.region }" 23 | ca = "${ gzip_me.ca.output }" 24 | k8s_worker = "${ gzip_me.k8s_worker.output }" 25 | k8s_worker_key = "${ gzip_me.k8s_worker_key.output }" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /Web/api/_tests/gen_mock_summary.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import print_function 4 | 5 | import sys 6 | 7 | import time, datetime 8 | import json 9 | import random 10 | 11 | from hashids import Hashids 12 | 13 | hashids = Hashids(salt='grabfromenv') 14 | now = int(time.time()) 15 | 16 | N 
= int(sys.argv[1]) if 1 < len(sys.argv) else 2 17 | 18 | 19 | data = [] 20 | for _ in range (0,N): 21 | now += 1 22 | data.append({ 23 | 'id': hashids.encode(now), 24 | 'timestart': now, 25 | 'timeend': now + random.randint(850,1100), 26 | 'Boinc_Jobs': 0, 27 | 'CPU': random.randint(3,12), 28 | 'DistCC': random.randint(900,1424), 29 | 'HTTP_Requests': random.randint(2000000, 2542424), 30 | 'Memory': random.randint(16,32), 31 | 'Provider': 'AWS'}) 32 | 33 | print(json.dumps({"Results" : data})) 34 | -------------------------------------------------------------------------------- /provisioning/gce/modules/worker/input.tf: -------------------------------------------------------------------------------- 1 | variable "region" {} 2 | variable "zone" {} 3 | variable "project" {} 4 | # variable "subnet-id" {} 5 | variable "name" {} 6 | # variable "worker-vm-size" {} 7 | variable "worker_node_count" {} 8 | # variable "image-publisher" {} 9 | # variable "image-offer" {} 10 | # variable "image-sku" {} 11 | # variable "image-version" {} 12 | # variable "storage-account" {} 13 | # variable "storage-primary-endpoint" {} 14 | # variable "storage-container" {} 15 | # variable "availability-id" {} 16 | # variable "external-lb" {} 17 | variable "cluster_domain" {} 18 | variable "dns_service_ip" {} 19 | variable "internal_tld" {} 20 | # variable "admin_username" {} 21 | variable "kubelet_image_url" {} 22 | variable "kubelet_image_tag" {} 23 | variable "ca" {} 24 | variable "k8s-worker" {} 25 | variable "k8s-worker-key" {} 26 | variable "domain" {} 27 | # variable "cloud-config" {} 28 | -------------------------------------------------------------------------------- /AddOns/Prometheus/pushgateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: pushgateway 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | labels: 9 | name: pushgateway 10 | spec: 11 | selector: 12 | app: pushgateway 13 | type: NodePort 14 | ports: 15 | - name: pushgateway 16 | protocol: TCP 17 | port: 9091 18 | nodePort: 30901 19 | 20 | --- 21 | apiVersion: extensions/v1beta1 22 | kind: Deployment 23 | metadata: 24 | name: pushgateway 25 | namespace: monitoring 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app: pushgateway 31 | template: 32 | metadata: 33 | name: pushgateway 34 | labels: 35 | app: pushgateway 36 | spec: 37 | containers: 38 | - name: pushgateway 39 | image: prom/pushgateway:latest 40 | ports: 41 | - name: web 42 | containerPort: 9091 43 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/elb.tf: -------------------------------------------------------------------------------- 1 | resource "aws_elb" "external" { 2 | name = "kz8s-apiserver-${replace(var.name, "/(.{0,17})(.*)/", "$1")}" 3 | 4 | cross_zone_load_balancing = false 5 | 6 | health_check { 7 | healthy_threshold = 2 8 | unhealthy_threshold = 2 9 | timeout = 3 10 | target = "HTTP:8080/" 11 | interval = 30 12 | } 13 | 14 | instances = [ "${ aws_instance.etcd.*.id }" ] 15 | idle_timeout = 3600 16 | 17 | listener { 18 | instance_port = 443 19 | instance_protocol = "tcp" 20 | lb_port = 443 21 | lb_protocol = "tcp" 22 | } 23 | 24 | security_groups = [ "${ var.external_elb_security_group_id }" ] 25 | subnets = [ "${ split(",", var.subnet_ids_public) }" ] 26 | 27 | tags { 28 | builtWith = "terraform" 29 | kz8s = "${ var.name }" 30 | Name = "kz8s-apiserver" 31 | role = 
"apiserver" 32 | visibility = "public" 33 | KubernetesCluster = "${ var.name }" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /AddOns/Prometheus/node-exporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: node-exporter 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | labels: 9 | app: node-exporter 10 | name: node-exporter 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: scrape 15 | port: 9100 16 | protocol: TCP 17 | selector: 18 | app: node-exporter 19 | type: ClusterIP 20 | --- 21 | apiVersion: extensions/v1beta1 22 | kind: DaemonSet 23 | metadata: 24 | name: node-exporter 25 | namespace: monitoring 26 | spec: 27 | template: 28 | metadata: 29 | labels: 30 | app: node-exporter 31 | name: node-exporter 32 | spec: 33 | containers: 34 | - image: prom/node-exporter 35 | name: node-exporter 36 | ports: 37 | - containerPort: 9100 38 | hostPort: 9100 39 | name: scrape 40 | hostNetwork: true 41 | hostPID: true 42 | -------------------------------------------------------------------------------- /Docker/fluentd-kubectl/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fluent/fluentd:latest-onbuild 2 | MAINTAINER Eugene Zilman 3 | WORKDIR /home/fluent 4 | ENV PATH /home/fluent/.gem/ruby/2.3.0/bin:$PATH 5 | ENV KUBE_LATEST_VERSION="v1.4.4" 6 | 7 | USER root 8 | 9 | RUN apk add --update ca-certificates \ 10 | && apk add --update -t deps curl \ 11 | && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \ 12 | && chmod +x /usr/local/bin/kubectl \ 13 | && apk del --purge deps \ 14 | && rm /var/cache/apk/* 15 | 16 | RUN apk --no-cache add sudo build-base ruby-dev && \ 17 | sudo -u fluent gem install fluent-plugin-secure-forward fluent-plugin-s3 && \ 18 | rm -rf /home/fluent/.gem/ruby/2.3.0/cache/*.gem && sudo -u fluent gem sources -c && \ 19 | apk del sudo build-base ruby-dev 20 | 21 | USER fluent 22 | CMD exec fluentd -c /fluentd/etc/$FLUENTD_CONF -p /fluentd/plugins $FLUENTD_OPT 23 | 24 | -------------------------------------------------------------------------------- /Docker/Wrk/send_summary.lua: -------------------------------------------------------------------------------- 1 | local random = math.random 2 | 3 | local function uuid() 4 | local template ='xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx' 5 | return string.gsub(template, '[xy]', function (c) 6 | local v = (c == 'x') and random(0, 0xf) or random(8, 0xb) 7 | return string.format('%x', v) 8 | end) 9 | end 10 | 11 | function init(args) 12 | 13 | -- print(args[0]) 14 | 15 | wrk.path = wrk.path .. '&device_id=' .. 
uuid() 16 | -- TODO: write a proper add_query_parm function instead of appending at the end 17 | 18 | end 19 | 20 | -- function response(status, headers, body) 21 | -- print(status) 22 | -- todo: keep separate counts per status code 23 | -- end 24 | 25 | function done(summary, latency, requests) 26 | 27 | local msg = "echo 'metric_name %d' | curl -s -m3 --data-binary @- http://$PUSHGATEWAY:$PUSHGATEWAY_SERVICE_PORT/metrics/job/wrk/name/$podID/instance/$hostIP" 28 | local t = os.execute(msg:format(summary.requests)) 29 | 30 | end 31 | -------------------------------------------------------------------------------- /provisioning/packet/ssl-ssh-cloud.tf: -------------------------------------------------------------------------------- 1 | #Gen Certs and SSH KeyPair 2 | resource "null_resource" "ssl_ssh_gen" { 3 | 4 | provisioner "local-exec" { 5 | command = < ${ var.data-dir }/azure-config.json 7 | # { 8 | # "aadClientId": "$${ARM_CLIENT_ID}", 9 | # "aadClientSecret": "$${ARM_CLIENT_SECRET}", 10 | # "tenantId": "$${ARM_TENANT_ID}", 11 | # "subscriptionId": "$${ARM_SUBSCRIPTION_ID}", 12 | # "resourceGroup": "${ var.name }", 13 | # "location": "${ var.location }", 14 | # "subnetName": "${ var.name }", 15 | # "securityGroupName": "${ var.name }", 16 | # "vnetName": "${ var.name }", 17 | # "routeTableName": "${ var.name }", 18 | # "primaryAvailabilitySetName": "${ var.name }" 19 | # } 20 | # JSON 21 | # COMMAND 22 | # } 23 | 24 | # provisioner "local-exec" { 25 | # when = "destroy" 26 | # on_failure = "continue" 27 | # command = < "${ var.data_dir }/ca.pem" 38 | echo "${ base64decode(google_container_cluster.cncf.master_auth.0.client_certificate) }" > "${ var.data_dir }/k8s-admin.pem" 39 | echo "${ base64decode(google_container_cluster.cncf.master_auth.0.client_key) }" > "${ var.data_dir }/k8s-admin-key.pem" 40 | LOCAL_EXEC 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /Docker/azure-cli/Readme.org: -------------------------------------------------------------------------------- 1 | purpose: to generate an azure.env for use with terraform. 2 | 3 | This is published on docker hub as [[https://hub.docker.com/r/generate/creds/][generate/creds:azure]] 4 | 5 | #+BEGIN_SRC shell 6 | $ docker run -v $(pwd)/data:/data -ti generate/creds:azure 7 | To sign in, use a web browser to open the page https://aka.ms/devicelogin and enter the code GY7W7BMRZ to authenticate. 8 | Name CloudName SubscriptionId State IsDefault 9 | ------------- ----------- ------------------------------------ -------- ----------- 10 | Free Trial AzureCloud 5358e673-95e7-4cd8-9791-ca28dd5e3cbb Disabled True 11 | Pay-As-You-Go AzureCloud 70693672-7c0d-485f-ac08-06d458c80f0e Enabled 12 | 13 | Please enter the Name of the account you wish to use. If you do not see 14 | a valid account in the list press Ctrl+C to abort and create one. 15 | If you leave this blank we will use the Current account.
16 | > Pay-As-You-Go 17 | Using subscription_id: 70693672-7c0d-485f-ac08-06d458c80f0e 18 | Using tenant_id: 9996322a-93ac-43ae-80be-887a3e8194a1 19 | ==> Creating service principal 20 | Retrying role assignment creation: 1/36 21 | Retrying role assignment creation: 2/36 22 | ./data/azure.env created 23 | $ cat ./data/azure.env 24 | export ARM_SUBSCRIPTION_ID=70693672-XXXX-4858-ac08-06888888880e 25 | export ARM_TENANT_ID=9896828a-93ac-43ae-YYYY-887a3e8898a1 26 | export ARM_CLIENT_ID=968448ae-f9f9-ZZZZ-bf43-5c081da88975 27 | export ARM_CLIENT_SECRET=BBBBBBBB-8eaa-AAAA-aafe-75b02ad4ceba 28 | #+END_SRC 29 | -------------------------------------------------------------------------------- /provisioning/gce/modules/vpc/private.tf: -------------------------------------------------------------------------------- 1 | /* 2 | resource "aws_eip" "nat" { vpc = true } 3 | 4 | resource "aws_nat_gateway" "nat" { 5 | depends_on = [ 6 | "aws_eip.nat", 7 | "aws_internet_gateway.main", 8 | ] 9 | 10 | allocation_id = "${ aws_eip.nat.id }" 11 | subnet_id = "${ aws_subnet.public.0.id }" 12 | } 13 | 14 | resource "aws_subnet" "private" { 15 | count = "${ length( split(",", var.azs) ) }" 16 | 17 | availability_zone = "${ element( split(",", var.azs), count.index ) }" 18 | cidr_block = "${ cidrsubnet(var.cidr, 8, count.index + 10) }" 19 | vpc_id = "${ aws_vpc.cncf.id }" 20 | 21 | tags { 22 | "kubernetes.io/role/internal-elb" = "${ var.name }" 23 | builtWith = "terraform" 24 | KubernetesCluster = "${ var.name }" 25 | kz8s = "${ var.name }" 26 | Name = "kz8s-${ var.name }-private" 27 | visibility = "private" 28 | } 29 | } 30 | 31 | resource "aws_route_table" "private" { 32 | vpc_id = "${ aws_vpc.cncf.id }" 33 | 34 | route { 35 | cidr_block = "0.0.0.0/0" 36 | nat_gateway_id = "${ aws_nat_gateway.nat.id }" 37 | } 38 | 39 | tags { 40 | builtWith = "terraform" 41 | KubernetesCluster = "${ var.name }" 42 | kz8s = "${ var.name }" 43 | Name = "kz8s-${ var.name }" 44 | visibility = "private" 45 | } 46 | } 47 | 48 | resource "aws_route_table_association" "private" { 49 | count = "${ length(split(",", var.azs)) }" 50 | 51 | route_table_id = "${ aws_route_table.private.id }" 52 | subnet_id = "${ element(aws_subnet.private.*.id, count.index) }" 53 | } 54 | */ 55 | -------------------------------------------------------------------------------- /provisioning/gce/modules/security/security.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_firewall" "allow-internal-lb" { 2 | name = "allow-internal-lb" 3 | network = "${ var.name }" 4 | 5 | allow { 6 | protocol = "tcp" 7 | ports = ["8080", "443"] 8 | } 9 | 10 | source_ranges = ["10.0.0.0/16"] 11 | target_tags = ["int-lb"] 12 | } 13 | 14 | resource "google_compute_firewall" "allow-health-check" { 15 | name = "allow-health-check" 16 | network = "${ var.name }" 17 | 18 | allow { 19 | protocol = "tcp" 20 | ports = ["8080", "443"] 21 | } 22 | 23 | source_ranges = ["130.211.0.0/22","35.191.0.0/16","10.0.0.0/16"] 24 | } 25 | 26 | resource "google_compute_firewall" "allow-all-internal" { 27 | name = "allow-all-10-128-0-0-20" 28 | network = "${ var.name }" 29 | 30 | allow { 31 | protocol = "tcp" 32 | } 33 | 34 | allow { 35 | protocol = "udp" 36 | } 37 | 38 | allow { 39 | protocol = "icmp" 40 | } 41 | 42 | source_ranges = ["10.0.0.0/16"] 43 | } 44 | 45 | resource "google_compute_firewall" "allow-ssh-bastion" { 46 | name = "allow-ssh-bastion" 47 | network = "${ var.name }" 48 | 49 | allow { 50 | protocol = "tcp" 51 | ports = ["22"] 
52 | } 53 | 54 | source_ranges = ["0.0.0.0/0"] 55 | target_tags = ["bastion"] 56 | } 57 | 58 | resource "google_compute_firewall" "allow-kubectl" { 59 | name = "allow-kubectl-lb" 60 | network = "${ var.name }" 61 | 62 | allow { 63 | protocol = "tcp" 64 | ports = ["443"] 65 | } 66 | 67 | source_ranges = ["0.0.0.0/0"] 68 | target_tags = ["foo"] 69 | } 70 | -------------------------------------------------------------------------------- /provisioning/gce/output.tf: -------------------------------------------------------------------------------- 1 | # output "fqdn-k8s" { value = "${ module.etcd.fqdn-lb}" } 2 | # output "bastion-ip" { value = "${ module.bastion.bastion-ip}" } 3 | # output "bastion-fqdn" { value = "${ module.bastion.bastion-fqdn}" } 4 | # output "k8s-admin" { value = "${ k8s-admin}"} 5 | # # fixme for use outside container 6 | # output "ssh-key-setup" { value = "eval $(ssh-agent) ; ssh-add /cncf/data/.ssh/id_rsa"} 7 | # output "ssh-via-bastion" { value = "ssh -At ${ var.admin_username }@${ module.bastion.bastion-fqdn } ssh ${ var.admin_username }@master1.cncf.demo"} 8 | 9 | #output "availability-id" { value = "${ azurerm_availability_set.cncf.id }" } 10 | #output "azs" { value = "${ var.aws["azs"] }" } 11 | #output "bastion-ip" { value = "${ module.bastion.ip }" } 12 | #output "cluster_domain" { value = "${ var.cluster_domain }" } 13 | #output "dns-service-ip" { value = "${ var.dns_service_ip }" } 14 | #output "etcd1-ip" { value = "${ element( split(",", var.etcd-ips), 0 ) }" } 15 | output "external_lb" { value = "${ module.etcd.external_lb }" } 16 | #output "internal_tld" { value = "${ var.internal_tld }" } 17 | #output "name" { value = "${ var.name }" } 18 | #output "region" { value = "${ var.aws["region"] }" } 19 | #output "s3-bucket" { value = "${ var.s3-bucket }" } 20 | #output "subnet-ids-private" { value = "${ module.vpc.subnet-ids-private }" } 21 | #output "subnet-ids-public" { value = "${ module.vpc.subnet-ids-public }" } 22 | #output "worker-autoscaling-group-name" { value = "${ module.worker.autoscaling-group-name }" } 23 | output "kubeconfig" { value = "${ module.kubeconfig.kubeconfig }"} 24 | -------------------------------------------------------------------------------- /provisioning/gce/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "test" } 2 | variable "internal_tld" { default = "cncf.demo" } 3 | variable "master_node_count" { default = "3" } 4 | variable "worker_node_count" { default = "3" } 5 | #variable "master_vm_size" { default = "Standard_A2" } 6 | #variable "worker-vm-size" { default = "Standard_A2" } 7 | #variable "bastion-vm-size" { default = "Standard_A2" } 8 | # Set from https://quay.io/repository/coreos/hyperkube?tab=tags 9 | variable "kubelet_image_url" { default = "quay.io/coreos/hyperkube"} 10 | variable "kubelet_image_tag" { default = "v1.4.7_coreos.0"} 11 | #variable "image-publisher" { default = "CoreOS" } 12 | #variable "image-offer" { default = "CoreOS" } 13 | #variable "image-sku" { default = "Stable" } 14 | #variable "image-version" { default = "1298.6.0" } 15 | variable "region" { default = "us-central1" } 16 | variable "zone" { default = "us-central1-a" } 17 | variable "project" { default = "test-163823" } 18 | variable "cluster_domain" { default = "cluster.local" } 19 | # variable "admin_username" { default = "cncf"} 20 | variable "cidr" { default = "10.0.0.0/16" } 21 | variable "pod_cidr" { default = "10.2.0.0/16" } 22 | variable "service_cidr" { default = 
"10.3.0.0/24" } 23 | variable "k8s_service_ip" { default = "10.3.0.1" } 24 | variable "dns_service_ip" { default = "10.3.0.10" } 25 | # variable "allow-ssh-cidr" { default = "0.0.0.0/0" } 26 | variable "data_dir" { default = "/cncf/data" } 27 | # variable "name-servers-file" { default = "google_dns_zone"} 28 | variable "domain" { default = "cncf.ci" } 29 | -------------------------------------------------------------------------------- /provisioning/azure/modules/network/virtual_network.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_network_security_group" "cncf" { 2 | name = "${ var.name }" 3 | location = "${ var.location}" 4 | resource_group_name = "${ var.name }" 5 | } 6 | 7 | resource "azurerm_subnet" "cncf" { 8 | name = "${ var.name }" 9 | resource_group_name = "${ var.name }" 10 | virtual_network_name = "${azurerm_virtual_network.cncf.name}" 11 | address_prefix = "10.0.10.0/24" 12 | route_table_id = "${ azurerm_route_table.cncf.id }" 13 | 14 | } 15 | 16 | resource "azurerm_virtual_network" "cncf" { 17 | name = "${ var.name }" 18 | resource_group_name = "${ var.name }" 19 | address_space = ["${ var.vpc_cidr }"] 20 | location = "${ var.location }" 21 | dns_servers = [ 22 | "${ element(split( ",", file(var.name_servers_file) ),0) }", 23 | "${ element(split( ",", file(var.name_servers_file) ),1) }", 24 | "8.8.8.8" 25 | ] 26 | # getting dns servers in list form was difficult 27 | # module.vpc.azurerm_virtual_network.main: Creating... 28 | # address_space.#: "" => "1" 29 | # address_space.0: "" => "10.0.0.0/16" 30 | # dns_servers.#: "" => "4" 31 | # dns_servers.0: "" => "40.90.4.9" 32 | # dns_servers.1: "" => "13.107.24.9" 33 | # dns_servers.2: "" => "64.4.48.9" 34 | # dns_servers.3: "" => "13.107.160.9" 35 | } 36 | 37 | resource "azurerm_route_table" "cncf" { 38 | name = "${ var.name }" 39 | location = "${ var.location }" 40 | resource_group_name = "${ var.name }" 41 | } 42 | -------------------------------------------------------------------------------- /provisioning/aws/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "aws" } 2 | 3 | variable "internal_tld" { default = "aws.cncf.demo" } 4 | variable "data_dir" { default = "/cncf/data/aws" } 5 | 6 | # AWS Cloud Specific Settings 7 | variable "aws_region" { default = "ap-southeast-2" } 8 | variable "aws_key_name" { default = "aws" } 9 | variable "aws_azs" { default = "ap-southeast-2a,ap-southeast-2b,ap-southeast-2c" } 10 | variable "vpc_cidr" { default = "10.0.0.0/16" } 11 | variable "allow_ssh_cidr" { default = "0.0.0.0/0" } 12 | 13 | # VM Image and size 14 | variable "admin_username" { default = "core" } 15 | variable "aws_image_ami" { default = "ami-fde3e09e"} # channel/stable type/hvm 16 | variable "aws_master_vm_size" { default = "m3.medium" } 17 | variable "aws_worker_vm_size" { default = "m3.medium" } 18 | variable "aws_bastion_vm_size" { default = "t2.nano" } 19 | 20 | # Kubernetes 21 | variable "cluster_domain" { default = "cluster.local" } 22 | variable "pod_cidr" { default = "10.2.0.0/16" } 23 | variable "service_cidr" { default = "10.3.0.0/24" } 24 | variable "k8s_service_ip" { default = "10.3.0.1" } 25 | variable "dns_service_ip" { default = "10.3.0.10" } 26 | variable "master_node_count" { default = "3" } 27 | variable "worker_node_count" { default = "3" } 28 | variable "worker_node_min" { default = "3" } 29 | variable "worker_node_max" { default = "5" } 30 | 31 | # Deployment Artifact 
Versions 32 | # Hyperkube 33 | # Set from https://quay.io/repository/coreos/hyperkube?tab=tags 34 | variable "kubelet_image_url" { default = "quay.io/coreos/hyperkube"} 35 | variable "kubelet_image_tag" { default = "v1.6.1_coreos.0"} 36 | 37 | -------------------------------------------------------------------------------- /Web/api/_src/Schemas/new.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-04/schema#", 3 | "title": "NewDemoModel", 4 | "required": ["Metadata"], 5 | "type": "object", 6 | "properties": { 7 | "Metadata": { 8 | "required": ["Masters", "Minions", "Provider", "RAM", "vcpu", "Storage"], 9 | "properties": { 10 | "Masters": { 11 | "properties": { 12 | "Instance": { 13 | "type": "string" 14 | }, 15 | "size": { 16 | "type": "integer" 17 | } 18 | }, 19 | "type": "object" 20 | }, 21 | "Minions": { 22 | "properties": { 23 | "Instance": { 24 | "type": "string" 25 | }, 26 | "size": { 27 | "type": "integer" 28 | } 29 | }, 30 | "type": "object" 31 | }, 32 | "Provider": { 33 | "type": "string" 34 | }, 35 | "RAM": { 36 | "type": "string" 37 | }, 38 | "Storage": { 39 | "type": "string" 40 | }, 41 | "vcpu": { 42 | "type": "integer" 43 | } 44 | }, 45 | "type": "object" 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /provisioning/azure/input.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "azure" } 2 | 3 | variable "internal_tld" { default = "azure.cncf.demo" } 4 | variable "data_dir" { default = "/cncf/data/azure" } 5 | 6 | # Azure Cloud Specific Settings 7 | variable "location" { default = "westus" } 8 | variable "vpc_cidr" { default = "10.0.0.0/16" } 9 | 10 | # VM Image and size 11 | variable "admin_username" { default = "cncf"} 12 | variable "image_publisher" { default = "CoreOS" } 13 | variable "image_offer" { default = "CoreOS" } 14 | variable "image_sku" { default = "Stable" } 15 | variable "image_version" { default = "1298.6.0" } 16 | variable "master_vm_size" { default = "Standard_A2" } 17 | variable "worker_vm_size" { default = "Standard_A2" } 18 | variable "bastion_vm_size" { default = "Standard_A2" } 19 | 20 | # Kubernetes 21 | variable "cluster_domain" { default = "cluster.local" } 22 | variable "pod_cidr" { default = "10.2.0.0/16" } 23 | variable "service_cidr" { default = "10.3.0.0/24" } 24 | variable "k8s_service_ip" { default = "10.3.0.1" } 25 | variable "dns_service_ip" { default = "10.3.0.10" } 26 | variable "master_node_count" { default = "3" } 27 | variable "worker_node_count" { default = "3" } 28 | # Autoscaling not supported by Kubernetes on Azure yet 29 | # variable "worker_node_min" { default = "3" } 30 | # variable "worker_node_max" { default = "5" } 31 | 32 | # Deployment Artifact Versions 33 | # Hyperkube 34 | # Set from https://quay.io/repository/coreos/hyperkube?tab=tags 35 | variable "kubelet_image_url" { default = "quay.io/coreos/hyperkube"} 36 | variable "kubelet_image_tag" { default = "v1.4.7_coreos.0"} 37 | -------------------------------------------------------------------------------- /AddOns/dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubernetes-dashboard 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kubernetes-dashboard 8 | kubernetes.io/cluster-service: "true" 9 | spec: 10 | selector: 11 | k8s-app: kubernetes-dashboard
12 | ports: 13 | - port: 80 14 | targetPort: 9090 15 | 16 | --- 17 | 18 | apiVersion: v1 19 | kind: ReplicationController 20 | metadata: 21 | name: kubernetes-dashboard-v1.4.2 22 | namespace: kube-system 23 | labels: 24 | k8s-app: kubernetes-dashboard 25 | version: v1.4.2 26 | kubernetes.io/cluster-service: "true" 27 | spec: 28 | replicas: 1 29 | selector: 30 | k8s-app: kubernetes-dashboard 31 | template: 32 | metadata: 33 | labels: 34 | k8s-app: kubernetes-dashboard 35 | version: v1.4.2 36 | kubernetes.io/cluster-service: "true" 37 | annotations: 38 | scheduler.alpha.kubernetes.io/critical-pod: '' 39 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 40 | spec: 41 | containers: 42 | - name: kubernetes-dashboard 43 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.2 44 | resources: 45 | limits: 46 | cpu: 100m 47 | memory: 50Mi 48 | requests: 49 | cpu: 100m 50 | memory: 50Mi 51 | ports: 52 | - containerPort: 9090 53 | livenessProbe: 54 | httpGet: 55 | path: / 56 | port: 9090 57 | initialDelaySeconds: 30 58 | timeoutSeconds: 30 59 | 60 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/etcd-load-balancer.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_public_ip" "cncf" { 2 | name = "PublicIPForLB" 3 | location = "${ var.location }" 4 | resource_group_name = "${ var.name }" 5 | public_ip_address_allocation = "static" 6 | domain_name_label = "k8s-${ var.name }" 7 | } 8 | 9 | resource "azurerm_lb" "cncf" { 10 | name = "TestLoadBalancer" 11 | location = "${ azurerm_public_ip.cncf.location }" 12 | resource_group_name = "${ azurerm_public_ip.cncf.resource_group_name }" 13 | 14 | frontend_ip_configuration { 15 | name = "PublicIPAddress" 16 | public_ip_address_id = "${azurerm_public_ip.cncf.id}" 17 | } 18 | } 19 | 20 | resource "azurerm_lb_rule" "cncf" { 21 | resource_group_name = "${azurerm_public_ip.cncf.resource_group_name}" 22 | loadbalancer_id = "${azurerm_lb.cncf.id}" 23 | probe_id = "${ azurerm_lb_probe.cncf.id }" 24 | backend_address_pool_id = "${ azurerm_lb_backend_address_pool.cncf.id }" 25 | name = "LBRule" 26 | protocol = "Tcp" 27 | frontend_port = 443 28 | backend_port = 443 29 | frontend_ip_configuration_name = "PublicIPAddress" 30 | } 31 | 32 | resource "azurerm_lb_probe" "cncf" { 33 | resource_group_name = "${azurerm_public_ip.cncf.resource_group_name}" 34 | loadbalancer_id = "${azurerm_lb.cncf.id}" 35 | name = "${ var.name }" 36 | protocol = "Http" 37 | port = 8080 38 | request_path = "/" 39 | interval_in_seconds = 30 40 | number_of_probes = 5 41 | } 42 | 43 | resource "azurerm_lb_backend_address_pool" "cncf" { 44 | resource_group_name = "${ azurerm_public_ip.cncf.resource_group_name }" 45 | loadbalancer_id = "${azurerm_lb.cncf.id}" 46 | name = "BackEndAddressPool" 47 | } 48 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | # This file is a template, and might need editing before it works on your project. 2 | # Official docker image. 
3 | 4 | image: docker:latest 5 | variables: 6 | DOCKER_HOST: 127.0.0.1:2375 7 | privileged: 'true' 8 | services: 9 | - docker:dind 10 | 11 | stages: 12 | - build-provisioning 13 | - deploy 14 | - destroy 15 | 16 | build-provisioning: 17 | stage: build-provisioning 18 | only: 19 | - master 20 | script: 21 | - docker login -u "gitlab-ci-token" -p "$CI_JOB_TOKEN" $CI_REGISTRY 22 | - docker build --pull -t "$CI_REGISTRY_IMAGE/provisioning:latest" ./provisioning 23 | - docker push "$CI_REGISTRY_IMAGE/provisioning:latest" 24 | 25 | deploy_cloud: 26 | image: registry.gitlab.com/cncf/demo/provisioning:latest 27 | stage: deploy 28 | only: 29 | - aws 30 | - azure 31 | - gce 32 | - gke 33 | - packet 34 | environment: 35 | name: $CI_COMMIT_REF_NAME 36 | url: https://$CI_ENVIRONMENT_SLUG.cncf.ci/ 37 | on_stop: destroy_cloud 38 | artifacts: 39 | when: always 40 | expire_in: 4 weeks 41 | name: "${CI_ENVIRONMENT_SLUG}" 42 | paths: 43 | - ./provisioning/data/ 44 | script: 45 | - ./provisioning/provision.sh ${CI_COMMIT_REF_NAME}-deploy ${CI_ENVIRONMENT_SLUG} 46 | 47 | destroy_cloud: 48 | image: registry.gitlab.com/cncf/demo/provisioning:latest 49 | stage: destroy 50 | when: manual 51 | environment: 52 | name: $CI_COMMIT_REF_NAME 53 | action: stop 54 | artifacts: 55 | when: always 56 | expire_in: 4 weeks 57 | name: "${CI_ENVIRONMENT_SLUG}" 58 | paths: 59 | - ./provisioning/data/ 60 | script: 61 | - ./provisioning/provision.sh ${CI_COMMIT_REF_NAME}-destroy ${CI_ENVIRONMENT_SLUG} 62 | -------------------------------------------------------------------------------- /provisioning/gce/modules/vpc/gce-subnet.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_subnetwork" "cncf" { 2 | name = "${ var.name }" 3 | ip_cidr_range = "${ var.cidr }" 4 | network = "${ google_compute_network.cncf.self_link }" 5 | region = "${ var.region }" 6 | } 7 | 8 | /* 9 | # gateway" "nat" { 10 | depends_on = [ 11 | "aws_eip.nat", 12 | "aws_internet_gateway.main", 13 | ] 14 | 15 | allocation_id = "${ aws_eip.nat.id }" 16 | subnet_id = "${ aws_subnet.public.0.id }" 17 | } 18 | 19 | resource "aws_subnet" "private" { 20 | count = "${ length( split(",", var.azs) ) }" 21 | 22 | availability_zone = "${ element( split(",", var.azs), count.index ) }" 23 | cidr_block = "${ cidrsubnet(var.cidr, 8, count.index + 10) }" 24 | vpc_id = "${ aws_vpc.cncf.id }" 25 | 26 | tags { 27 | "kubernetes.io/role/internal-elb" = "${ var.name }" 28 | builtWith = "terraform" 29 | KubernetesCluster = "${ var.name }" 30 | kz8s = "${ var.name }" 31 | Name = "kz8s-${ var.name }-private" 32 | visibility = "private" 33 | } 34 | } 35 | 36 | resource "aws_route_table" "private" { 37 | vpc_id = "${ aws_vpc.cncf.id }" 38 | 39 | route { 40 | cidr_block = "0.0.0.0/0" 41 | nat_gateway_id = "${ aws_nat_gateway.nat.id }" 42 | } 43 | 44 | tags { 45 | builtWith = "terraform" 46 | KubernetesCluster = "${ var.name }" 47 | kz8s = "${ var.name }" 48 | Name = "kz8s-${ var.name }" 49 | visibility = "private" 50 | } 51 | } 52 | 53 | resource "aws_route_table_association" "private" { 54 | count = "${ length(split(",", var.azs)) }" 55 | 56 | route_table_id = "${ aws_route_table.private.id }" 57 | subnet_id = "${ element(aws_subnet.private.*.id, count.index) }" 58 | } 59 | */ 60 | -------------------------------------------------------------------------------- /provisioning/gce/modules/etcd/load-balancer.tf: -------------------------------------------------------------------------------- 1 | # resource "azurerm_public_ip" "cncf" { 
2 | # name = "PublicIPForLB" 3 | # location = "${ var.location }" 4 | # resource_group_name = "${ var.name }" 5 | # public_ip_address_allocation = "static" 6 | # domain_name_label = "k8s${ var.name }" 7 | # } 8 | 9 | # resource "azurerm_lb" "cncf" { 10 | # name = "TestLoadBalancer" 11 | # location = "${ azurerm_public_ip.cncf.location }" 12 | # resource_group_name = "${ azurerm_public_ip.cncf.resource_group_name }" 13 | 14 | # frontend_ip_configuration { 15 | # name = "PublicIPAddress" 16 | # public_ip_address_id = "${azurerm_public_ip.cncf.id}" 17 | # } 18 | # } 19 | 20 | # resource "azurerm_lb_rule" "cncf" { 21 | # resource_group_name = "${azurerm_public_ip.cncf.resource_group_name}" 22 | # loadbalancer_id = "${azurerm_lb.cncf.id}" 23 | # probe_id = "${ azurerm_lb_probe.cncf.id }" 24 | # backend_address_pool_id = "${ azurerm_lb_backend_address_pool.cncf.id }" 25 | # name = "LBRule" 26 | # protocol = "Tcp" 27 | # frontend_port = 443 28 | # backend_port = 443 29 | # frontend_ip_configuration_name = "PublicIPAddress" 30 | # } 31 | 32 | # resource "azurerm_lb_probe" "cncf" { 33 | # resource_group_name = "${azurerm_public_ip.cncf.resource_group_name}" 34 | # loadbalancer_id = "${azurerm_lb.cncf.id}" 35 | # name = "${ var.name }" 36 | # protocol = "Http" 37 | # port = 8080 38 | # request_path = "/" 39 | # interval_in_seconds = 30 40 | # number_of_probes = 5 41 | # } 42 | 43 | # resource "azurerm_lb_backend_address_pool" "cncf" { 44 | # resource_group_name = "${ azurerm_public_ip.cncf.resource_group_name }" 45 | # loadbalancer_id = "${azurerm_lb.cncf.id}" 46 | # name = "BackEndAddressPool" 47 | # } 48 | -------------------------------------------------------------------------------- /Images/golden/README.md: -------------------------------------------------------------------------------- 1 | ## Run on specific host 2 | Provide a hosts inventory to the `-i` flag; note the trailing comma: 3 | 4 | ```ansible-playbook playbook.yml -i 42.867.53.09, --step``` 5 | 6 | ## Useful ansible tips 7 | 8 | ansible-playbook playbook.yml --list-tasks 9 | --step, --start-at-task="foo" 10 | 11 | ## Centos7 based golden Kubernetes image 12 | 13 | This image builds on the [base image](https://github.com/cncf/demo/tree/master/Images/base), 14 | a minimally modified Centos7 configured with sensible defaults for hosting a Kubernetes cluster. 15 | 16 | It bakes in everything needed to run Kubernetes master and/or minion nodes into one AMI. It is suggested to configure an instance to bootstrap as a minion or master via userdata. 17 | 18 | ## Configuration via Userdata 19 | 20 | 21 | Simply write a file named `kubernetes-master` _or_ `kubernetes-minion` and specify a `cluster_name` environment variable. That's it. 22 | 23 | 24 | ``` 25 | 26 | #!/bin/bash 27 | 28 | set -ex 29 | 30 | HOSTNAME_OVERRIDE=$(curl -s http://169.254.169.254/2007-01-19/meta-data/local-hostname | cut -d" " -f1) 31 | 32 | cat << EOF > /etc/sysconfig/kubernetes-{master,minion} 33 | 34 | CLUSTER_NAME={cncfdemo} 35 | KUBELET_HOSTNAME=--hostname-override=$HOSTNAME_OVERRIDE 36 | 37 | EOF 38 | 39 | ``` 40 | 41 | Note: The hostname override is an example specific to AWS. Adjust if needed. 42 | 43 | ## Customization Quickstart 44 | 45 | Simply install and configure [packer](https://www.packer.io/) and fork this repo to customize.
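
Before a full build, it can be worth validating the template first; `packer validate` checks the JSON and builder configuration without launching an instance:

> packer validate packer.json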
46 | 
47 | > packer build packer.json
48 | 
49 | ## Dependencies
50 | 
51 | - Packer 0.11+
52 | - Ansible 2.1+ installed ([installation instructions](http://docs.ansible.com/ansible/intro_installation.html))
53 | 
-------------------------------------------------------------------------------- /provisioning/aws/modules/iam/worker.tf: --------------------------------------------------------------------------------
1 | resource "aws_iam_role" "worker" {
2 |   name = "worker-k8s-${ var.name }"
3 | 
4 |   assume_role_policy = < " 
13 |   read azure_subscription_id
14 |   if [ "$azure_subscription_id" != "" ]; then
15 |     az account set --subscription $azure_subscription_id
16 |     azure_subscription_id=$(az account show | jq -r .id)
17 |   else
18 |     azure_subscription_id=$(az account show | jq -r .id)
19 |   fi
20 |   ARM_SUBSCRIPTION_ID=$azure_subscription_id
21 |   ARM_TENANT_ID=$(az account show | jq -r .tenantId)
22 |   echo "Using subscription_id: $ARM_SUBSCRIPTION_ID"
23 |   echo "Using tenant_id: $ARM_TENANT_ID"
24 | }
25 | 
26 | createServicePrincipal() {
27 |   echo "==> Creating service principal"
28 |   CREDS_JSON=$( az ad sp create-for-rbac)
29 |   if [ $? -ne 0 ]; then
30 |     echo "Error creating service principal"
31 |     exit 1
32 |   fi
33 |   ARM_TENANT_ID=$( echo ${CREDS_JSON} | jq -r .tenant )
34 |   ARM_CLIENT_ID=$( echo ${CREDS_JSON} | jq -r .appId )
35 |   ARM_CLIENT_SECRET=$( echo ${CREDS_JSON} | jq -r .password )
36 | }
37 | 
38 | showConfigs() {
39 |   echo ARM_SUBSCRIPTION_ID=$ARM_SUBSCRIPTION_ID
40 |   echo ARM_TENANT_ID=$ARM_TENANT_ID
41 |   echo ARM_CLIENT_ID=$ARM_CLIENT_ID
42 |   echo ARM_CLIENT_SECRET=$ARM_CLIENT_SECRET
43 | }
44 | 
45 | az login > /dev/null
46 | askSubscription
47 | createServicePrincipal
48 | showConfigs > /data/azure.env
49 | echo "./data/azure.env created"
50 | echo 'sudo chown -R $(whoami):$(whoami) ./data'
51 | 
-------------------------------------------------------------------------------- /provisioning/cross-cloud/cloud.tf: --------------------------------------------------------------------------------
1 | module "aws" {
2 |   source       = "../aws"
3 |   name         = "${ var.name }-aws"
4 |   internal_tld = "${ var.name }-aws.cncf.demo"
5 |   data_dir     = "${ var.data_dir }/aws"
6 | }
7 | 
8 | module "azure" {
9 |   source       = "../azure"
10 |   name         = "${ var.name }azure"
11 |   internal_tld = "${ var.name }-azure.cncf.demo"
12 |   data_dir     = "${ var.data_dir }/azure"
13 | }
14 | 
15 | module "packet" {
16 |   source            = "../packet"
17 |   name              = "${ var.name }-packet"
18 |   data_dir          = "${ var.data_dir }/packet"
19 |   packet_project_id = "${ var.packet_project_id }"
20 | }
21 | 
22 | module "gce" {
23 |   source   = "../gce"
24 |   name     = "${ var.name }-gce"
25 |   data_dir = "${ var.data_dir }/gce"
26 | }
27 | 
28 | module "gke" {
29 |   source   = "../gke"
30 |   name     = "${ var.name }-gke"
31 |   data_dir = "${ var.data_dir}/gke"
32 | }
33 | 
34 | 
35 | resource "null_resource" "kubeconfig" {
36 | 
37 |   provisioner "local-exec" {
38 |     command = <<COMMAND
39 | cat <<JSON > ${ var.data_dir }/azure-config.json
40 | {
41 |   "aadClientId": "$${ARM_CLIENT_ID}",
42 |   "aadClientSecret": "$${ARM_CLIENT_SECRET}",
43 |   "tenantId": "$${ARM_TENANT_ID}",
44 |   "subscriptionId": "$${ARM_SUBSCRIPTION_ID}",
45 |   "resourceGroup": "${ var.name }",
46 |   "location": "${ var.location }",
47 |   "subnetName": "${ var.name }",
48 |   "securityGroupName": "${ var.name }",
49 |   "vnetName": "${ var.name }",
50 |   "routeTableName": "${ var.name }",
51 |   "primaryAvailabilitySetName": "${ var.name }"
52 | }
53 | JSON
54 | COMMAND
55 |   }
56 | 
57 |   provisioner "local-exec" {
58 |     when = "destroy"
59
| on_failure = "continue" 60 | command = < * { align-self: center; } 21 | .space-between { justify-content: space-between; } 22 | .space-around { justify-content: space-around; } 23 | 24 | header { background-color: #ffffff; display: flex; flex-flow: column nowrap;} 25 | main { background-color: #f9f9f9; flex: 1; } 26 | footer { background-color: #333333; } 27 | 28 | .metadata { color: #333; font-size: 2rem; } 29 | 30 | 31 | #r1, #r2 { padding: 2rem; } 32 | #r2 { font-size: 0.625em; font-weight: 100; } 33 | #r2 > div > ul {list-style: none; font-size: 0.8em; padding: 0 0 0 0.2em; } 34 | 35 | #aws:before { 36 | background-image: url(img/aws.png); 37 | background-size: 50px 40px; 38 | content: ''; 39 | position: absolute; 40 | margin-left: -50px; 41 | width: 50px; 42 | height: 50px; 43 | background-position: left center; 44 | margin-top: -0.5rem; 45 | } 46 | 47 | #command { flex: 0 0 20rem; line-height: 1.5rem; font-size: 1rem; color: #7f7f7f; } 48 | #command span { display: inline-block; height: 32px; font-size: 13px; font-weight: 500; color: rgba(0,0,0,0.6); line-height: 32px; padding: 0 12px; border-radius: 16px; background-color: #e4e4e4; } 49 | 50 | footer > p { padding: 0 1rem; color: #ccc; } 51 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/kube-apiserver.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-apiserver 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-apiserver 10 | image: ${ hyperkube } 11 | command: 12 | - /hyperkube 13 | - apiserver 14 | - --admission-control=LimitRanger 15 | - --admission-control=NamespaceExists 16 | - --admission-control=NamespaceLifecycle 17 | - --admission-control=ResourceQuota 18 | - --admission-control=SecurityContextDeny 19 | - --admission-control=ServiceAccount 20 | - --allow-privileged=true 21 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem 22 | - --cloud-provider=aws 23 | - --etcd-servers=http://etcd.${ internal_tld }:2379 24 | - --insecure-bind-address=0.0.0.0 25 | - --secure-port=443 26 | - --service-account-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 27 | - --service-cluster-ip-range=${ service_cidr } 28 | - --tls-cert-file=/etc/kubernetes/ssl/k8s-apiserver.pem 29 | - --tls-private-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 30 | - --v=2 31 | livenessProbe: 32 | httpGet: 33 | host: 127.0.0.1 34 | port: 8080 35 | path: /healthz 36 | initialDelaySeconds: 15 37 | timeoutSeconds: 15 38 | ports: 39 | - containerPort: 443 40 | hostPort: 443 41 | name: https 42 | - containerPort: 8080 43 | hostPort: 8080 44 | name: local 45 | volumeMounts: 46 | - mountPath: /etc/kubernetes/ssl 47 | name: ssl-certs-kubernetes 48 | readOnly: true 49 | - mountPath: /etc/ssl/certs 50 | name: ssl-certs-host 51 | readOnly: true 52 | volumes: 53 | - hostPath: 54 | path: /etc/kubernetes/ssl 55 | name: ssl-certs-kubernetes 56 | - hostPath: 57 | path: /usr/share/ca-certificates 58 | name: ssl-certs-host 59 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/kube-apiserver.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-apiserver 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-apiserver 10 | image: ${ kubelet_image_url }:${ kubelet_image_tag } 11 | 
command: 12 | - /hyperkube 13 | - apiserver 14 | - --admission-control=LimitRanger 15 | - --admission-control=NamespaceExists 16 | - --admission-control=NamespaceLifecycle 17 | - --admission-control=ResourceQuota 18 | - --admission-control=SecurityContextDeny 19 | - --admission-control=ServiceAccount 20 | - --allow-privileged=true 21 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem 22 | - --cloud-provider=azure 23 | - --cloud-config=/etc/kubernetes/ssl/azure-config.json 24 | - --etcd-servers=http://etcd.${ internal_tld }:2379 25 | - --insecure-bind-address=0.0.0.0 26 | - --secure-port=443 27 | - --service-account-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 28 | - --service-cluster-ip-range=${ service_cidr } 29 | - --tls-cert-file=/etc/kubernetes/ssl/k8s-apiserver.pem 30 | - --tls-private-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 31 | - --v=2 32 | livenessProbe: 33 | httpGet: 34 | host: 127.0.0.1 35 | port: 8080 36 | path: /healthz 37 | initialDelaySeconds: 15 38 | timeoutSeconds: 15 39 | ports: 40 | - containerPort: 443 41 | hostPort: 443 42 | name: https 43 | - containerPort: 8080 44 | hostPort: 8080 45 | name: local 46 | volumeMounts: 47 | - mountPath: /etc/kubernetes/ssl 48 | name: ssl-certs-kubernetes 49 | readOnly: true 50 | - mountPath: /etc/ssl/certs 51 | name: ssl-certs-host 52 | readOnly: true 53 | volumes: 54 | - hostPath: 55 | path: /etc/kubernetes/ssl 56 | name: ssl-certs-kubernetes 57 | - hostPath: 58 | path: /usr/share/ca-certificates 59 | name: ssl-certs-host 60 | -------------------------------------------------------------------------------- /provisioning/azure/modules/worker/worker-nodes.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_network_interface" "cncf" { 2 | count = "${ var.worker_node_count }" 3 | name = "worker-interface${ count.index + 1 }" 4 | location = "${ var.location }" 5 | resource_group_name = "${ var.name }" 6 | 7 | ip_configuration { 8 | name = "worker-nic${ count.index + 1 }" 9 | subnet_id = "${ var.subnet_id }" 10 | private_ip_address_allocation = "dynamic" 11 | } 12 | } 13 | 14 | resource "azurerm_virtual_machine" "cncf" { 15 | count = "${ var.worker_node_count }" 16 | name = "worker-node${ count.index + 1 }" 17 | location = "${ var.location }" 18 | availability_set_id = "${ var.availability_id }" 19 | resource_group_name = "${ var.name }" 20 | network_interface_ids = ["${ element(azurerm_network_interface.cncf.*.id, count.index) }"] 21 | vm_size = "${ var.worker_vm_size }" 22 | 23 | storage_image_reference { 24 | publisher = "${ var.image_publisher }" 25 | offer = "${ var.image_offer }" 26 | sku = "${ var.image_sku }" 27 | version = "${ var.image_version}" 28 | } 29 | 30 | storage_os_disk { 31 | name = "worker-disks${ count.index + 1 }" 32 | vhd_uri = "${ var.storage_primary_endpoint }${ var.storage_container }/worker-vhd${ count.index + 1 }.vhd" 33 | caching = "ReadWrite" 34 | create_option = "FromImage" 35 | } 36 | 37 | os_profile { 38 | computer_name = "worker-node${ count.index + 1 }" 39 | admin_username = "${ var.admin_username }" 40 | admin_password = "Password1234!" 
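    # NOTE: password authentication is disabled in os_profile_linux_config
    # below, so this hard-coded value is effectively a required placeholder;
    # access is only via the SSH key injected further down.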
41 |     custom_data   = "${ element(data.template_file.worker_cloud_config.*.rendered, count.index) }"
42 |   }
43 | 
44 |   os_profile_linux_config {
45 |     disable_password_authentication = true
46 |     ssh_keys {
47 |       path     = "/home/${ var.admin_username }/.ssh/authorized_keys"
48 |       key_data = "${file("${ var.data_dir }/.ssh/id_rsa.pub")}"
49 |     }
50 |   }
51 | }
52 | 
-------------------------------------------------------------------------------- /Images/golden/playbook.yml: --------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 |   gather_facts: False
4 |   become: yes
5 | 
6 |   tasks:
7 | 
8 |   - yum_repository:
9 |       name: Kubernetes
10 |       description: Kubernetes Repository
11 |       baseurl: http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
12 |       gpgcheck: no
13 | 
14 |   - yum: name={{ item }} state=latest
15 |     with_items:
16 |     - docker
17 |     - kubernetes-cni
18 |     - kubectl
19 |     - kubelet
20 |     - kubeadm
21 | 
22 |   - lineinfile:
23 |       dest: /etc/sysconfig/docker-storage
24 |       regexp: '^DOCKER_STORAGE_OPTIONS='
25 |       line: 'DOCKER_STORAGE_OPTIONS="--storage-driver=overlay"'
26 | 
27 |   - name: Temp remove of extra args from drop-in - upstream rpm sets incorrectly
28 |     lineinfile:
29 |       dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
30 |       regexp: '^Environment="KUBELET_EXTRA_ARGS'
31 |       state: absent
32 | 
33 |   - lineinfile:
34 |       dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
35 |       insertafter: '^Environment=\"KUBELET_AUTHZ_ARGS'
36 |       line: 'Environment="KUBELET_EXTRA_ARGS=--cgroup-driver=systemd --cloud-provider=aws"'
37 | 
38 |   - copy:
39 |       content: |
40 |         kind: MasterConfiguration
41 |         apiVersion: kubeadm.k8s.io/v1alpha1
42 |         cloudProvider: aws
43 |       dest: /etc/kubernetes/kubeadm.conf
44 | 
45 |   - file: path=/tmp/helm state=directory
46 |   - unarchive:
47 |       src: "https://storage.googleapis.com/kubernetes-helm/helm-v2.4.2-linux-amd64.tar.gz"
48 |       dest: /tmp/helm
49 |       remote_src: True
50 |   - copy:
51 |       src: "/tmp/helm/linux-amd64/helm"
52 |       dest: /usr/local/bin/helm
53 |       owner: root
54 |       group: root
55 |       mode: 0700
56 |       remote_src: True
57 | 
58 |   - copy: src=services/ dest=/etc/systemd/system/ mode=0644
59 |   - service: name={{ item }} enabled=true
60 |     with_items:
61 |     - docker
62 |     - kubelet
63 |     - setup-network-environment
64 |     - setup-kubernetes-masters
65 |     - setup-kubernetes-minions
66 | 
67 | 
-------------------------------------------------------------------------------- /Docker/echo/echo.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | import random
4 | import json
5 | 
6 | import falcon
7 | 
8 | 
9 | class JSONResource(object):
10 |     def on_get(self, request, response):
11 |         json_data = {'message': "Hello, world!"}
12 |         response.body = json.dumps(json_data)
13 | 
14 | 
15 | class PlaintextResource(object):
16 |     def on_get(self, request, response):
17 |         response.set_header('Content-Type', 'text/plain')
18 |         response.body = b'OK'
19 | 
20 | 
21 | def append_headers(request, response, resource, params):
22 |     for pair in request.get_param_as_list('append_header') or []:
23 |         try:
24 |             name, value = pair.split(',', 1)
25 |         except ValueError:
26 |             name, value = pair, None  # no comma in the pair: header with no value
27 |         response.append_header(name, value)
28 | 
29 | 
30 | def timeout(request, response, resource, params):
31 |     if random.randrange(100) < sorted((0, request.get_param_as_int('timeout_probability') or 0, 100))[1]:
32 |         secs = request.get_param_as_int('timeout_seconds') or 1
33 |         raise
falcon.HTTPServiceUnavailable('Temporarily Unavailable', 'Timed out, wait {} second'.format(secs), secs) 34 | 35 | 36 | def error(request, response, resource, params): 37 | if random.randrange(100) < sorted((0, request.get_param_as_int('error_probability') or 0, 100))[1]: 38 | raise falcon.HTTPInternalServerError('INTERNAL SERVER ERROR', 'The server encountered an unexpected condition that prevented it from fulfilling the request.') 39 | 40 | 41 | @falcon.before(timeout) 42 | @falcon.before(error) 43 | @falcon.before(append_headers) 44 | class EchoResource(object): 45 | def on_get(self, request, response): 46 | response.set_header('Content-Type', request.get_param('Content-Type') or 'text/plain') 47 | response.status = request.get_param('status') or '200 OK' 48 | response.data = request.get_param('body') or 'OK' 49 | 50 | 51 | app = falcon.API() 52 | app.add_route("/json", JSONResource()) 53 | app.add_route("/plaintext", PlaintextResource()) 54 | app.add_route("/echo", EchoResource()) 55 | 56 | 57 | if __name__ == "__main__": 58 | from wsgiref import simple_server 59 | 60 | httpd = simple_server.make_server('localhost', 8080, app) 61 | httpd.serve_forever() 62 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/etcd-nodes.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_network_interface" "cncf" { 2 | count = "${ var.master_node_count }" 3 | name = "etcd-interface${ count.index + 1 }" 4 | location = "${ var.location }" 5 | resource_group_name = "${ var.name }" 6 | 7 | ip_configuration { 8 | name = "etcd-nic${ count.index + 1 }" 9 | subnet_id = "${ var.subnet_id }" 10 | private_ip_address_allocation = "dynamic" 11 | # private_ip_address = "${ element( split(",", var.etcd-ips), count.index ) }" 12 | load_balancer_backend_address_pools_ids = ["${ azurerm_lb_backend_address_pool.cncf.id }"] 13 | } 14 | } 15 | 16 | resource "azurerm_virtual_machine" "cncf" { 17 | count = "${ var.master_node_count }" 18 | name = "etcd-master${ count.index + 1 }" 19 | location = "${ var.location }" 20 | availability_set_id = "${ var.availability_id }" 21 | resource_group_name = "${ var.name }" 22 | network_interface_ids = ["${ element(azurerm_network_interface.cncf.*.id, count.index) }"] 23 | vm_size = "${ var.master_vm_size }" 24 | 25 | storage_image_reference { 26 | publisher = "${ var.image_publisher }" 27 | offer = "${ var.image_offer }" 28 | sku = "${ var.image_sku }" 29 | version = "${ var.image_version}" 30 | } 31 | 32 | storage_os_disk { 33 | name = "etcd-disks${ count.index + 1 }" 34 | vhd_uri = "${ var.storage_primary_endpoint }${ var.storage_container }/etcd-vhd${ count.index + 1 }.vhd" 35 | caching = "ReadWrite" 36 | create_option = "FromImage" 37 | } 38 | 39 | os_profile { 40 | computer_name = "etcd-master${ count.index + 1 }" 41 | admin_username = "${ var.admin_username }" 42 | admin_password = "Password1234!" 
43 | custom_data = "${ element(data.template_file.etcd_cloud_config.*.rendered, count.index) }" 44 | } 45 | 46 | os_profile_linux_config { 47 | disable_password_authentication = true 48 | ssh_keys { 49 | path = "/home/${ var.admin_username }/.ssh/authorized_keys" 50 | key_data = "${file("${ var.data_dir }/.ssh/id_rsa.pub")}" 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /provisioning/packet/modules/etcd/kube-apiserver.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-apiserver 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-apiserver 10 | image: ${ kubelet_image_url }:${ kubelet_image_tag } 11 | command: 12 | - /hyperkube 13 | - apiserver 14 | - --admission-control=NamespaceLifecycle 15 | - --admission-control=LimitRanger 16 | - --admission-control=ServiceAccount 17 | - --admission-control=DefaultStorageClass 18 | - --admission-control=ResourceQuota 19 | - --advertise-address=$private_ipv4 20 | - --apiserver-count=${ master_node_count } 21 | - --allow-privileged=true 22 | - --anonymous-auth=false 23 | - --client-ca-file=/etc/kubernetes/ssl/ca.pem 24 | - --enable-swagger-ui 25 | - --etcd-cafile=/etc/kubernetes/ssl/ca.pem 26 | - --etcd-certfile=/etc/kubernetes/ssl/k8s-etcd.pem 27 | - --etcd-keyfile=/etc/kubernetes/ssl/k8s-etcd-key.pem 28 | - --runtime-config=extensions/v1beta1/networkpolicies=true,batch/v2alpha1 29 | - --etcd-servers=https://etcd.${ internal_tld }:2379 30 | - --secure-port=443 31 | - --service-account-lookup 32 | - --service-account-private-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 33 | - --service-cluster-ip-range=${ service_cidr } 34 | - --tls-cert-file=/etc/kubernetes/ssl/k8s-apiserver.pem 35 | - --tls-private-key-file=/etc/kubernetes/ssl/k8s-apiserver-key.pem 36 | - --v=2 37 | livenessProbe: 38 | httpGet: 39 | host: 127.0.0.1 40 | port: 8080 41 | path: /healthz 42 | initialDelaySeconds: 15 43 | timeoutSeconds: 15 44 | ports: 45 | - containerPort: 443 46 | hostPort: 443 47 | name: https 48 | - containerPort: 8080 49 | hostPort: 8080 50 | name: local 51 | volumeMounts: 52 | - mountPath: /etc/kubernetes/ssl 53 | name: ssl-certs-kubernetes 54 | readOnly: true 55 | - mountPath: /etc/ssl/certs 56 | name: ssl-certs-host 57 | readOnly: true 58 | volumes: 59 | - hostPath: 60 | path: /etc/kubernetes/ssl 61 | name: ssl-certs-kubernetes 62 | - hostPath: 63 | path: /usr/share/ca-certificates 64 | name: ssl-certs-host 65 | -------------------------------------------------------------------------------- /provisioning/aws/modules/etcd/cloud-config.tf: -------------------------------------------------------------------------------- 1 | resource "gzip_me" "ca" { 2 | input = "${ var.ca }" 3 | } 4 | 5 | resource "gzip_me" "k8s_etcd" { 6 | input = "${ var.k8s_etcd }" 7 | } 8 | 9 | resource "gzip_me" "k8s_etcd_key" { 10 | input = "${ var.k8s_etcd_key }" 11 | } 12 | 13 | resource "gzip_me" "k8s_apiserver" { 14 | input = "${ var.k8s_apiserver }" 15 | } 16 | 17 | resource "gzip_me" "k8s_apiserver_key" { 18 | input = "${ var.k8s_apiserver_key }" 19 | } 20 | 21 | data "template_file" "kube-apiserver" { 22 | template = "${ file( "${ path.module }/kube-apiserver.yml" )}" 23 | 24 | vars { 25 | internal_tld = "${ var.internal_tld }" 26 | service_cidr = "${ var.service_cidr }" 27 | hyperkube = "${ var.kubelet_image_url }:${ var.kubelet_image_tag }" 28 | kubelet_image_url = "${ 
var.kubelet_image_url }" 29 | kubelet_image_tag = "${ var.kubelet_image_tag }" 30 | } 31 | } 32 | 33 | resource "gzip_me" "kube-apiserver" { 34 | input = "${ data.template_file.kube-apiserver.rendered }" 35 | } 36 | 37 | data "template_file" "cloud-config" { 38 | count = "${ var.master_node_count }" 39 | template = "${ file( "${ path.module }/cloud-config.yml" )}" 40 | 41 | vars { 42 | cluster_domain = "${ var.cluster_domain }" 43 | cluster-token = "etcd-cluster-${ var.name }" 44 | dns_service_ip = "${ var.dns_service_ip }" 45 | fqdn = "etcd${ count.index + 1 }.${ var.internal_tld }" 46 | hostname = "etcd${ count.index + 1 }" 47 | hyperkube = "${ var.kubelet_image_url }:${ var.kubelet_image_tag }" 48 | kubelet_image_url = "${ var.kubelet_image_url }" 49 | kubelet_image_tag = "${ var.kubelet_image_tag }" 50 | internal_tld = "${ var.internal_tld }" 51 | pod_cidr = "${ var.pod_cidr }" 52 | region = "${ var.region }" 53 | service_cidr = "${ var.service_cidr }" 54 | ca = "${ gzip_me.ca.output }" 55 | k8s_etcd = "${ gzip_me.k8s_etcd.output }" 56 | k8s_etcd_key = "${ gzip_me.k8s_etcd_key.output }" 57 | k8s_apiserver = "${ gzip_me.k8s_apiserver.output }" 58 | k8s_apiserver_key = "${ gzip_me.k8s_apiserver_key.output }" 59 | kube-apiserver-yml = "${ gzip_me.kube-apiserver.output }" 60 | } 61 | } 62 | 63 | 64 | 65 | # data "template_file" "kube-controller-manager" 66 | 67 | # data "template_file" "kube-proxy" 68 | 69 | # data "template_file" "kube-scheduler" 70 | -------------------------------------------------------------------------------- /provisioning/azure/modules/bastion/bastion-node.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_public_ip" "cncf" { 2 | name = "PublicIPForBastion" 3 | location = "${ var.location }" 4 | resource_group_name = "${ var.name }" 5 | public_ip_address_allocation = "static" 6 | domain_name_label = "bastion${ var.name }" 7 | } 8 | 9 | resource "azurerm_network_interface" "cncf" { 10 | name = "${ var.name }" 11 | location = "${ var.location }" 12 | resource_group_name = "${ var.name }" 13 | 14 | ip_configuration { 15 | name = "${ var.name }" 16 | subnet_id = "${ var.subnet_id }" 17 | private_ip_address_allocation = "dynamic" 18 | public_ip_address_id = "${ azurerm_public_ip.cncf.id }" 19 | } 20 | } 21 | 22 | resource "azurerm_virtual_machine" "cncf" { 23 | name = "${ var.name }" 24 | location = "${ var.location }" 25 | availability_set_id = "${ var.availability_id }" 26 | resource_group_name = "${ var.name }" 27 | network_interface_ids = ["${azurerm_network_interface.cncf.id}"] 28 | vm_size = "${ var.bastion_vm_size }" 29 | 30 | storage_image_reference { 31 | publisher = "${ var.image_publisher }" 32 | offer = "${ var.image_offer }" 33 | sku = "${ var.image_sku }" 34 | version = "${ var.image_version}" 35 | } 36 | 37 | storage_os_disk { 38 | name = "disk2" 39 | vhd_uri = "${ var.storage_primary_endpoint }${ var.storage_container }/disk2.vhd" 40 | caching = "ReadWrite" 41 | create_option = "FromImage" 42 | } 43 | 44 | os_profile { 45 | computer_name = "hostname" 46 | admin_username = "${ var.admin_username }" 47 | admin_password = "Password1234!" 
48 | custom_data = "${ data.template_file.bastion-user-data.rendered }" 49 | #custom_data = "${file("${path.module}/user-data2.yml")}" 50 | } 51 | 52 | os_profile_linux_config { 53 | disable_password_authentication = true 54 | ssh_keys { 55 | path = "/home/${ var.admin_username }/.ssh/authorized_keys" 56 | key_data = "${file("${ var.data_dir }/.ssh/id_rsa.pub")}" 57 | } 58 | } 59 | } 60 | 61 | data "template_file" "bastion-user-data" { 62 | template = "${ file( "${ path.module }/bastion-user-data.yml" )}" 63 | vars { 64 | internal_tld = "${ var.internal_tld }" 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /provisioning/gce/modules/etcd/nodes.tf: -------------------------------------------------------------------------------- 1 | resource "google_compute_region_backend_service" "cncf" { 2 | name = "${ var.name }" 3 | region = "${ var.region }" 4 | protocol = "TCP" 5 | timeout_sec = 10 6 | session_affinity = "CLIENT_IP" 7 | 8 | backend { 9 | group = "${google_compute_instance_group.cncf.self_link}" 10 | } 11 | 12 | health_checks = ["${google_compute_health_check.cncf.self_link}"] 13 | } 14 | 15 | resource "google_compute_target_pool" "cncf" { 16 | name = "${var.name}-external" 17 | 18 | instances = [ 19 | "${google_compute_instance.cncf.*.self_link}" 20 | ] 21 | 22 | # health_checks = ["${google_compute_health_check.cncf.self_link}"] 23 | } 24 | 25 | resource "google_compute_instance_group" "cncf" { 26 | name = "${ var.name }" 27 | instances = ["${google_compute_instance.cncf.*.self_link}"] 28 | 29 | named_port = { 30 | name = "http" 31 | port = "8080" 32 | } 33 | 34 | named_port { 35 | name = "https" 36 | port = "443" 37 | } 38 | 39 | zone = "${ var.zone }" 40 | } 41 | 42 | resource "google_compute_instance" "cncf" { 43 | count = "${ var.master_node_count }" 44 | name = "${ var.name }-master${ count.index + 1 }" 45 | machine_type = "n1-standard-1" 46 | zone = "${ var.zone }" 47 | 48 | tags = ["foo", "bar"] 49 | 50 | disk { 51 | image = "coreos-stable-1298-7-0-v20170401" 52 | } 53 | 54 | // Local SSD disk 55 | disk { 56 | type = "local-ssd" 57 | scratch = true 58 | } 59 | 60 | network_interface { 61 | # network = "${ var.name }" 62 | subnetwork = "${ var.name }" 63 | subnetwork_project = "${ var.project }" 64 | 65 | access_config { 66 | // FIX ME Don't assign Public IP 67 | // Ephemeral IP 68 | } 69 | } 70 | 71 | metadata { 72 | user-data = "${ element(data.template_file.cloud-config.*.rendered, count.index) }" 73 | } 74 | 75 | service_account { 76 | scopes = ["userinfo-email", "compute-ro", "storage-ro"] 77 | } 78 | } 79 | 80 | resource "google_compute_health_check" "cncf" { 81 | name = "${ var.name }" 82 | check_interval_sec = 30 83 | healthy_threshold = 2 84 | unhealthy_threshold = 2 85 | timeout_sec = 3 86 | 87 | http_health_check { 88 | port = "8080" 89 | host = "" 90 | request_path = "/" 91 | } 92 | } 93 | 94 | -------------------------------------------------------------------------------- /provisioning/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | MAINTAINER "Denver Williams " 3 | ENV KUBECTL_VERSION=v1.5.2 4 | ENV HELM_VERSION=v2.4.1 5 | ENV GCLOUD_VERSION=150.0.0 6 | ENV AWSCLI_VERSION=1.11.75 7 | ENV AZURECLI_VERSION=2.0.2 8 | ENV PACKETCLI_VERSION=1.33 9 | ENV TERRAFORM_VERSION=0.9.4 10 | ENV ARC=amd64 11 | 12 | # Install AWS / AZURE CLI Deps 13 | RUN apk update 14 | RUN apk add --update git bash util-linux wget tar curl build-base jq \ 15 | py-pip groff 
less openssh bind-tools python python-dev libffi-dev openssl-dev 16 | 17 | # no way to pin this packet-cli at the moment 18 | RUN go get -u github.com/ebsarr/packet 19 | RUN pip install packet-python==${PACKETCLI_VERSION} argh tabulate 20 | RUN pip install azure-cli==${AZURECLI_VERSION} 21 | RUN pip install awscli==${AWSCLI_VERSION} 22 | 23 | RUN apk --purge -v del py-pip && \ 24 | rm /var/cache/apk/* 25 | 26 | # Install Google Cloud SDK 27 | RUN wget https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${GCLOUD_VERSION}-linux-x86.tar.gz && \ 28 | tar xvfz google-cloud-sdk-${GCLOUD_VERSION}-linux-x86.tar.gz && \ 29 | ./google-cloud-sdk/install.sh -q 30 | 31 | 32 | #Install Kubectl 33 | RUN wget -O /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/$ARC/kubectl && \ 34 | chmod +x /usr/local/bin/kubectl 35 | 36 | #Install helm 37 | RUN wget https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz && \ 38 | tar xvzf helm-${HELM_VERSION}-linux-amd64.tar.gz && \ 39 | mv linux-amd64/helm /usr/local/bin && \ 40 | rm -rf helm-*gz linux-amd64 41 | 42 | # Install Terraform 43 | RUN wget https://releases.hashicorp.com/terraform/$TERRAFORM_VERSION/terraform_"${TERRAFORM_VERSION}"_linux_$ARC.zip 44 | RUN unzip terraform*.zip -d /usr/bin 45 | 46 | # Install CFSSL 47 | RUN go get -u github.com/cloudflare/cfssl/cmd/cfssl && \ 48 | go get -u github.com/cloudflare/cfssl/cmd/... 49 | 50 | # Install Gzip+base64 Provider 51 | RUN go get -u github.com/jakexks/terraform-provider-gzip && \ 52 | echo providers { >> ~/.terraformrc && \ 53 | echo ' gzip = "terraform-provider-gzip"' >> ~/.terraformrc && \ 54 | echo } >> ~/.terraformrc 55 | 56 | #Add Terraform Modules 57 | 58 | COPY provision.sh /cncf/ 59 | RUN chmod +x /cncf/provision.sh 60 | #ENTRYPOINT ["/cncf/provision.sh"] 61 | WORKDIR /cncf/ 62 | #CMD ["aws-deploy"] 63 | -------------------------------------------------------------------------------- /provisioning/aws/modules/worker/ec2.tf: -------------------------------------------------------------------------------- 1 | resource "aws_launch_configuration" "worker" { 2 | ebs_block_device { 3 | device_name = "/dev/xvdf" 4 | volume_size = "${ var.volume_size["ebs"] }" 5 | volume_type = "gp2" 6 | } 7 | iam_instance_profile = "${ var.instance_profile_name }" 8 | image_id = "${ var.ami_id }" 9 | instance_type = "${ var.instance_type }" 10 | key_name = "${ var.key_name }" 11 | 12 | # Storage 13 | root_block_device { 14 | volume_size = "${ var.volume_size["root"] }" 15 | volume_type = "gp2" 16 | } 17 | 18 | security_groups = [ 19 | "${ var.security_group_id }", 20 | ] 21 | 22 | user_data = "${ data.template_file.cloud-config.rendered }" 23 | 24 | /*lifecycle { 25 | create_before_destroy = true 26 | }*/ 27 | } 28 | 29 | resource "aws_autoscaling_group" "worker" { 30 | name = "worker-${ var.worker_name }-${ var.name }" 31 | 32 | desired_capacity = "${ var.capacity["desired"] }" 33 | health_check_grace_period = 60 34 | health_check_type = "EC2" 35 | force_delete = true 36 | launch_configuration = "${ aws_launch_configuration.worker.name }" 37 | max_size = "${ var.capacity["max"] }" 38 | min_size = "${ var.capacity["min"] }" 39 | vpc_zone_identifier = [ "${ split(",", var.subnet_ids) }" ] 40 | 41 | tag { 42 | key = "builtWith" 43 | value = "terraform" 44 | propagate_at_launch = true 45 | } 46 | 47 | tag { 48 | key = "depends-id" 49 | value = "${ var.depends_id }" 50 | propagate_at_launch = false 51 | } 52 | 53 
| # used by kubelet's aws provider to determine cluster 54 | tag { 55 | key = "KubernetesCluster" 56 | value = "${ var.name }" 57 | propagate_at_launch = true 58 | } 59 | 60 | tag { 61 | key = "kz8s" 62 | value = "${ var.name }" 63 | propagate_at_launch = true 64 | } 65 | 66 | tag { 67 | key = "Name" 68 | value = "worker-${ var.name }" 69 | propagate_at_launch = true 70 | } 71 | 72 | tag { 73 | key = "role" 74 | value = "worker" 75 | propagate_at_launch = true 76 | } 77 | 78 | tag { 79 | key = "version" 80 | value = "${ var.kubelet_image_tag }" 81 | propagate_at_launch = true 82 | } 83 | 84 | tag { 85 | key = "visibility" 86 | value = "private" 87 | propagate_at_launch = true 88 | } 89 | } 90 | 91 | resource "null_resource" "dummy_dependency" { 92 | depends_on = [ 93 | "aws_autoscaling_group.worker", 94 | "aws_launch_configuration.worker", 95 | ] 96 | } 97 | -------------------------------------------------------------------------------- /provisioning/azure/modules/etcd/etcd-cloud-config.tf: -------------------------------------------------------------------------------- 1 | provider "gzip" { 2 | compressionlevel = "BestCompression" 3 | } 4 | 5 | resource "gzip_me" "kube-apiserver" { 6 | input = "${ data.template_file.kube_apiserver.rendered }" 7 | } 8 | resource "gzip_me" "k8s_cloud_config" { 9 | input = "${ var.k8s_cloud_config }" 10 | } 11 | 12 | resource "gzip_me" "ca" { 13 | input = "${ var.ca }" 14 | } 15 | 16 | resource "gzip_me" "k8s_etcd" { 17 | input = "${ var.k8s_etcd }" 18 | } 19 | 20 | resource "gzip_me" "k8s_etcd_key" { 21 | input = "${ var.k8s_etcd_key }" 22 | } 23 | 24 | resource "gzip_me" "k8s_apiserver" { 25 | input = "${ var.k8s_apiserver }" 26 | } 27 | 28 | resource "gzip_me" "k8s_apiserver_key" { 29 | input = "${ var.k8s_apiserver_key }" 30 | } 31 | 32 | data "template_file" "kube_apiserver" { 33 | template = "${ file( "${ path.module }/kube-apiserver.yml" )}" 34 | vars { 35 | internal_tld = "${ var.internal_tld }" 36 | service_cidr = "${ var.service_cidr }" 37 | hyperkube = "${ var.kubelet_image_url }:${ var.kubelet_image_tag }" 38 | kubelet_image_url = "${ var.kubelet_image_url }" 39 | kubelet_image_tag = "${ var.kubelet_image_tag }" 40 | } 41 | } 42 | 43 | data "template_file" "etcd_cloud_config" { 44 | count = "${ var.master_node_count }" 45 | template = "${ file( "${ path.module }/etcd-cloud-config.yml" )}" 46 | 47 | vars { 48 | # bucket = "${ var.s3_bucket }" 49 | cluster_domain = "${ var.cluster_domain }" 50 | cluster-token = "etcd-cluster-${ var.name }" 51 | dns_service_ip = "${ var.dns_service_ip }" 52 | fqdn = "etcd${ count.index + 1 }.${ var.internal_tld }" 53 | hostname = "etcd${ count.index + 1 }" 54 | kubelet_image_url = "${ var.kubelet_image_url }" 55 | kubelet_image_tag = "${ var.kubelet_image_tag }" 56 | internal_tld = "${ var.internal_tld }" 57 | pod_cidr = "${ var.pod_cidr }" 58 | location = "${ var.location }" 59 | service_cidr = "${ var.service_cidr }" 60 | k8s_cloud_config = "${ gzip_me.k8s_cloud_config.output }" 61 | ca = "${ gzip_me.ca.output }" 62 | k8s_etcd = "${ gzip_me.k8s_etcd.output }" 63 | k8s_etcd_key = "${ gzip_me.k8s_etcd_key.output }" 64 | k8s_apiserver = "${ gzip_me.k8s_apiserver.output }" 65 | k8s_apiserver_key = "${ gzip_me.k8s_apiserver_key.output }" 66 | k8s_apiserver_yml = "${ gzip_me.kube-apiserver.output }" 67 | node-ip = "${ element(azurerm_network_interface.cncf.*.private_ip_address, count.index) }" 68 | 69 | } 70 | } 71 | -------------------------------------------------------------------------------- 
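The `gzip_me` resources used throughout these cloud-config templates come from the third-party `terraform-provider-gzip` plugin installed in the provisioning Dockerfile above. Each one compresses an asset (certs, keys, rendered manifests) so the final cloud-config stays within user-data size limits. A rough shell equivalent, assuming the provider also base64-encodes its output for embedding (a sketch, not the provider's actual code):

```
# Compress a cert the way a gzip_me resource does, then base64-encode it
# so it can be inlined into cloud-init user-data.
gzip --best -c ca.pem | base64 -w0
```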
/provisioning/aws/Readme.mkd: --------------------------------------------------------------------------------
1 | ## Prerequisites
2 | * [docker](https://docker.io/)
3 | 
4 | * AWS user with the following permissions:
5 |   - AmazonEC2FullAccess
6 |   - AmazonS3FullAccess
7 |   - AWSCodeDeployFullAccess
8 |   - AmazonRoute53DomainsFullAccess
9 |   - AmazonRoute53FullAccess
10 |   - IAMFullAccess
11 |   - IAMUserChangePassword
12 | 
13 | * Must use a config file from the repo: data/terraform.tfvars
14 | 
15 | ## Export AWS authentication
16 | 
17 | ```
18 | export AWS_ACCESS_KEY_ID="YOUR_AWS_KEY_ID"
19 | export AWS_SECRET_ACCESS_KEY="YOUR_AWS_SECRET_KEY"
20 | ```
21 | 
22 | ## Create the AWS Kubernetes endpoint
23 | 
24 | ```
25 | docker run -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
26 |            -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
27 |            -v $(pwd)/data:/cncf/data create/aws
28 | ```
29 | 
30 | ## Configure kubectl on the local system
31 | 
32 | ```
33 | sudo chown -R $(whoami):$(whoami) data/
34 | export KUBECONFIG=$(pwd)/data/kubeconfig
35 | $ kubectl get nodes
36 | NAME                                            STATUS                     AGE
37 | ip-10-0-10-10.ap-southeast-2.compute.internal   Ready,SchedulingDisabled   7m
38 | ip-10-0-10-11.ap-southeast-2.compute.internal   Ready,SchedulingDisabled   6m
39 | ip-10-0-10-12.ap-southeast-2.compute.internal   Ready,SchedulingDisabled   7m
40 | ip-10-0-10-51.ap-southeast-2.compute.internal   Ready                      6m
41 | ip-10-0-11-7.ap-southeast-2.compute.internal    Ready                      6m
42 | ip-10-0-12-68.ap-southeast-2.compute.internal   Ready                      6m
43 | ```
44 | 
45 | ## Data folder contains certs + kubeconfig
46 | 
47 | It also contains a JSON file with details on the current cluster state.
48 | 
49 | ```
50 | $ sudo cat ./data/kubeconfig
51 | apiVersion: v1
52 | clusters:
53 | - cluster:
54 |     certificate-authority: .cfssl/ca.pem
55 |     server: https://kz8s-apiserver-test-453655923.ap-southeast-2.elb.amazonaws.com
56 |   name: cluster-test
57 | contexts:
58 | - context:
59 |     cluster: cluster-test
60 |     user: admin-test
61 |   name: test
62 | current-context: test
63 | kind: Config
64 | preferences: {}
65 | users:
66 | - name: admin-test
67 |   user:
68 |     client-certificate: .cfssl/k8s-admin.pem
69 |     client-key: .cfssl/k8s-admin-key.pem
70 | ```
71 | 
72 | 
73 | ## Destroy the AWS Kubernetes endpoint
74 | 
75 | ```
76 | docker run -e AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \
77 |            -e AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY \
78 |            -v $(pwd)/data:/cncf/data terminate/aws
79 | ```
80 | 
-------------------------------------------------------------------------------- /Web/results/chart.js: --------------------------------------------------------------------------------
1 | 
2 | var colors = d3.scale.category20();
3 | var chart;
4 | nv.addGraph(function() {
5 |   chart = nv.models.stackedAreaChart()
6 |     .useInteractiveGuideline(true)
7 |     .x(function(d) { return d[0] })
8 |     .y(function(d) { return d[1] })
9 |     .duration(300);
10 | 
11 |   chart.showControls(false)
12 |   chart.style("expand");
13 | 
14 |   chart.xAxis.tickFormat(function(d) { return d3.time.format('%H:%M')(new Date(d)) });
15 |   chart.yAxis.tickFormat(d3.format(',.4f'));
16 |   chart.legend.vers('furious');
17 |   d3.select('#chart1')
18 |     .datum(histcatexplong)
19 |     .transition().duration(1000)
20 |     .call(chart)
21 |     .each('start', function() {
22 |       setTimeout(function() {
23 |         d3.selectAll('#chart1 *').each(function() {
24 |           if(this.__transition__)
25 |             this.__transition__.duration = 1;
26 |         })
27 |       }, 0)
28 |     });
29 |   nv.utils.windowResize(chart.update);
30 |   return chart;
31 | });
32 | 
33 | 
34 | function volatileChart(startPrice, volatility,
numPoints) { 35 | var rval = []; 36 | var now =+new Date(); 37 | numPoints = numPoints || 100; 38 | for(var i = 1; i < numPoints; i++) { 39 | rval.push({x: now + i * 1000 * 60 * 60 * 24, y: startPrice}); 40 | var rnd = Math.random(); 41 | var changePct = 2 * volatility * rnd; 42 | if ( changePct > volatility) { 43 | changePct -= (2*volatility); 44 | } 45 | startPrice = startPrice + startPrice * changePct; 46 | } 47 | return rval; 48 | } 49 | 50 | wrk = volatileChart(25.0, 0.09,30); 51 | 52 | nv.addGraph(function() { 53 | var chart = nv.models.sparklinePlus(); 54 | chart.margin({left:70}) 55 | .x(function(d,i) { return i }) 56 | .showLastValue(true) 57 | .xTickFormat(function(d) { 58 | return d3.time.format('%M:%S')(new Date(wrk[d].x)) 59 | }); 60 | d3.select('#spark1') 61 | .datum(wrk) 62 | .call(chart); 63 | 64 | chart.alignValue(false); 65 | chart.showLastValue(false); 66 | chart.animate(false); 67 | return chart; 68 | }); 69 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/bootstrap/aws/utils.py: -------------------------------------------------------------------------------- 1 | from functools import partial, reduce 2 | import collections 3 | 4 | import sys 5 | import botocore 6 | import click 7 | import time 8 | 9 | 10 | class Action(collections.namedtuple('Action', [ "resource", "method", "arguments", "saveas" ])): 11 | def __new__(cls, resource, method, arguments, saveas=""): 12 | return super(Action, cls).__new__(cls, resource, method, arguments, saveas) 13 | 14 | 15 | def pluck(source, selector): 16 | return reduce(lambda d,k: d.get(k, {}), selector.split('.'), source) 17 | 18 | 19 | def unroll(pair): 20 | get, selector = pair 21 | selector = selector.split('.') 22 | item = selector.pop(0) 23 | return getattr(get(item), '.'.join(selector)) 24 | 25 | 26 | def walk(adict): 27 | for key, value in adict.iteritems(): 28 | if isinstance(value, dict): 29 | walk(value) 30 | elif isinstance(value, tuple) and isinstance(value[0], partial): 31 | adict[key] = unroll(value) 32 | elif isinstance(value, collections.Sequence): 33 | for item in value: 34 | if isinstance(item, dict): 35 | walk(item) 36 | return adict 37 | 38 | 39 | def execute2(context, actions): 40 | 41 | for a in map(lambda action: Action(*action), actions): 42 | 43 | try: 44 | 45 | if a.method == 'create_launch_configuration': 46 | click.echo('waiting some more..') 47 | time.sleep(10) # AWS API bug, remove in future 48 | 49 | resource = context[a.resource] 50 | arguments = walk(a.arguments) 51 | result = getattr(resource, a.method)(**arguments) 52 | click.echo("{}... 
OK".format(a.method)) 53 | if a.saveas: 54 | context[a.saveas] = result 55 | 56 | 57 | except botocore.exceptions.ClientError as e: 58 | 59 | Errors = ['InvalidKeyPair.Duplicate','InvalidGroup.Duplicate','InvalidPermission.Duplicate','EntityAlreadyExists','AlreadyExists', \ 60 | 'InvalidGroup.NotFound','NoSuchEntity','ValidationError','LimitExceeded','DependencyViolation', 'DryRunOperation'] 61 | 62 | if e.response['Error']['Code'] in Errors: 63 | click.echo(e.response['Error']['Message']) 64 | else: 65 | click.echo("Unexpected error: {}".format(e)) 66 | sys.exit("Aborting..") 67 | 68 | return context 69 | 70 | 71 | def DhcpConfigurations(region): 72 | domain_name = 'ec2.internal' if region == 'us-east-1' else '{}.compute.internal'.format(region) 73 | return [{'Key': 'domain-name-servers', 'Values': ['AmazonProvidedDNS']}, {'Key': 'domain-name', 'Values': ['{} k8s'.format(domain_name)]}] 74 | 75 | -------------------------------------------------------------------------------- /cncfdemo-cli/cncfdemo/kubectl/cmd_create.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | 6 | import yaml, json 7 | 8 | import glob2 9 | 10 | import click 11 | import jinja2 12 | 13 | from cncfdemo.kubectl.configmap import configmap 14 | from cncfdemo.kubectl.utils import create as kreate, json_dump 15 | 16 | 17 | @click.group() 18 | def cli(): 19 | pass 20 | 21 | 22 | @click.group(invoke_without_command=True) 23 | #@click.group() 24 | @click.option('-f', '--filename', type=click.Path(exists=True), help='Filename or directory to use to create the resource', required=False) 25 | @click.option('-R', '--recursive', is_flag=True, help='Process the directory used in -f, --filename recursively. 
Useful when you want to manage related manifests organized within the same directory.') 26 | @click.option('--dry-run', is_flag=True, help='Do not submit to kubernetes apiserver') 27 | @click.option('--debug', is_flag=True, help='Print output to stdout') 28 | @click.pass_context 29 | def create(ctx, filename, recursive, dry_run, debug): 30 | """Either '-f' option or subcommand required.""" 31 | 32 | if ctx.invoked_subcommand: 33 | return 'defer to subcommand' 34 | 35 | if not filename: 36 | #click.echo('error: Missing option "-f".') 37 | click.echo(create.get_help(ctx)) 38 | sys.exit(0) 39 | 40 | realpath = os.path.realpath(filename) 41 | manifests = [] 42 | 43 | if os.path.isfile(filename): 44 | manifests.extend([realpath]) 45 | 46 | if os.path.isdir(filename): 47 | if recursive: 48 | manifests.extend([f for f in glob2.glob(realpath + '/**/*.j2')]) 49 | manifests.extend([f for f in glob2.glob(realpath + '/**/*.yml')]) 50 | manifests.extend([f for f in glob2.glob(realpath + '/**/*.yaml')]) 51 | manifests = [f for f in manifests if os.path.isfile(f)] 52 | else: 53 | manifests.extend([realpath+'/'+f for f in os.listdir(realpath) if os.path.isfile(realpath+'/'+f) and f.endswith(('.j2','.yaml','.yml'))]) 54 | 55 | 56 | if not manifests: 57 | click.echo('no manifest files found') 58 | sys.exit(0) 59 | 60 | if debug: 61 | click.echo(manifests) 62 | 63 | for manifest in manifests: 64 | definitions = None 65 | 66 | t = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(os.path.realpath(manifest)))) 67 | t.filters['json_dump'] = json_dump 68 | definitions = t.get_template(os.path.basename(manifest)).render() 69 | 70 | if debug: 71 | print definitions if definitions else '' 72 | 73 | for definition in yaml.load_all(definitions): 74 | # import ipdb; ipdb.set_trace() 75 | if not dry_run: 76 | resp, status = kreate(definition) 77 | 78 | 79 | cli.add_command(create) 80 | create.add_command(configmap) 81 | 82 | 83 | if __name__ == '__main__': 84 | create() 85 | -------------------------------------------------------------------------------- /provisioning/aws/modules/security/security.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "bastion" { 2 | description = "k8s bastion security group" 3 | 4 | egress = { 5 | from_port = 0 6 | to_port = 0 7 | protocol = "-1" 8 | cidr_blocks = [ "0.0.0.0/0" ] 9 | } 10 | 11 | ingress = { 12 | from_port = 22 13 | to_port = 22 14 | protocol = "tcp" 15 | cidr_blocks = [ "${ var.allow_ssh_cidr }" ] 16 | } 17 | 18 | name = "bastion-k8s-${ var.name }" 19 | 20 | tags { 21 | KubernetesCluster = "${ var.name }" 22 | Name = "bastion-k8s-${ var.name }" 23 | builtWith = "terraform" 24 | } 25 | 26 | vpc_id = "${ var.vpc_id }" 27 | } 28 | 29 | resource "aws_security_group" "etcd" { 30 | description = "k8s etcd security group" 31 | 32 | egress = { 33 | from_port = 0 34 | to_port = 0 35 | protocol = "-1" 36 | /*self = true*/ 37 | cidr_blocks = [ "0.0.0.0/0" ] 38 | } 39 | 40 | ingress = { 41 | from_port = 0 42 | to_port = 0 43 | protocol = "-1" 44 | self = true 45 | cidr_blocks = [ "${ var.vpc_cidr }" ] 46 | } 47 | 48 | name = "etcd-k8s-${ var.name }" 49 | 50 | tags { 51 | KubernetesCluster = "${ var.name }" 52 | Name = "etcd-k8s-${ var.name }" 53 | builtWith = "terraform" 54 | } 55 | 56 | vpc_id = "${ var.vpc_id }" 57 | } 58 | 59 | resource "aws_security_group" "external_elb" { 60 | description = "k8s-${ var.name } master (apiserver) external elb" 61 | 62 | egress { 63 | from_port = 0 64 | to_port = 0 
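# all ports and protocols, but egress is restricted to the etcd security
# group (see security_groups below) rather than an open CIDR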
65 | protocol = "-1" 66 | /*cidr_blocks = [ "${ var.vpc_cidr }" ]*/ 67 | security_groups = [ "${ aws_security_group.etcd.id }" ] 68 | } 69 | 70 | ingress { 71 | from_port = -1 72 | to_port = -1 73 | protocol = "icmp" 74 | cidr_blocks = [ "0.0.0.0/0" ] 75 | } 76 | 77 | ingress { 78 | from_port = 443 79 | to_port = 443 80 | protocol = "tcp" 81 | cidr_blocks = [ "0.0.0.0/0" ] 82 | } 83 | 84 | name = "master-external-elb-k8s-${ var.name }" 85 | 86 | tags { 87 | KubernetesCluster = "${ var.name }" 88 | Name = "master-external-elb-k8s-${ var.name }" 89 | builtWith = "terraform" 90 | } 91 | 92 | vpc_id = "${ var.vpc_id }" 93 | } 94 | 95 | resource "aws_security_group" "worker" { 96 | description = "k8s worker security group" 97 | 98 | egress = { 99 | from_port = 0 100 | to_port = 0 101 | protocol = "-1" 102 | /*self = true*/ 103 | cidr_blocks = [ "0.0.0.0/0" ] 104 | } 105 | 106 | ingress = { 107 | from_port = 0 108 | to_port = 0 109 | protocol = "-1" 110 | self = true 111 | cidr_blocks = [ "${ var.vpc_cidr }" ] 112 | } 113 | 114 | name = "worker-k8s-${ var.name }" 115 | 116 | tags { 117 | KubernetesCluster = "${ var.name }" 118 | Name = "worker-k8s-${ var.name }" 119 | builtWith = "terraform" 120 | } 121 | 122 | vpc_id = "${ var.vpc_id }" 123 | } 124 | -------------------------------------------------------------------------------- /provisioning/packet/init-cfssl: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | function usage { cat <ca-csr.json 95 | echo "$(ca-config)" >ca-config.json 96 | 97 | # generate ca 98 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca - 99 | _chmod ca 100 | 101 | # generate keys and certs 102 | generate k8s-admin client-server "${DEFAULT_HOSTS}" 103 | generate k8s-apiserver client-server "${DEFAULT_HOSTS},${K8S_SERVICE_IP},master.${INTERNAL_TLD},endpoint.${INTERNAL_TLD}" 104 | generate k8s-etcd client-server "etcd.${INTERNAL_TLD},etcd1.${INTERNAL_TLD},etcd2.${INTERNAL_TLD},etcd3.${INTERNAL_TLD}" 105 | generate k8s-worker client "${DEFAULT_HOSTS}" 106 | 107 | # TODO: fix cert provisioning hacks 108 | #tar -rf k8s-apiserver.tar k8s-etcd.pem k8s-etcd-key.pem 109 | #tar -rf k8s-worker.tar ca.pem 110 | #bzip2 k8s-apiserver.tar 111 | #bzip2 k8s-worker.tar 112 | -------------------------------------------------------------------------------- /provisioning/azure/init-cfssl: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | function usage { cat <ca-csr.json 95 | echo "$(ca-config)" >ca-config.json 96 | 97 | # generate ca 98 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca - 99 | _chmod ca 100 | 101 | # generate keys and certs 102 | generate k8s-admin client-server "${DEFAULT_HOSTS}" 103 | generate k8s-apiserver client-server "${DEFAULT_HOSTS},${K8S_SERVICE_IP},master.${INTERNAL_TLD},*.${REGION}.cloudapp.azure.com" 104 | generate k8s-etcd client-server "etcd.${INTERNAL_TLD},etcd1.${INTERNAL_TLD},etcd2.${INTERNAL_TLD},etcd3.${INTERNAL_TLD}" 105 | generate k8s-worker client "${DEFAULT_HOSTS}" 106 | 107 | # TODO: fix cert provisioning hacks 108 | #tar -rf k8s-apiserver.tar k8s-etcd.pem k8s-etcd-key.pem 109 | #tar -rf k8s-worker.tar ca.pem 110 | #bzip2 k8s-apiserver.tar 111 | #bzip2 k8s-worker.tar 112 | --------------------------------------------------------------------------------
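Large spans of both `init-cfssl` scripts above (the usage text, the `ca-csr`/`ca-config` JSON, and the body of the `generate` helper) were lost to extraction damage. The surviving lines still show the shape of the PKI bootstrap: initialize a CA, then issue per-component certs against named profiles. Below is a minimal sketch of what the missing `generate` helper likely looks like; the CSR fields and key size are assumptions, while the cfssl flags, the `_chmod` call, and the `client-server`/`client` profile names come from the surviving calls:

```
# Sketch of the lost generate() helper: issue a cert signed by the local CA
# for a given profile and comma-separated SAN list (CSR fields are assumed).
generate() {
  local name="$1" profile="$2" hosts="$3"
  cat > "${name}-csr.json" <<EOF
{ "CN": "${name}", "key": { "algo": "rsa", "size": 2048 } }
EOF
  cfssl gencert \
    -ca=ca.pem -ca-key=ca-key.pem \
    -config=ca-config.json \
    -profile="${profile}" \
    -hostname="${hosts}" \
    "${name}-csr.json" | cfssljson -bare "${name}"
  _chmod "${name}"
}
```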